-rw-r--r--  MAINTAINERS | 10
-rw-r--r--  Makefile | 7
-rw-r--r--  Makefile.target | 5
-rw-r--r--  block/backup.c | 1
-rw-r--r--  block/gluster.c | 230
-rw-r--r--  block/iscsi.c | 5
-rw-r--r--  block/mirror.c | 14
-rw-r--r--  block/nfs.c | 55
-rw-r--r--  blockjob.c | 1
-rw-r--r--  bsd-user/syscall.c | 9
-rwxr-xr-x  configure | 46
-rw-r--r--  cputlb.c | 39
-rw-r--r--  crypto/Makefile.objs | 3
-rw-r--r--  crypto/block-luks.c | 21
-rw-r--r--  crypto/hash-gcrypt.c | 110
-rw-r--r--  crypto/hash-nettle.c | 155
-rw-r--r--  crypto/hash-stub.c | 41
-rw-r--r--  crypto/hash.c | 109
-rw-r--r--  crypto/tlscreds.c | 26
-rw-r--r--  crypto/tlssession.c | 26
-rw-r--r--  default-configs/arm-softmmu.mak | 1
-rw-r--r--  default-configs/ppc64-softmmu.mak | 1
-rw-r--r--  dma-helpers.c | 7
-rw-r--r--  docs/memory.txt | 9
-rw-r--r--  docs/specs/acpi_cpu_hotplug.txt | 94
-rw-r--r--  docs/specs/acpi_nvdimm.txt | 132
-rw-r--r--  fpu/softfloat-specialize.h | 659
-rw-r--r--  fpu/softfloat.c | 172
-rw-r--r--  fsdev/9p-iov-marshal.c | 1
-rw-r--r--  fsdev/9p-marshal.c | 1
-rw-r--r--  fsdev/file-op-9p.h | 1
-rw-r--r--  hmp.c | 16
-rw-r--r--  hw/9pfs/9p-synth.c | 200
-rw-r--r--  hw/acpi/Makefile.objs | 2
-rw-r--r--  hw/acpi/aml-build.c | 22
-rw-r--r--  hw/acpi/cpu.c | 561
-rw-r--r--  hw/acpi/cpu_hotplug.c | 21
-rw-r--r--  hw/acpi/ich9.c | 69
-rw-r--r--  hw/acpi/ipmi.c | 105
-rw-r--r--  hw/acpi/nvdimm.c | 400
-rw-r--r--  hw/acpi/piix4.c | 71
-rw-r--r--  hw/acpi/trace-events | 14
-rw-r--r--  hw/arm/ast2400.c | 61
-rw-r--r--  hw/arm/fsl-imx25.c | 8
-rw-r--r--  hw/arm/fsl-imx31.c | 9
-rw-r--r--  hw/arm/fsl-imx6.c | 8
-rw-r--r--  hw/arm/palmetto-bmc.c | 33
-rw-r--r--  hw/arm/sabrelite.c | 18
-rw-r--r--  hw/arm/spitz.c | 12
-rw-r--r--  hw/arm/tosa.c | 5
-rw-r--r--  hw/arm/virt.c | 1
-rw-r--r--  hw/arm/xilinx_zynq.c | 14
-rw-r--r--  hw/arm/xlnx-ep108.c | 9
-rw-r--r--  hw/arm/z2.c | 6
-rw-r--r--  hw/audio/pcspk.c | 9
-rw-r--r--  hw/block/dataplane/virtio-blk.c | 83
-rw-r--r--  hw/block/dataplane/virtio-blk.h | 2
-rw-r--r--  hw/block/m25p80.c | 472
-rw-r--r--  hw/block/pflash_cfi01.c | 1
-rw-r--r--  hw/block/pflash_cfi02.c | 1
-rw-r--r--  hw/block/virtio-blk.c | 57
-rw-r--r--  hw/char/cadence_uart.c | 16
-rw-r--r--  hw/char/serial.c | 67
-rw-r--r--  hw/core/Makefile.objs | 1
-rw-r--r--  hw/core/register.c | 287
-rw-r--r--  hw/display/ads7846.c | 5
-rw-r--r--  hw/display/ssd0323.c | 5
-rw-r--r--  hw/dma/Makefile.objs | 1
-rw-r--r--  hw/dma/xlnx-zynq-devcfg.c | 400
-rw-r--r--  hw/i2c/smbus_ich9.c | 1
-rw-r--r--  hw/i386/acpi-build.c | 80
-rw-r--r--  hw/i386/intel_iommu.c | 12
-rw-r--r--  hw/i386/kvm/pci-assign.c | 18
-rw-r--r--  hw/i386/pc.c | 73
-rw-r--r--  hw/i386/pc_piix.c | 2
-rw-r--r--  hw/i386/pc_q35.c | 30
-rw-r--r--  hw/i386/pci-assign-load-rom.c | 3
-rw-r--r--  hw/ide/ahci.c | 2
-rw-r--r--  hw/ide/macio.c | 2
-rw-r--r--  hw/input/pckbd.c | 21
-rw-r--r--  hw/intc/Makefile.objs | 1
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c | 2
-rw-r--r--  hw/intc/armv7m_nvic.c | 8
-rw-r--r--  hw/intc/xics.c | 548
-rw-r--r--  hw/intc/xics_kvm.c | 63
-rw-r--r--  hw/intc/xics_spapr.c | 434
-rw-r--r--  hw/isa/isa-bus.c | 7
-rw-r--r--  hw/isa/lpc_ich9.c | 90
-rw-r--r--  hw/mem/nvdimm.c | 132
-rw-r--r--  hw/mem/pc-dimm.c | 14
-rw-r--r--  hw/microblaze/petalogix_ml605_mmu.c | 9
-rw-r--r--  hw/misc/Makefile.objs | 1
-rw-r--r--  hw/misc/aspeed_scu.c | 284
-rw-r--r--  hw/misc/max111x.c | 12
-rw-r--r--  hw/misc/trace-events | 3
-rw-r--r--  hw/misc/vmport.c | 1
-rw-r--r--  hw/net/cadence_gem.c | 13
-rw-r--r--  hw/net/e1000.c | 18
-rw-r--r--  hw/net/e1000e_core.c | 6
-rw-r--r--  hw/net/e1000x_common.c | 2
-rw-r--r--  hw/net/eepro100.c | 8
-rw-r--r--  hw/net/mipsnet.c | 8
-rw-r--r--  hw/net/rocker/rocker_tlv.h | 6
-rw-r--r--  hw/net/rtl8139.c | 49
-rw-r--r--  hw/net/virtio-net.c | 2
-rw-r--r--  hw/net/vmware_utils.h | 55
-rw-r--r--  hw/net/vmxnet3.c | 197
-rw-r--r--  hw/pci-host/q35.c | 20
-rw-r--r--  hw/ppc/Makefile.objs | 1
-rw-r--r--  hw/ppc/e500.c | 2
-rw-r--r--  hw/ppc/e500.h | 2
-rw-r--r--  hw/ppc/ppc.c | 17
-rw-r--r--  hw/ppc/ppce500_spin.c | 9
-rw-r--r--  hw/ppc/spapr.c | 40
-rw-r--r--  hw/ppc/spapr_cpu_core.c | 75
-rw-r--r--  hw/ppc/spapr_events.c | 8
-rw-r--r--  hw/ppc/spapr_hcall.c | 4
-rw-r--r--  hw/ppc/spapr_iommu.c | 12
-rw-r--r--  hw/ppc/spapr_pci.c | 94
-rw-r--r--  hw/ppc/spapr_rtas_ddw.c | 295
-rw-r--r--  hw/ppc/spapr_vio.c | 2
-rw-r--r--  hw/ppc/trace-events | 4
-rw-r--r--  hw/s390x/virtio-ccw.c | 133
-rw-r--r--  hw/scsi/esp.c | 5
-rw-r--r--  hw/scsi/virtio-scsi-dataplane.c | 9
-rw-r--r--  hw/scsi/virtio-scsi.c | 5
-rw-r--r--  hw/sd/ssi-sd.c | 9
-rw-r--r--  hw/sh4/sh_pci.c | 4
-rw-r--r--  hw/smbios/Makefile.objs | 1
-rw-r--r--  hw/smbios/smbios.c | 72
-rw-r--r--  hw/smbios/smbios_build.h | 87
-rw-r--r--  hw/smbios/smbios_type_38.c | 117
-rw-r--r--  hw/ssi/Makefile.objs | 1
-rw-r--r--  hw/ssi/aspeed_smc.c | 470
-rw-r--r--  hw/ssi/ssi.c | 6
-rw-r--r--  hw/timer/mc146818rtc.c | 6
-rw-r--r--  hw/vfio/Makefile.objs | 1
-rw-r--r--  hw/vfio/common.c | 175
-rw-r--r--  hw/vfio/pci-quirks.c | 8
-rw-r--r--  hw/vfio/pci.c | 101
-rw-r--r--  hw/vfio/pci.h | 1
-rw-r--r--  hw/vfio/spapr.c | 210
-rw-r--r--  hw/vfio/trace-events | 9
-rw-r--r--  hw/virtio/vhost.c | 13
-rw-r--r--  hw/virtio/virtio-bus.c | 132
-rw-r--r--  hw/virtio/virtio-mmio.c | 128
-rw-r--r--  hw/virtio/virtio-pci.c | 124
-rw-r--r--  include/crypto/tlscreds.h | 1
-rw-r--r--  include/elf.h | 2
-rw-r--r--  include/exec/cpu-all.h | 2
-rw-r--r--  include/exec/memory.h | 32
-rw-r--r--  include/fpu/softfloat.h | 45
-rw-r--r--  include/glib-compat.h | 26
-rw-r--r--  include/hw/acpi/acpi_dev_interface.h | 7
-rw-r--r--  include/hw/acpi/aml-build.h | 3
-rw-r--r--  include/hw/acpi/cpu.h | 67
-rw-r--r--  include/hw/acpi/cpu_hotplug.h | 6
-rw-r--r--  include/hw/acpi/ich9.h | 3
-rw-r--r--  include/hw/acpi/ipmi.h | 22
-rw-r--r--  include/hw/arm/ast2400.h | 5
-rw-r--r--  include/hw/audio/pcspk.h | 2
-rw-r--r--  include/hw/block/flash.h | 3
-rw-r--r--  include/hw/char/serial.h | 3
-rw-r--r--  include/hw/cpu/core.h | 3
-rw-r--r--  include/hw/dma/xlnx-zynq-devcfg.h | 62
-rw-r--r--  include/hw/i386/ich9.h | 8
-rw-r--r--  include/hw/i386/pc.h | 16
-rw-r--r--  include/hw/ide/ahci.h (renamed from hw/ide/ahci.h) | 0
-rw-r--r--  include/hw/ide/internal.h (renamed from hw/ide/internal.h) | 0
-rw-r--r--  include/hw/ide/pci.h (renamed from hw/ide/pci.h) | 0
-rw-r--r--  include/hw/isa/isa.h | 1
-rw-r--r--  include/hw/mem/nvdimm.h | 55
-rw-r--r--  include/hw/mem/pc-dimm.h | 5
-rw-r--r--  include/hw/misc/aspeed_scu.h | 34
-rw-r--r--  include/hw/pci-host/q35.h | 9
-rw-r--r--  include/hw/pci-host/spapr.h | 10
-rw-r--r--  include/hw/ppc/spapr.h | 18
-rw-r--r--  include/hw/ppc/spapr_vio.h | 2
-rw-r--r--  include/hw/ppc/xics.h | 55
-rw-r--r--  include/hw/register.h | 255
-rw-r--r--  include/hw/smbios/ipmi.h | 15
-rw-r--r--  include/hw/ssi/aspeed_smc.h | 100
-rw-r--r--  include/hw/ssi/ssi.h | 2
-rw-r--r--  include/hw/timer/i8254.h | 8
-rw-r--r--  include/hw/timer/i8254_internal.h | 8
-rw-r--r--  include/hw/vfio/vfio-common.h | 20
-rw-r--r--  include/hw/virtio/virtio-blk.h | 6
-rw-r--r--  include/hw/virtio/virtio-bus.h | 31
-rw-r--r--  include/io/channel.h | 1
-rw-r--r--  include/migration/vmstate.h | 5
-rw-r--r--  include/qemu/bitops.h | 3
-rw-r--r--  include/qemu/range.h | 91
-rw-r--r--  include/qemu/sockets.h | 15
-rw-r--r--  include/sysemu/char.h | 16
-rw-r--r--  include/ui/console.h | 7
-rw-r--r--  io/channel-socket.c | 17
-rw-r--r--  linux-user/host/aarch64/hostdep.h | 38
-rw-r--r--  linux-user/host/aarch64/safe-syscall.inc.S | 75
-rw-r--r--  linux-user/host/arm/hostdep.h | 38
-rw-r--r--  linux-user/host/arm/safe-syscall.inc.S | 90
-rw-r--r--  linux-user/host/i386/hostdep.h | 38
-rw-r--r--  linux-user/host/i386/safe-syscall.inc.S | 112
-rw-r--r--  linux-user/host/ia64/hostdep.h (renamed from linux-user/host/generic/hostdep.h) | 7
-rw-r--r--  linux-user/host/mips/hostdep.h | 15
-rw-r--r--  linux-user/host/ppc/hostdep.h | 15
-rw-r--r--  linux-user/host/ppc64/hostdep.h | 38
-rw-r--r--  linux-user/host/ppc64/safe-syscall.inc.S | 92
-rw-r--r--  linux-user/host/s390/hostdep.h | 15
-rw-r--r--  linux-user/host/s390x/hostdep.h | 38
-rw-r--r--  linux-user/host/s390x/safe-syscall.inc.S | 90
-rw-r--r--  linux-user/host/sparc/hostdep.h | 15
-rw-r--r--  linux-user/host/sparc64/hostdep.h | 15
-rw-r--r--  linux-user/host/x32/hostdep.h | 15
-rw-r--r--  linux-user/host/x86_64/safe-syscall.inc.S | 6
-rw-r--r--  linux-user/main.c | 46
-rw-r--r--  linux-user/qemu.h | 11
-rw-r--r--  linux-user/signal.c | 23
-rw-r--r--  linux-user/strace.c | 621
-rw-r--r--  linux-user/strace.list | 10
-rw-r--r--  linux-user/syscall.c | 423
-rw-r--r--  linux-user/syscall_defs.h | 24
-rw-r--r--  memory.c | 26
-rw-r--r--  net/net.c | 2
-rw-r--r--  net/socket.c | 55
-rw-r--r--  net/vhost-user.c | 7
-rw-r--r--  pc-bios/bios-256k.bin | bin 262144 -> 262144 bytes
-rw-r--r--  pc-bios/bios.bin | bin 131072 -> 131072 bytes
-rw-r--r--  po/bg.po | 90
-rw-r--r--  qapi-schema.json | 27
-rw-r--r--  qapi/crypto.json | 6
-rw-r--r--  qapi/string-input-visitor.c | 17
-rw-r--r--  qapi/string-output-visitor.c | 4
-rw-r--r--  qemu-char.c | 19
-rw-r--r--  qemu-img.c | 56
-rw-r--r--  qemu-img.texi | 13
-rw-r--r--  qemu-io.c | 18
-rw-r--r--  qemu-nbd.c | 19
-rw-r--r--  qemu-nbd.texi | 3
-rw-r--r--  qemu-option-trace.texi | 25
-rw-r--r--  qemu-options.hx | 29
-rw-r--r--  qmp-commands.hx | 4
-rw-r--r--  qobject/json-lexer.c | 19
-rw-r--r--  qobject/json-streamer.c | 6
-rw-r--r--  roms/config.seabios-128k | 2
m---------  roms/seabios | 0
-rwxr-xr-x  scripts/checkpatch.pl | 2
-rw-r--r--  scripts/qapi-visit.py | 6
-rw-r--r--  slirp/Makefile.objs | 2
-rw-r--r--  slirp/dhcpv6.c | 209
-rw-r--r--  slirp/dhcpv6.h | 22
-rw-r--r--  slirp/ip6.h | 9
-rw-r--r--  slirp/ip6_icmp.c | 27
-rw-r--r--  slirp/ip6_icmp.h | 12
-rw-r--r--  slirp/libslirp.h | 1
-rw-r--r--  slirp/slirp.c | 126
-rw-r--r--  slirp/socket.c | 7
-rw-r--r--  slirp/tftp.c | 4
-rw-r--r--  slirp/udp6.c | 13
-rw-r--r--  stubs/Makefile.objs | 3
-rw-r--r--  stubs/ipmi.c | 14
-rw-r--r--  stubs/pc_madt_cpu_entry.c | 7
-rw-r--r--  stubs/smbios_type_38.c | 14
-rw-r--r--  target-alpha/cpu.h | 2
-rw-r--r--  target-alpha/translate.c | 9
-rw-r--r--  target-arm/arm-semi.c | 47
-rw-r--r--  target-arm/cpu.h | 2
-rw-r--r--  target-arm/helper-a64.c | 14
-rw-r--r--  target-arm/helper.c | 40
-rw-r--r--  target-cris/cpu.h | 2
-rw-r--r--  target-i386/cpu.h | 2
-rw-r--r--  target-lm32/cpu.h | 2
-rw-r--r--  target-m68k/cpu.h | 2
-rw-r--r--  target-m68k/helper.c | 6
-rw-r--r--  target-microblaze/cpu.h | 2
-rw-r--r--  target-microblaze/op_helper.c | 6
-rw-r--r--  target-mips/cpu.h | 18
-rw-r--r--  target-mips/gdbstub.c | 8
-rw-r--r--  target-mips/helper.h | 22
-rw-r--r--  target-mips/msa_helper.c | 88
-rw-r--r--  target-mips/op_helper.c | 400
-rw-r--r--  target-mips/translate.c | 156
-rw-r--r--  target-mips/translate_init.c | 29
-rw-r--r--  target-moxie/cpu.h | 2
-rw-r--r--  target-openrisc/cpu.h | 2
-rw-r--r--  target-ppc/cpu-qom.h | 3
-rw-r--r--  target-ppc/cpu.h | 20
-rw-r--r--  target-ppc/excp_helper.c | 41
-rw-r--r--  target-ppc/fpu_helper.c | 126
-rw-r--r--  target-ppc/helper.h | 3
-rw-r--r--  target-ppc/helper_regs.h | 4
-rw-r--r--  target-ppc/mmu-hash64.c | 371
-rw-r--r--  target-ppc/mmu-hash64.h | 8
-rw-r--r--  target-ppc/timebase_helper.c | 10
-rw-r--r--  target-ppc/translate.c | 22
-rw-r--r--  target-ppc/translate_init.c | 232
-rw-r--r--  target-s390x/cpu.h | 2
-rw-r--r--  target-s390x/fpu_helper.c | 28
-rw-r--r--  target-s390x/helper.h | 6
-rw-r--r--  target-s390x/translate.c | 6
-rw-r--r--  target-sh4/cpu.c | 1
-rw-r--r--  target-sh4/cpu.h | 2
-rw-r--r--  target-sparc/cpu.h | 2
-rw-r--r--  target-sparc/translate.c | 5
-rw-r--r--  target-tilegx/cpu.h | 2
-rw-r--r--  target-tricore/cpu.h | 2
-rw-r--r--  target-unicore32/cpu.c | 2
-rw-r--r--  target-unicore32/cpu.h | 3
-rw-r--r--  target-xtensa/cpu.h | 2
-rw-r--r--  tests/Makefile.include | 4
-rw-r--r--  tests/acpi-test-data/pc/DSDT | bin 5503 -> 6008 bytes
-rw-r--r--  tests/acpi-test-data/pc/DSDT.bridge | bin 7362 -> 7867 bytes
-rw-r--r--  tests/acpi-test-data/pc/DSDT.ipmikcs | bin 0 -> 6080 bytes
-rw-r--r--  tests/acpi-test-data/q35/DSDT | bin 8265 -> 8770 bytes
-rw-r--r--  tests/acpi-test-data/q35/DSDT.bridge | bin 8282 -> 8787 bytes
-rw-r--r--  tests/acpi-test-data/q35/DSDT.ipmibt | bin 0 -> 8845 bytes
-rw-r--r--  tests/bios-tables-test.c | 60
-rwxr-xr-x  tests/qemu-iotests/041 | 30
-rw-r--r--  tests/qemu-iotests/041.out | 4
-rwxr-xr-x  tests/qemu-iotests/149 | 12
-rw-r--r--  tests/qemu-iotests/149.out | 240
-rw-r--r--  tests/test-crypto-hash.c | 53
-rw-r--r--  tests/test-crypto-xts.c | 18
-rw-r--r--  tests/test-io-channel-socket.c | 2
-rw-r--r--  tests/test-qmp-input-visitor.c | 12
-rw-r--r--  tests/vhost-user-test.c | 17
-rw-r--r--  trace-events | 16
-rw-r--r--  trace/control.c | 42
-rw-r--r--  trace/control.h | 25
-rw-r--r--  ui/vnc.c | 23
-rw-r--r--  user-exec.c | 107
-rw-r--r--  util/Makefile.objs | 1
-rw-r--r--  util/qemu-sockets.c | 54
-rw-r--r--  util/range.c | 76
-rw-r--r--  vl.c | 48
334 files changed, 12818 insertions, 3424 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 2ab6e3bdf8..1d0e2c39ce 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -888,12 +888,13 @@ F: include/hw/virtio/
virtio-9p
M: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
-M: Greg Kurz <gkurz@linux.vnet.ibm.com>
+M: Greg Kurz <groug@kaod.org>
S: Supported
F: hw/9pfs/
F: fsdev/
F: tests/virtio-9p-test.c
T: git git://github.com/kvaneesh/QEMU.git
+T: git git://github.com/gkurz/qemu.git 9p-next
virtio-blk
M: Stefan Hajnoczi <stefanha@redhat.com>
@@ -1261,7 +1262,6 @@ F: docs/tracing.txt
T: git git://github.com/stefanha/qemu.git tracing
Checkpatch
-M: Blue Swirl <blauwirbel@gmail.com>
S: Odd Fixes
F: scripts/checkpatch.pl
@@ -1334,8 +1334,7 @@ F: thunk.c
F: user-exec.c
BSD user
-M: Blue Swirl <blauwirbel@gmail.com>
-S: Maintained
+S: Orphan
F: bsd-user/
Linux user
@@ -1398,8 +1397,7 @@ F: tcg/s390/
F: disas/s390.c
SPARC target
-M: Blue Swirl <blauwirbel@gmail.com>
-S: Maintained
+S: Odd Fixes
F: tcg/sparc/
F: disas/sparc.c
diff --git a/Makefile b/Makefile
index c1ac21ddbc..c054bc6356 100644
--- a/Makefile
+++ b/Makefile
@@ -565,8 +565,9 @@ qemu.1: qemu-doc.texi qemu-options.texi qemu-monitor.texi qemu-monitor-info.texi
perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< qemu.pod && \
$(POD2MAN) --section=1 --center=" " --release=" " qemu.pod > $@, \
" GEN $@")
+qemu.1: qemu-option-trace.texi
-qemu-img.1: qemu-img.texi qemu-img-cmds.texi
+qemu-img.1: qemu-img.texi qemu-option-trace.texi qemu-img-cmds.texi
$(call quiet-command, \
perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< qemu-img.pod && \
$(POD2MAN) --section=1 --center=" " --release=" " qemu-img.pod > $@, \
@@ -578,7 +579,7 @@ fsdev/virtfs-proxy-helper.1: fsdev/virtfs-proxy-helper.texi
$(POD2MAN) --section=1 --center=" " --release=" " fsdev/virtfs-proxy-helper.pod > $@, \
" GEN $@")
-qemu-nbd.8: qemu-nbd.texi
+qemu-nbd.8: qemu-nbd.texi qemu-option-trace.texi
$(call quiet-command, \
perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< qemu-nbd.pod && \
$(POD2MAN) --section=8 --center=" " --release=" " qemu-nbd.pod > $@, \
@@ -596,7 +597,7 @@ info: qemu-doc.info qemu-tech.info
pdf: qemu-doc.pdf qemu-tech.pdf
qemu-doc.dvi qemu-doc.html qemu-doc.info qemu-doc.pdf: \
- qemu-img.texi qemu-nbd.texi qemu-options.texi \
+ qemu-img.texi qemu-nbd.texi qemu-options.texi qemu-option-trace.texi \
qemu-monitor.texi qemu-img-cmds.texi qemu-ga.texi \
qemu-monitor-info.texi
diff --git a/Makefile.target b/Makefile.target
index d720b3e733..a440bcb5b8 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -108,11 +108,8 @@ obj-$(CONFIG_LIBDECNUMBER) += libdecnumber/dpd/decimal128.o
ifdef CONFIG_LINUX_USER
-# Note that we only add linux-user/host/$ARCH if it exists, and
-# that it must come before linux-user/host/generic in the search path.
QEMU_CFLAGS+=-I$(SRC_PATH)/linux-user/$(TARGET_ABI_DIR) \
- $(patsubst %,-I%,$(wildcard $(SRC_PATH)/linux-user/host/$(ARCH))) \
- -I$(SRC_PATH)/linux-user/host/generic \
+ -I$(SRC_PATH)/linux-user/host/$(ARCH) \
-I$(SRC_PATH)/linux-user
obj-y += linux-user/
diff --git a/block/backup.c b/block/backup.c
index 581269b29a..f87f8d539b 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -489,7 +489,6 @@ void backup_start(BlockDriverState *bs, BlockDriverState *target,
assert(bs);
assert(target);
- assert(cb);
if (bs == target) {
error_setg(errp, "Source and target cannot be the same");
diff --git a/block/gluster.c b/block/gluster.c
index d361d8e847..16f7778a50 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -24,6 +24,8 @@ typedef struct GlusterAIOCB {
typedef struct BDRVGlusterState {
struct glfs *glfs;
struct glfs_fd *fd;
+ bool supports_seek_data;
+ int debug_level;
} BDRVGlusterState;
typedef struct GlusterConf {
@@ -32,6 +34,7 @@ typedef struct GlusterConf {
char *volname;
char *image;
char *transport;
+ int debug_level;
} GlusterConf;
static void qemu_gluster_gconf_free(GlusterConf *gconf)
@@ -194,11 +197,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
goto out;
}
- /*
- * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
- * GlusterFS makes GF_LOG_* macros available to libgfapi users.
- */
- ret = glfs_set_logging(glfs, "-", 4);
+ ret = glfs_set_logging(glfs, "-", gconf->debug_level);
if (ret < 0) {
goto out;
}
@@ -256,16 +255,26 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
qemu_bh_schedule(acb->bh);
}
+#define GLUSTER_OPT_FILENAME "filename"
+#define GLUSTER_OPT_DEBUG "debug"
+#define GLUSTER_DEBUG_DEFAULT 4
+#define GLUSTER_DEBUG_MAX 9
+
/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
.name = "gluster",
.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
.desc = {
{
- .name = "filename",
+ .name = GLUSTER_OPT_FILENAME,
.type = QEMU_OPT_STRING,
.help = "URL to the gluster image",
},
+ {
+ .name = GLUSTER_OPT_DEBUG,
+ .type = QEMU_OPT_NUMBER,
+ .help = "Gluster log level, valid range is 0-9",
+ },
{ /* end of list */ }
},
};
@@ -287,6 +296,28 @@ static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
}
}
+/*
+ * Do SEEK_DATA/HOLE to detect if it is functional. Older broken versions of
+ * gfapi incorrectly return the current offset when SEEK_DATA/HOLE is used.
+ * - Corrected versions return -1 and set errno to EINVAL.
+ * - Versions that support SEEK_DATA/HOLE correctly, will return -1 and set
+ * errno to ENXIO when SEEK_DATA is called with a position of EOF.
+ */
+static bool qemu_gluster_test_seek(struct glfs_fd *fd)
+{
+ off_t ret, eof;
+
+ eof = glfs_lseek(fd, 0, SEEK_END);
+ if (eof < 0) {
+ /* this should never occur */
+ return false;
+ }
+
+ /* this should always fail with ENXIO if SEEK_DATA is supported */
+ ret = glfs_lseek(fd, eof, SEEK_DATA);
+ return (ret < 0) && (errno == ENXIO);
+}
+
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
int bdrv_flags, Error **errp)
{
@@ -306,8 +337,17 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
goto out;
}
- filename = qemu_opt_get(opts, "filename");
+ filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME);
+ s->debug_level = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG,
+ GLUSTER_DEBUG_DEFAULT);
+ if (s->debug_level < 0) {
+ s->debug_level = 0;
+ } else if (s->debug_level > GLUSTER_DEBUG_MAX) {
+ s->debug_level = GLUSTER_DEBUG_MAX;
+ }
+
+ gconf->debug_level = s->debug_level;
s->glfs = qemu_gluster_init(gconf, filename, errp);
if (!s->glfs) {
ret = -errno;
@@ -338,6 +378,8 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
ret = -errno;
}
+ s->supports_seek_data = qemu_gluster_test_seek(s->fd);
+
out:
qemu_opts_del(opts);
qemu_gluster_gconf_free(gconf);
@@ -363,6 +405,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
{
int ret = 0;
+ BDRVGlusterState *s;
BDRVGlusterReopenState *reop_s;
GlusterConf *gconf = NULL;
int open_flags = 0;
@@ -370,6 +413,8 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
assert(state != NULL);
assert(state->bs != NULL);
+ s = state->bs->opaque;
+
state->opaque = g_new0(BDRVGlusterReopenState, 1);
reop_s = state->opaque;
@@ -377,6 +422,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
gconf = g_new0(GlusterConf, 1);
+ gconf->debug_level = s->debug_level;
reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
if (reop_s->glfs == NULL) {
ret = -errno;
@@ -510,6 +556,14 @@ static int qemu_gluster_create(const char *filename,
char *tmp = NULL;
GlusterConf *gconf = g_new0(GlusterConf, 1);
+ gconf->debug_level = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG,
+ GLUSTER_DEBUG_DEFAULT);
+ if (gconf->debug_level < 0) {
+ gconf->debug_level = 0;
+ } else if (gconf->debug_level > GLUSTER_DEBUG_MAX) {
+ gconf->debug_level = GLUSTER_DEBUG_MAX;
+ }
+
glfs = qemu_gluster_init(gconf, filename, errp);
if (!glfs) {
ret = -errno;
@@ -727,6 +781,159 @@ static int qemu_gluster_has_zero_init(BlockDriverState *bs)
return 0;
}
+/*
+ * Find allocation range in @bs around offset @start.
+ * May change underlying file descriptor's file offset.
+ * If @start is not in a hole, store @start in @data, and the
+ * beginning of the next hole in @hole, and return 0.
+ * If @start is in a non-trailing hole, store @start in @hole and the
+ * beginning of the next non-hole in @data, and return 0.
+ * If @start is in a trailing hole or beyond EOF, return -ENXIO.
+ * If we can't find out, return a negative errno other than -ENXIO.
+ *
+ * (Shamefully copied from raw-posix.c, only miniscule adaptions.)
+ */
+static int find_allocation(BlockDriverState *bs, off_t start,
+ off_t *data, off_t *hole)
+{
+ BDRVGlusterState *s = bs->opaque;
+ off_t offs;
+
+ if (!s->supports_seek_data) {
+ return -ENOTSUP;
+ }
+
+ /*
+ * SEEK_DATA cases:
+ * D1. offs == start: start is in data
+ * D2. offs > start: start is in a hole, next data at offs
+ * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
+ * or start is beyond EOF
+ * If the latter happens, the file has been truncated behind
+ * our back since we opened it. All bets are off then.
+ * Treating like a trailing hole is simplest.
+ * D4. offs < 0, errno != ENXIO: we learned nothing
+ */
+ offs = glfs_lseek(s->fd, start, SEEK_DATA);
+ if (offs < 0) {
+ return -errno; /* D3 or D4 */
+ }
+ assert(offs >= start);
+
+ if (offs > start) {
+ /* D2: in hole, next data at offs */
+ *hole = start;
+ *data = offs;
+ return 0;
+ }
+
+ /* D1: in data, end not yet known */
+
+ /*
+ * SEEK_HOLE cases:
+ * H1. offs == start: start is in a hole
+ * If this happens here, a hole has been dug behind our back
+ * since the previous lseek().
+ * H2. offs > start: either start is in data, next hole at offs,
+ * or start is in trailing hole, EOF at offs
+ * Linux treats trailing holes like any other hole: offs ==
+ * start. Solaris seeks to EOF instead: offs > start (blech).
+ * If that happens here, a hole has been dug behind our back
+ * since the previous lseek().
+ * H3. offs < 0, errno = ENXIO: start is beyond EOF
+ * If this happens, the file has been truncated behind our
+ * back since we opened it. Treat it like a trailing hole.
+ * H4. offs < 0, errno != ENXIO: we learned nothing
+ * Pretend we know nothing at all, i.e. "forget" about D1.
+ */
+ offs = glfs_lseek(s->fd, start, SEEK_HOLE);
+ if (offs < 0) {
+ return -errno; /* D1 and (H3 or H4) */
+ }
+ assert(offs >= start);
+
+ if (offs > start) {
+ /*
+ * D1 and H2: either in data, next hole at offs, or it was in
+ * data but is now in a trailing hole. In the latter case,
+ * all bets are off. Treating it as if it there was data all
+ * the way to EOF is safe, so simply do that.
+ */
+ *data = start;
+ *hole = offs;
+ return 0;
+ }
+
+ /* D1 and H1 */
+ return -EBUSY;
+}
+
+/*
+ * Returns the allocation status of the specified sectors.
+ *
+ * If 'sector_num' is beyond the end of the disk image the return value is 0
+ * and 'pnum' is set to 0.
+ *
+ * 'pnum' is set to the number of sectors (including and immediately following
+ * the specified sector) that are known to be in the same
+ * allocated/unallocated state.
+ *
+ * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
+ * beyond the end of the disk image it will be clamped.
+ *
+ * (Based on raw_co_get_block_status() from raw-posix.c.)
+ */
+static int64_t coroutine_fn qemu_gluster_co_get_block_status(
+ BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
+ BlockDriverState **file)
+{
+ BDRVGlusterState *s = bs->opaque;
+ off_t start, data = 0, hole = 0;
+ int64_t total_size;
+ int ret = -EINVAL;
+
+ if (!s->fd) {
+ return ret;
+ }
+
+ start = sector_num * BDRV_SECTOR_SIZE;
+ total_size = bdrv_getlength(bs);
+ if (total_size < 0) {
+ return total_size;
+ } else if (start >= total_size) {
+ *pnum = 0;
+ return 0;
+ } else if (start + nb_sectors * BDRV_SECTOR_SIZE > total_size) {
+ nb_sectors = DIV_ROUND_UP(total_size - start, BDRV_SECTOR_SIZE);
+ }
+
+ ret = find_allocation(bs, start, &data, &hole);
+ if (ret == -ENXIO) {
+ /* Trailing hole */
+ *pnum = nb_sectors;
+ ret = BDRV_BLOCK_ZERO;
+ } else if (ret < 0) {
+ /* No info available, so pretend there are no holes */
+ *pnum = nb_sectors;
+ ret = BDRV_BLOCK_DATA;
+ } else if (data == start) {
+ /* On a data extent, compute sectors to the end of the extent,
+ * possibly including a partial sector at EOF. */
+ *pnum = MIN(nb_sectors, DIV_ROUND_UP(hole - start, BDRV_SECTOR_SIZE));
+ ret = BDRV_BLOCK_DATA;
+ } else {
+ /* On a hole, compute sectors to the beginning of the next extent. */
+ assert(hole == start);
+ *pnum = MIN(nb_sectors, (data - start) / BDRV_SECTOR_SIZE);
+ ret = BDRV_BLOCK_ZERO;
+ }
+
+ *file = bs;
+
+ return ret | BDRV_BLOCK_OFFSET_VALID | start;
+}
+
+
static QemuOptsList qemu_gluster_create_opts = {
.name = "qemu-gluster-create-opts",
.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -741,6 +948,11 @@ static QemuOptsList qemu_gluster_create_opts = {
.type = QEMU_OPT_STRING,
.help = "Preallocation mode (allowed values: off, full)"
},
+ {
+ .name = GLUSTER_OPT_DEBUG,
+ .type = QEMU_OPT_NUMBER,
+ .help = "Gluster log level, valid range is 0-9",
+ },
{ /* end of list */ }
}
};
@@ -769,6 +981,7 @@ static BlockDriver bdrv_gluster = {
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes,
#endif
+ .bdrv_co_get_block_status = qemu_gluster_co_get_block_status,
.create_opts = &qemu_gluster_create_opts,
};
@@ -796,6 +1009,7 @@ static BlockDriver bdrv_gluster_tcp = {
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes,
#endif
+ .bdrv_co_get_block_status = qemu_gluster_co_get_block_status,
.create_opts = &qemu_gluster_create_opts,
};
@@ -823,6 +1037,7 @@ static BlockDriver bdrv_gluster_unix = {
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes,
#endif
+ .bdrv_co_get_block_status = qemu_gluster_co_get_block_status,
.create_opts = &qemu_gluster_create_opts,
};
@@ -850,6 +1065,7 @@ static BlockDriver bdrv_gluster_rdma = {
#ifdef CONFIG_GLUSTERFS_ZEROFILL
.bdrv_co_pwrite_zeroes = qemu_gluster_co_pwrite_zeroes,
#endif
+ .bdrv_co_get_block_status = qemu_gluster_co_get_block_status,
.create_opts = &qemu_gluster_create_opts,
};
diff --git a/block/iscsi.c b/block/iscsi.c
index 7e78adea15..9bb5ff6216 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -417,7 +417,7 @@ static bool is_byte_request_lun_aligned(int64_t offset, int count,
static bool is_sector_request_lun_aligned(int64_t sector_num, int nb_sectors,
IscsiLun *iscsilun)
{
- assert(nb_sectors < BDRV_REQUEST_MAX_SECTORS);
+ assert(nb_sectors <= BDRV_REQUEST_MAX_SECTORS);
return is_byte_request_lun_aligned(sector_num << BDRV_SECTOR_BITS,
nb_sectors << BDRV_SECTOR_BITS,
iscsilun);
@@ -661,7 +661,8 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
int64_t ret;
int pnum;
BlockDriverState *file;
- ret = iscsi_co_get_block_status(bs, sector_num, INT_MAX, &pnum, &file);
+ ret = iscsi_co_get_block_status(bs, sector_num,
+ BDRV_REQUEST_MAX_SECTORS, &pnum, &file);
if (ret < 0) {
return ret;
}
diff --git a/block/mirror.c b/block/mirror.c
index a04ed9c7a4..8d96049555 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -218,7 +218,9 @@ static inline void mirror_wait_for_io(MirrorBlockJob *s)
}
/* Submit async read while handling COW.
- * Returns: nb_sectors if no alignment is necessary, or
+ * Returns: The number of sectors copied after and including sector_num,
+ * excluding any sectors copied prior to sector_num due to alignment.
+ * This will be nb_sectors if no alignment is necessary, or
* (new_end - sector_num) if tail is rounded up or down due to
* alignment or buffer limit.
*/
@@ -227,14 +229,18 @@ static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
{
BlockBackend *source = s->common.blk;
int sectors_per_chunk, nb_chunks;
- int ret = nb_sectors;
+ int ret;
MirrorOp *op;
+ int max_sectors;
sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
+ max_sectors = sectors_per_chunk * s->max_iov;
/* We can only handle as much as buf_size at a time. */
nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
+ nb_sectors = MIN(max_sectors, nb_sectors);
assert(nb_sectors);
+ ret = nb_sectors;
if (s->cow_bitmap) {
ret += mirror_cow_align(s, &sector_num, &nb_sectors);
@@ -327,7 +333,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
first_chunk = sector_num / sectors_per_chunk;
while (test_bit(first_chunk, s->in_flight_bitmap)) {
- trace_mirror_yield_in_flight(s, first_chunk, s->in_flight);
+ trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
mirror_wait_for_io(s);
}
@@ -769,7 +775,7 @@ static void mirror_complete(BlockJob *job, Error **errp)
}
}
- /* check the target bs is not blocked and block all operations on it */
+ /* block all operations on to_replace bs */
if (s->replaces) {
AioContext *replace_aio_context;
diff --git a/block/nfs.c b/block/nfs.c
index 9f51cc3f10..15d6832c4c 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -1,7 +1,7 @@
/*
* QEMU Block driver for native access to files on NFS shares
*
- * Copyright (c) 2014 Peter Lieven <pl@kamp.de>
+ * Copyright (c) 2014-2016 Peter Lieven <pl@kamp.de>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -38,6 +38,7 @@
#include <nfsc/libnfs.h>
#define QEMU_NFS_MAX_READAHEAD_SIZE 1048576
+#define QEMU_NFS_MAX_PAGECACHE_SIZE (8388608 / NFS_BLKSIZE)
#define QEMU_NFS_MAX_DEBUG_LEVEL 2
typedef struct NFSClient {
@@ -47,6 +48,7 @@ typedef struct NFSClient {
bool has_zero_init;
AioContext *aio_context;
blkcnt_t st_blocks;
+ bool cache_used;
} NFSClient;
typedef struct NFSRPC {
@@ -278,7 +280,7 @@ static void nfs_file_close(BlockDriverState *bs)
}
static int64_t nfs_client_open(NFSClient *client, const char *filename,
- int flags, Error **errp)
+ int flags, Error **errp, int open_flags)
{
int ret = -EINVAL, i;
struct stat st;
@@ -330,12 +332,38 @@ static int64_t nfs_client_open(NFSClient *client, const char *filename,
nfs_set_tcp_syncnt(client->context, val);
#ifdef LIBNFS_FEATURE_READAHEAD
} else if (!strcmp(qp->p[i].name, "readahead")) {
+ if (open_flags & BDRV_O_NOCACHE) {
+ error_setg(errp, "Cannot enable NFS readahead "
+ "if cache.direct = on");
+ goto fail;
+ }
if (val > QEMU_NFS_MAX_READAHEAD_SIZE) {
error_report("NFS Warning: Truncating NFS readahead"
" size to %d", QEMU_NFS_MAX_READAHEAD_SIZE);
val = QEMU_NFS_MAX_READAHEAD_SIZE;
}
nfs_set_readahead(client->context, val);
+#ifdef LIBNFS_FEATURE_PAGECACHE
+ nfs_set_pagecache_ttl(client->context, 0);
+#endif
+ client->cache_used = true;
+#endif
+#ifdef LIBNFS_FEATURE_PAGECACHE
+ nfs_set_pagecache_ttl(client->context, 0);
+ } else if (!strcmp(qp->p[i].name, "pagecache")) {
+ if (open_flags & BDRV_O_NOCACHE) {
+ error_setg(errp, "Cannot enable NFS pagecache "
+ "if cache.direct = on");
+ goto fail;
+ }
+ if (val > QEMU_NFS_MAX_PAGECACHE_SIZE) {
+ error_report("NFS Warning: Truncating NFS pagecache"
+ " size to %d pages", QEMU_NFS_MAX_PAGECACHE_SIZE);
+ val = QEMU_NFS_MAX_PAGECACHE_SIZE;
+ }
+ nfs_set_pagecache(client->context, val);
+ nfs_set_pagecache_ttl(client->context, 0);
+ client->cache_used = true;
#endif
#ifdef LIBNFS_FEATURE_DEBUG
} else if (!strcmp(qp->p[i].name, "debug")) {
@@ -418,7 +446,7 @@ static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags,
}
ret = nfs_client_open(client, qemu_opt_get(opts, "filename"),
(flags & BDRV_O_RDWR) ? O_RDWR : O_RDONLY,
- errp);
+ errp, bs->open_flags);
if (ret < 0) {
goto out;
}
@@ -454,7 +482,7 @@ static int nfs_file_create(const char *url, QemuOpts *opts, Error **errp)
total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
BDRV_SECTOR_SIZE);
- ret = nfs_client_open(client, url, O_CREAT, errp);
+ ret = nfs_client_open(client, url, O_CREAT, errp, 0);
if (ret < 0) {
goto out;
}
@@ -516,6 +544,12 @@ static int nfs_reopen_prepare(BDRVReopenState *state,
return -EACCES;
}
+ if ((state->flags & BDRV_O_NOCACHE) && client->cache_used) {
+ error_setg(errp, "Cannot disable cache if libnfs readahead or"
+ " pagecache is enabled");
+ return -EINVAL;
+ }
+
/* Update cache for read-only reopens */
if (!(state->flags & BDRV_O_RDWR)) {
ret = nfs_fstat(client->context, client->fh, &st);
@@ -530,6 +564,15 @@ static int nfs_reopen_prepare(BDRVReopenState *state,
return 0;
}
+#ifdef LIBNFS_FEATURE_PAGECACHE
+static void nfs_invalidate_cache(BlockDriverState *bs,
+ Error **errp)
+{
+ NFSClient *client = bs->opaque;
+ nfs_pagecache_invalidate(client->context, client->fh);
+}
+#endif
+
static BlockDriver bdrv_nfs = {
.format_name = "nfs",
.protocol_name = "nfs",
@@ -553,6 +596,10 @@ static BlockDriver bdrv_nfs = {
.bdrv_detach_aio_context = nfs_detach_aio_context,
.bdrv_attach_aio_context = nfs_attach_aio_context,
+
+#ifdef LIBNFS_FEATURE_PAGECACHE
+ .bdrv_invalidate_cache = nfs_invalidate_cache,
+#endif
};
static void nfs_block_init(void)
diff --git a/blockjob.c b/blockjob.c
index 90c4e262b0..205da9df4e 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -110,6 +110,7 @@ void *block_job_create(const BlockJobDriver *driver, BlockDriverState *bs,
BlockBackend *blk;
BlockJob *job;
+ assert(cb);
if (bs->job) {
error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
return NULL;
diff --git a/bsd-user/syscall.c b/bsd-user/syscall.c
index a9fe8693c1..66492aaf5d 100644
--- a/bsd-user/syscall.c
+++ b/bsd-user/syscall.c
@@ -315,12 +315,14 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8)
{
+ CPUState *cpu = ENV_GET_CPU(cpu_env);
abi_long ret;
void *p;
#ifdef DEBUG
gemu_log("freebsd syscall %d\n", num);
#endif
+ trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
if(do_strace)
print_freebsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
@@ -400,6 +402,7 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1,
#endif
if (do_strace)
print_freebsd_syscall_ret(num, ret);
+ trace_guest_user_syscall_ret(cpu, num, ret);
return ret;
efault:
ret = -TARGET_EFAULT;
@@ -410,12 +413,14 @@ abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5, abi_long arg6)
{
+ CPUState *cpu = ENV_GET_CPU(cpu_env);
abi_long ret;
void *p;
#ifdef DEBUG
gemu_log("netbsd syscall %d\n", num);
#endif
+ trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0);
if(do_strace)
print_netbsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
@@ -472,6 +477,7 @@ abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long arg1,
#endif
if (do_strace)
print_netbsd_syscall_ret(num, ret);
+ trace_guest_user_syscall_ret(cpu, num, ret);
return ret;
efault:
ret = -TARGET_EFAULT;
@@ -482,12 +488,14 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1,
abi_long arg2, abi_long arg3, abi_long arg4,
abi_long arg5, abi_long arg6)
{
+ CPUState *cpu = ENV_GET_CPU(cpu_env);
abi_long ret;
void *p;
#ifdef DEBUG
gemu_log("openbsd syscall %d\n", num);
#endif
+ trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0);
if(do_strace)
print_openbsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
@@ -544,6 +552,7 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1,
#endif
if (do_strace)
print_openbsd_syscall_ret(num, ret);
+ trace_guest_user_syscall_ret(cpu, num, ret);
return ret;
efault:
ret = -TARGET_EFAULT;
diff --git a/configure b/configure
index 5929aba98c..67beb47aca 100755
--- a/configure
+++ b/configure
@@ -305,8 +305,8 @@ archipelago="no"
gtk=""
gtkabi=""
gtk_gl="no"
+tls_priority="NORMAL"
gnutls=""
-gnutls_hash=""
gnutls_rnd=""
nettle=""
nettle_kdf="no"
@@ -1097,6 +1097,8 @@ for opt do
;;
--enable-gtk) gtk="yes"
;;
+ --tls-priority=*) tls_priority="$optarg"
+ ;;
--disable-gnutls) gnutls="no"
;;
--enable-gnutls) gnutls="yes"
@@ -1216,6 +1218,13 @@ esac
QEMU_CFLAGS="$CPU_CFLAGS $QEMU_CFLAGS"
EXTRA_CFLAGS="$CPU_CFLAGS $EXTRA_CFLAGS"
+# For user-mode emulation the host arch has to be one we explicitly
+# support, even if we're using TCI.
+if [ "$ARCH" = "unknown" ]; then
+ bsd_user="no"
+ linux_user="no"
+fi
+
default_target_list=""
mak_wilds=""
@@ -1301,6 +1310,7 @@ Advanced options (experts only):
--disable-blobs disable installing provided firmware blobs
--with-vss-sdk=SDK-path enable Windows VSS support in QEMU Guest Agent
--with-win-sdk=SDK-path path to Windows Platform SDK (to build VSS .tlb)
+ --tls-priority default TLS protocol/cipher priority string
Optional features, enabled with --enable-FEATURE and
disabled with --disable-FEATURE, default is enabled if available:
@@ -1380,7 +1390,6 @@ fi
if test "$ARCH" = "unknown"; then
if test "$tcg_interpreter" = "yes" ; then
echo "Unsupported CPU = $cpu, will use TCG with TCI (experimental)"
- ARCH=tci
else
error_exit "Unsupported CPU = $cpu, try --enable-tcg-interpreter"
fi
@@ -1792,8 +1801,10 @@ int foo(void *a) __attribute__((ifunc("bar_ifunc")));
int main(int argc, char *argv[]) { return foo(argv[0]);}
EOF
if compile_object "" ; then
- if readelf --syms $TMPO |grep "IFUNC.*foo" >/dev/null 2>&1; then
- avx2_opt="yes"
+ if has readelf; then
+ if readelf --syms $TMPO 2>/dev/null |grep -q "IFUNC.*foo"; then
+ avx2_opt="yes"
+ fi
fi
fi
@@ -2210,13 +2221,6 @@ if test "$gnutls" != "no"; then
QEMU_CFLAGS="$QEMU_CFLAGS $gnutls_cflags"
gnutls="yes"
- # gnutls_hash_init requires >= 2.9.10
- if $pkg_config --exists "gnutls >= 2.9.10"; then
- gnutls_hash="yes"
- else
- gnutls_hash="no"
- fi
-
# gnutls_rnd requires >= 2.11.0
if $pkg_config --exists "gnutls >= 2.11.0"; then
gnutls_rnd="yes"
@@ -2250,11 +2254,9 @@ if test "$gnutls" != "no"; then
feature_not_found "gnutls" "Install gnutls devel"
else
gnutls="no"
- gnutls_hash="no"
gnutls_rnd="no"
fi
else
- gnutls_hash="no"
gnutls_rnd="no"
fi
@@ -4703,7 +4705,7 @@ if test "$cpu" = "s390x" ; then
fi
# Probe for the need for relocating the user-only binary.
-if test "$pie" = "no" ; then
+if ( [ "$linux_user" = yes ] || [ "$bsd_user" = yes ] ) && [ "$pie" = no ]; then
textseg_addr=
case "$cpu" in
arm | i386 | ppc* | s390* | sparc* | x86_64 | x32)
@@ -4725,6 +4727,16 @@ EOF
# In case ld does not support -Ttext-segment, edit the default linker
# script via sed to set the .text start addr. This is needed on FreeBSD
# at least.
+ if ! $ld --verbose >/dev/null 2>&1; then
+ error_exit \
+ "We need to link the QEMU user mode binaries at a" \
+ "specific text address. Unfortunately your linker" \
+ "doesn't support either the -Ttext-segment option or" \
+ "printing the default linker script with --verbose." \
+ "If you don't want the user mode binaries, pass the" \
+ "--disable-user option to configure."
+ fi
+
$ld --verbose | sed \
-e '1,/==================================================/d' \
-e '/==================================================/,$d' \
@@ -4794,8 +4806,8 @@ echo "SDL support $sdl $(echo_version $sdl $sdlversion)"
echo "GTK support $gtk $(echo_version $gtk $gtk_version)"
echo "GTK GL support $gtk_gl"
echo "VTE support $vte $(echo_version $vte $vteversion)"
+echo "TLS priority $tls_priority"
echo "GNUTLS support $gnutls"
-echo "GNUTLS hash $gnutls_hash"
echo "GNUTLS rnd $gnutls_rnd"
echo "libgcrypt $gcrypt"
echo "libgcrypt kdf $gcrypt_kdf"
@@ -5158,12 +5170,10 @@ if test "$gtk" = "yes" ; then
echo "CONFIG_GTK_GL=y" >> $config_host_mak
fi
fi
+echo "CONFIG_TLS_PRIORITY=\"$tls_priority\"" >> $config_host_mak
if test "$gnutls" = "yes" ; then
echo "CONFIG_GNUTLS=y" >> $config_host_mak
fi
-if test "$gnutls_hash" = "yes" ; then
- echo "CONFIG_GNUTLS_HASH=y" >> $config_host_mak
-fi
if test "$gnutls_rnd" = "yes" ; then
echo "CONFIG_GNUTLS_RND=y" >> $config_host_mak
fi
diff --git a/cputlb.c b/cputlb.c
index 23c9b91200..079e4979ca 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -30,6 +30,8 @@
#include "exec/ram_addr.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
+#include "qemu/error-report.h"
+#include "exec/log.h"
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@@ -427,6 +429,39 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
prot, mmu_idx, size);
}
+static void report_bad_exec(CPUState *cpu, target_ulong addr)
+{
+ /* Accidentally executing outside RAM or ROM is quite common for
+ * several user-error situations, so report it in a way that
+ * makes it clear that this isn't a QEMU bug and provide suggestions
+ * about what a user could do to fix things.
+ */
+ error_report("Trying to execute code outside RAM or ROM at 0x"
+ TARGET_FMT_lx, addr);
+ error_printf("This usually means one of the following happened:\n\n"
+ "(1) You told QEMU to execute a kernel for the wrong machine "
+ "type, and it crashed on startup (eg trying to run a "
+ "raspberry pi kernel on a versatilepb QEMU machine)\n"
+ "(2) You didn't give QEMU a kernel or BIOS filename at all, "
+ "and QEMU executed a ROM full of no-op instructions until "
+ "it fell off the end\n"
+ "(3) Your guest kernel has a bug and crashed by jumping "
+ "off into nowhere\n\n"
+ "This is almost always one of the first two, so check your "
+ "command line and that you are using the right type of kernel "
+ "for this machine.\n"
+ "If you think option (3) is likely then you can try debugging "
+ "your guest with the -d debug options; in particular "
+ "-d guest_errors will cause the log to include a dump of the "
+ "guest register state at this point.\n\n"
+ "Execution cannot continue; stopping here.\n\n");
+
+ /* Report also to the logs, with more detail including register dump */
+ qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
+ "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
+ log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
+}
+
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
* is actually a ram_addr_t (in system mode; the user mode emulation
@@ -455,8 +490,8 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
if (cc->do_unassigned_access) {
cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
} else {
- cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
- TARGET_FMT_lx "\n", addr);
+ report_bad_exec(cpu, addr);
+ exit(1);
}
}
p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
diff --git a/crypto/Makefile.objs b/crypto/Makefile.objs
index 0737f48118..1f86f4f07f 100644
--- a/crypto/Makefile.objs
+++ b/crypto/Makefile.objs
@@ -1,5 +1,7 @@
crypto-obj-y = init.o
crypto-obj-y += hash.o
+crypto-obj-$(CONFIG_NETTLE) += hash-nettle.o
+crypto-obj-$(if $(CONFIG_NETTLE),n,$(CONFIG_GCRYPT)) += hash-gcrypt.o
crypto-obj-y += aes.o
crypto-obj-y += desrfb.o
crypto-obj-y += cipher.o
@@ -28,3 +30,4 @@ crypto-aes-obj-y = aes.o
stub-obj-y += random-stub.o
stub-obj-y += pbkdf-stub.o
+stub-obj-y += hash-stub.o
diff --git a/crypto/block-luks.c b/crypto/block-luks.c
index 63649f1091..fcf3b040e4 100644
--- a/crypto/block-luks.c
+++ b/crypto/block-luks.c
@@ -776,6 +776,11 @@ qcrypto_block_luks_open(QCryptoBlock *block,
}
if (ivalg == QCRYPTO_IVGEN_ALG_ESSIV) {
+ if (!ivhash_name) {
+ ret = -EINVAL;
+ error_setg(errp, "Missing IV generator hash specification");
+ goto fail;
+ }
ivcipheralg = qcrypto_block_luks_essiv_cipher(cipheralg,
ivhash,
&local_err);
@@ -785,6 +790,13 @@ qcrypto_block_luks_open(QCryptoBlock *block,
goto fail;
}
} else {
+ /* Note we parsed the ivhash_name earlier in the cipher_mode
+ * spec string even with plain/plain64 ivgens, but we
+ * will ignore it, since it is irrelevant for these ivgens.
+ * This is for compat with dm-crypt which will silently
+ * ignore hash names with these ivgens rather than report
+ * an error about the invalid usage
+ */
ivcipheralg = cipheralg;
}
@@ -904,6 +916,15 @@ qcrypto_block_luks_create(QCryptoBlock *block,
if (!luks_opts.has_hash_alg) {
luks_opts.hash_alg = QCRYPTO_HASH_ALG_SHA256;
}
+ if (luks_opts.ivgen_alg == QCRYPTO_IVGEN_ALG_ESSIV) {
+ if (!luks_opts.has_ivgen_hash_alg) {
+ luks_opts.ivgen_hash_alg = QCRYPTO_HASH_ALG_SHA256;
+ luks_opts.has_ivgen_hash_alg = true;
+ }
+ }
+ /* Note we're allowing ivgen_hash_alg to be set even for
+ * non-essiv iv generators that don't need a hash. It will
+ * be silently ignored, for compatibility with dm-crypt */
if (!options->u.luks.key_secret) {
error_setg(errp, "Parameter 'key-secret' is required for cipher");
diff --git a/crypto/hash-gcrypt.c b/crypto/hash-gcrypt.c
new file mode 100644
index 0000000000..8ea5aff4ee
--- /dev/null
+++ b/crypto/hash-gcrypt.c
@@ -0,0 +1,110 @@
+/*
+ * QEMU Crypto hash algorithms
+ *
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "crypto/hash.h"
+#include "gcrypt.h"
+
+
+static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALG__MAX] = {
+ [QCRYPTO_HASH_ALG_MD5] = GCRY_MD_MD5,
+ [QCRYPTO_HASH_ALG_SHA1] = GCRY_MD_SHA1,
+ [QCRYPTO_HASH_ALG_SHA224] = GCRY_MD_SHA224,
+ [QCRYPTO_HASH_ALG_SHA256] = GCRY_MD_SHA256,
+ [QCRYPTO_HASH_ALG_SHA384] = GCRY_MD_SHA384,
+ [QCRYPTO_HASH_ALG_SHA512] = GCRY_MD_SHA512,
+ [QCRYPTO_HASH_ALG_RIPEMD160] = GCRY_MD_RMD160,
+};
+
+gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg)
+{
+ if (alg < G_N_ELEMENTS(qcrypto_hash_alg_map) &&
+ qcrypto_hash_alg_map[alg] != GCRY_MD_NONE) {
+ return true;
+ }
+ return false;
+}
+
+
+int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg,
+ const struct iovec *iov,
+ size_t niov,
+ uint8_t **result,
+ size_t *resultlen,
+ Error **errp)
+{
+ int i, ret;
+ gcry_md_hd_t md;
+ unsigned char *digest;
+
+ if (alg >= G_N_ELEMENTS(qcrypto_hash_alg_map) ||
+ qcrypto_hash_alg_map[alg] == GCRY_MD_NONE) {
+ error_setg(errp,
+ "Unknown hash algorithm %d",
+ alg);
+ return -1;
+ }
+
+ ret = gcry_md_open(&md, qcrypto_hash_alg_map[alg], 0);
+
+ if (ret < 0) {
+ error_setg(errp,
+ "Unable to initialize hash algorithm: %s",
+ gcry_strerror(ret));
+ return -1;
+ }
+
+ for (i = 0; i < niov; i++) {
+ gcry_md_write(md, iov[i].iov_base, iov[i].iov_len);
+ }
+
+ ret = gcry_md_get_algo_dlen(qcrypto_hash_alg_map[alg]);
+ if (ret <= 0) {
+ error_setg(errp,
+ "Unable to get hash length: %s",
+ gcry_strerror(ret));
+ goto error;
+ }
+ if (*resultlen == 0) {
+ *resultlen = ret;
+ *result = g_new0(uint8_t, *resultlen);
+ } else if (*resultlen != ret) {
+ error_setg(errp,
+ "Result buffer size %zu is smaller than hash %d",
+ *resultlen, ret);
+ goto error;
+ }
+
+ digest = gcry_md_read(md, 0);
+ if (!digest) {
+ error_setg(errp,
+ "No digest produced");
+ goto error;
+ }
+ memcpy(*result, digest, *resultlen);
+
+ gcry_md_close(md);
+ return 0;
+
+ error:
+ gcry_md_close(md);
+ return -1;
+}
diff --git a/crypto/hash-nettle.c b/crypto/hash-nettle.c
new file mode 100644
index 0000000000..4c6f50b65d
--- /dev/null
+++ b/crypto/hash-nettle.c
@@ -0,0 +1,155 @@
+/*
+ * QEMU Crypto hash algorithms
+ *
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "crypto/hash.h"
+#include <nettle/md5.h>
+#include <nettle/sha.h>
+#include <nettle/ripemd160.h>
+
+typedef void (*qcrypto_nettle_init)(void *ctx);
+typedef void (*qcrypto_nettle_write)(void *ctx,
+ unsigned int len,
+ const uint8_t *buf);
+typedef void (*qcrypto_nettle_result)(void *ctx,
+ unsigned int len,
+ uint8_t *buf);
+
+union qcrypto_hash_ctx {
+ struct md5_ctx md5;
+ struct sha1_ctx sha1;
+ struct sha224_ctx sha224;
+ struct sha256_ctx sha256;
+ struct sha384_ctx sha384;
+ struct sha512_ctx sha512;
+ struct ripemd160_ctx ripemd160;
+};
+
+struct qcrypto_hash_alg {
+ qcrypto_nettle_init init;
+ qcrypto_nettle_write write;
+ qcrypto_nettle_result result;
+ size_t len;
+} qcrypto_hash_alg_map[] = {
+ [QCRYPTO_HASH_ALG_MD5] = {
+ .init = (qcrypto_nettle_init)md5_init,
+ .write = (qcrypto_nettle_write)md5_update,
+ .result = (qcrypto_nettle_result)md5_digest,
+ .len = MD5_DIGEST_SIZE,
+ },
+ [QCRYPTO_HASH_ALG_SHA1] = {
+ .init = (qcrypto_nettle_init)sha1_init,
+ .write = (qcrypto_nettle_write)sha1_update,
+ .result = (qcrypto_nettle_result)sha1_digest,
+ .len = SHA1_DIGEST_SIZE,
+ },
+ [QCRYPTO_HASH_ALG_SHA224] = {
+ .init = (qcrypto_nettle_init)sha224_init,
+ .write = (qcrypto_nettle_write)sha224_update,
+ .result = (qcrypto_nettle_result)sha224_digest,
+ .len = SHA224_DIGEST_SIZE,
+ },
+ [QCRYPTO_HASH_ALG_SHA256] = {
+ .init = (qcrypto_nettle_init)sha256_init,
+ .write = (qcrypto_nettle_write)sha256_update,
+ .result = (qcrypto_nettle_result)sha256_digest,
+ .len = SHA256_DIGEST_SIZE,
+ },
+ [QCRYPTO_HASH_ALG_SHA384] = {
+ .init = (qcrypto_nettle_init)sha384_init,
+ .write = (qcrypto_nettle_write)sha384_update,
+ .result = (qcrypto_nettle_result)sha384_digest,
+ .len = SHA384_DIGEST_SIZE,
+ },
+ [QCRYPTO_HASH_ALG_SHA512] = {
+ .init = (qcrypto_nettle_init)sha512_init,
+ .write = (qcrypto_nettle_write)sha512_update,
+ .result = (qcrypto_nettle_result)sha512_digest,
+ .len = SHA512_DIGEST_SIZE,
+ },
+ [QCRYPTO_HASH_ALG_RIPEMD160] = {
+ .init = (qcrypto_nettle_init)ripemd160_init,
+ .write = (qcrypto_nettle_write)ripemd160_update,
+ .result = (qcrypto_nettle_result)ripemd160_digest,
+ .len = RIPEMD160_DIGEST_SIZE,
+ },
+};
+
+gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg)
+{
+ if (alg < G_N_ELEMENTS(qcrypto_hash_alg_map) &&
+ qcrypto_hash_alg_map[alg].init != NULL) {
+ return true;
+ }
+ return false;
+}
+
+
+int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg,
+ const struct iovec *iov,
+ size_t niov,
+ uint8_t **result,
+ size_t *resultlen,
+ Error **errp)
+{
+ int i;
+ union qcrypto_hash_ctx ctx;
+
+ if (alg >= G_N_ELEMENTS(qcrypto_hash_alg_map) ||
+ qcrypto_hash_alg_map[alg].init == NULL) {
+ error_setg(errp,
+ "Unknown hash algorithm %d",
+ alg);
+ return -1;
+ }
+
+ qcrypto_hash_alg_map[alg].init(&ctx);
+
+ for (i = 0; i < niov; i++) {
+ /* Some versions of nettle have functions
+ * declared with 'int' instead of 'size_t'
+ * so to be safe avoid writing more than
+ * UINT_MAX bytes at a time
+ */
+ size_t len = iov[i].iov_len;
+ uint8_t *base = iov[i].iov_base;
+ while (len) {
+ size_t shortlen = MIN(len, UINT_MAX);
+ qcrypto_hash_alg_map[alg].write(&ctx, len, base);
+ len -= shortlen;
+ base += len;
+ }
+ }
+
+ if (*resultlen == 0) {
+ *resultlen = qcrypto_hash_alg_map[alg].len;
+ *result = g_new0(uint8_t, *resultlen);
+ } else if (*resultlen != qcrypto_hash_alg_map[alg].len) {
+ error_setg(errp,
+ "Result buffer size %zu is smaller than hash %zu",
+ *resultlen, qcrypto_hash_alg_map[alg].len);
+ return -1;
+ }
+
+ qcrypto_hash_alg_map[alg].result(&ctx, *resultlen, *result);
+
+ return 0;
+}
diff --git a/crypto/hash-stub.c b/crypto/hash-stub.c
new file mode 100644
index 0000000000..8a9b8d4c09
--- /dev/null
+++ b/crypto/hash-stub.c
@@ -0,0 +1,41 @@
+/*
+ * QEMU Crypto hash algorithms
+ *
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "crypto/hash.h"
+
+gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg G_GNUC_UNUSED)
+{
+ return false;
+}
+
+int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg,
+ const struct iovec *iov G_GNUC_UNUSED,
+ size_t niov G_GNUC_UNUSED,
+ uint8_t **result G_GNUC_UNUSED,
+ size_t *resultlen G_GNUC_UNUSED,
+ Error **errp)
+{
+ error_setg(errp,
+ "Hash algorithm %d not supported without GNUTLS",
+ alg);
+ return -1;
+}
diff --git a/crypto/hash.c b/crypto/hash.c
index 2907bffd2e..0f1ceac66a 100644
--- a/crypto/hash.c
+++ b/crypto/hash.c
@@ -22,16 +22,14 @@
#include "qapi/error.h"
#include "crypto/hash.h"
-#ifdef CONFIG_GNUTLS_HASH
-#include <gnutls/gnutls.h>
-#include <gnutls/crypto.h>
-#endif
-
-
static size_t qcrypto_hash_alg_size[QCRYPTO_HASH_ALG__MAX] = {
[QCRYPTO_HASH_ALG_MD5] = 16,
[QCRYPTO_HASH_ALG_SHA1] = 20,
+ [QCRYPTO_HASH_ALG_SHA224] = 28,
[QCRYPTO_HASH_ALG_SHA256] = 32,
+ [QCRYPTO_HASH_ALG_SHA384] = 48,
+ [QCRYPTO_HASH_ALG_SHA512] = 64,
+ [QCRYPTO_HASH_ALG_RIPEMD160] = 20,
};
size_t qcrypto_hash_digest_len(QCryptoHashAlgorithm alg)
@@ -41,105 +39,6 @@ size_t qcrypto_hash_digest_len(QCryptoHashAlgorithm alg)
}
-#ifdef CONFIG_GNUTLS_HASH
-static int qcrypto_hash_alg_map[QCRYPTO_HASH_ALG__MAX] = {
- [QCRYPTO_HASH_ALG_MD5] = GNUTLS_DIG_MD5,
- [QCRYPTO_HASH_ALG_SHA1] = GNUTLS_DIG_SHA1,
- [QCRYPTO_HASH_ALG_SHA256] = GNUTLS_DIG_SHA256,
-};
-
-gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg)
-{
- if (alg < G_N_ELEMENTS(qcrypto_hash_alg_map)) {
- return true;
- }
- return false;
-}
-
-
-int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg,
- const struct iovec *iov,
- size_t niov,
- uint8_t **result,
- size_t *resultlen,
- Error **errp)
-{
- int i, ret;
- gnutls_hash_hd_t dig;
-
- if (alg >= G_N_ELEMENTS(qcrypto_hash_alg_map)) {
- error_setg(errp,
- "Unknown hash algorithm %d",
- alg);
- return -1;
- }
-
- ret = gnutls_hash_init(&dig, qcrypto_hash_alg_map[alg]);
-
- if (ret < 0) {
- error_setg(errp,
- "Unable to initialize hash algorithm: %s",
- gnutls_strerror(ret));
- return -1;
- }
-
- for (i = 0; i < niov; i++) {
- ret = gnutls_hash(dig, iov[i].iov_base, iov[i].iov_len);
- if (ret < 0) {
- error_setg(errp,
- "Unable process hash data: %s",
- gnutls_strerror(ret));
- goto error;
- }
- }
-
- ret = gnutls_hash_get_len(qcrypto_hash_alg_map[alg]);
- if (ret <= 0) {
- error_setg(errp,
- "Unable to get hash length: %s",
- gnutls_strerror(ret));
- goto error;
- }
- if (*resultlen == 0) {
- *resultlen = ret;
- *result = g_new0(uint8_t, *resultlen);
- } else if (*resultlen != ret) {
- error_setg(errp,
- "Result buffer size %zu is smaller than hash %d",
- *resultlen, ret);
- goto error;
- }
-
- gnutls_hash_deinit(dig, *result);
- return 0;
-
- error:
- gnutls_hash_deinit(dig, NULL);
- return -1;
-}
-
-#else /* ! CONFIG_GNUTLS_HASH */
-
-gboolean qcrypto_hash_supports(QCryptoHashAlgorithm alg G_GNUC_UNUSED)
-{
- return false;
-}
-
-int qcrypto_hash_bytesv(QCryptoHashAlgorithm alg,
- const struct iovec *iov G_GNUC_UNUSED,
- size_t niov G_GNUC_UNUSED,
- uint8_t **result G_GNUC_UNUSED,
- size_t *resultlen G_GNUC_UNUSED,
- Error **errp)
-{
- error_setg(errp,
- "Hash algorithm %d not supported without GNUTLS",
- alg);
- return -1;
-}
-
-#endif /* ! CONFIG_GNUTLS_HASH */
-
int qcrypto_hash_bytes(QCryptoHashAlgorithm alg,
const char *buf,
size_t len,
diff --git a/crypto/tlscreds.c b/crypto/tlscreds.c
index 1620e126ae..a8965531b6 100644
--- a/crypto/tlscreds.c
+++ b/crypto/tlscreds.c
@@ -179,6 +179,27 @@ qcrypto_tls_creds_prop_get_dir(Object *obj,
static void
+qcrypto_tls_creds_prop_set_priority(Object *obj,
+ const char *value,
+ Error **errp G_GNUC_UNUSED)
+{
+ QCryptoTLSCreds *creds = QCRYPTO_TLS_CREDS(obj);
+
+ creds->priority = g_strdup(value);
+}
+
+
+static char *
+qcrypto_tls_creds_prop_get_priority(Object *obj,
+ Error **errp G_GNUC_UNUSED)
+{
+ QCryptoTLSCreds *creds = QCRYPTO_TLS_CREDS(obj);
+
+ return g_strdup(creds->priority);
+}
+
+
+static void
qcrypto_tls_creds_prop_set_endpoint(Object *obj,
int value,
Error **errp G_GNUC_UNUSED)
@@ -216,6 +237,10 @@ qcrypto_tls_creds_class_init(ObjectClass *oc, void *data)
qcrypto_tls_creds_prop_get_endpoint,
qcrypto_tls_creds_prop_set_endpoint,
NULL);
+ object_class_property_add_str(oc, "priority",
+ qcrypto_tls_creds_prop_get_priority,
+ qcrypto_tls_creds_prop_set_priority,
+ NULL);
}
@@ -234,6 +259,7 @@ qcrypto_tls_creds_finalize(Object *obj)
QCryptoTLSCreds *creds = QCRYPTO_TLS_CREDS(obj);
g_free(creds->dir);
+ g_free(creds->priority);
}
diff --git a/crypto/tlssession.c b/crypto/tlssession.c
index a543e5a576..2de42c61cb 100644
--- a/crypto/tlssession.c
+++ b/crypto/tlssession.c
@@ -132,14 +132,22 @@ qcrypto_tls_session_new(QCryptoTLSCreds *creds,
if (object_dynamic_cast(OBJECT(creds),
TYPE_QCRYPTO_TLS_CREDS_ANON)) {
QCryptoTLSCredsAnon *acreds = QCRYPTO_TLS_CREDS_ANON(creds);
+ char *prio;
- ret = gnutls_priority_set_direct(session->handle,
- "NORMAL:+ANON-DH", NULL);
+ if (creds->priority != NULL) {
+ prio = g_strdup_printf("%s:+ANON-DH", creds->priority);
+ } else {
+ prio = g_strdup(CONFIG_TLS_PRIORITY ":+ANON-DH");
+ }
+
+ ret = gnutls_priority_set_direct(session->handle, prio, NULL);
if (ret < 0) {
- error_setg(errp, "Unable to set TLS session priority: %s",
- gnutls_strerror(ret));
+ error_setg(errp, "Unable to set TLS session priority %s: %s",
+ prio, gnutls_strerror(ret));
+ g_free(prio);
goto error;
}
+ g_free(prio);
if (creds->endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) {
ret = gnutls_credentials_set(session->handle,
GNUTLS_CRD_ANON,
@@ -157,11 +165,15 @@ qcrypto_tls_session_new(QCryptoTLSCreds *creds,
} else if (object_dynamic_cast(OBJECT(creds),
TYPE_QCRYPTO_TLS_CREDS_X509)) {
QCryptoTLSCredsX509 *tcreds = QCRYPTO_TLS_CREDS_X509(creds);
+ const char *prio = creds->priority;
+ if (!prio) {
+ prio = CONFIG_TLS_PRIORITY;
+ }
- ret = gnutls_set_default_priority(session->handle);
+ ret = gnutls_priority_set_direct(session->handle, prio, NULL);
if (ret < 0) {
- error_setg(errp, "Cannot set default TLS session priority: %s",
- gnutls_strerror(ret));
+ error_setg(errp, "Cannot set default TLS session priority %s: %s",
+ prio, gnutls_strerror(ret));
goto error;
}
ret = gnutls_credentials_set(session->handle,
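
The effective priority string is derived the same way for both credential types: the per-object "priority" property if set, otherwise the build-time default CONFIG_TLS_PRIORITY, with ":+ANON-DH" appended only for anonymous credentials. A minimal standalone sketch of that fallback; CONFIG_TLS_PRIORITY is normally supplied by the build system, and the "NORMAL" default below exists only to make the sketch compile on its own:

    #include <glib.h>
    #include <stdbool.h>

    #ifndef CONFIG_TLS_PRIORITY
    #define CONFIG_TLS_PRIORITY "NORMAL"   /* stand-in for the configure-time value */
    #endif

    /* Returns a newly allocated priority string; the caller must g_free() it. */
    static char *build_priority_string(const char *creds_priority, bool anon)
    {
        const char *base = creds_priority ? creds_priority : CONFIG_TLS_PRIORITY;

        /* Anonymous credentials additionally need the ANON-DH key exchange. */
        return anon ? g_strdup_printf("%s:+ANON-DH", base) : g_strdup(base);
    }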
diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
index c5bcba754a..7a19863b18 100644
--- a/default-configs/arm-softmmu.mak
+++ b/default-configs/arm-softmmu.mak
@@ -66,6 +66,7 @@ CONFIG_PXA2XX=y
CONFIG_BITBANG_I2C=y
CONFIG_FRAMEBUFFER=y
CONFIG_XILINX_SPIPS=y
+CONFIG_ZYNQ_DEVCFG=y
CONFIG_ARM11SCU=y
CONFIG_A9SCU=y
diff --git a/default-configs/ppc64-softmmu.mak b/default-configs/ppc64-softmmu.mak
index bb71b23ee7..c4be59f638 100644
--- a/default-configs/ppc64-softmmu.mak
+++ b/default-configs/ppc64-softmmu.mak
@@ -49,6 +49,7 @@ CONFIG_ETSEC=y
CONFIG_LIBDECNUMBER=y
# For pSeries
CONFIG_XICS=$(CONFIG_PSERIES)
+CONFIG_XICS_SPAPR=$(CONFIG_PSERIES)
CONFIG_XICS_KVM=$(and $(CONFIG_PSERIES),$(CONFIG_KVM))
# For PReP
CONFIG_MC146818RTC=y
diff --git a/dma-helpers.c b/dma-helpers.c
index b521d84ebd..9defc101b7 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -185,10 +185,17 @@ static void dma_aio_cancel(BlockAIOCB *acb)
}
}
+static AioContext *dma_get_aio_context(BlockAIOCB *acb)
+{
+ DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
+
+ return dbs->ctx;
+}
static const AIOCBInfo dma_aiocb_info = {
.aiocb_size = sizeof(DMAAIOCB),
.cancel_async = dma_aio_cancel,
+ .get_aio_context = dma_get_aio_context,
};
BlockAIOCB *dma_blk_io(AioContext *ctx,
diff --git a/docs/memory.txt b/docs/memory.txt
index 431d9ca88f..811b1bd3c5 100644
--- a/docs/memory.txt
+++ b/docs/memory.txt
@@ -41,8 +41,13 @@ MemoryRegion):
MemoryRegionOps structure describing the callbacks.
- ROM: a ROM memory region works like RAM for reads (directly accessing
- a region of host memory), but like MMIO for writes (invoking a callback).
- You initialize these with memory_region_init_rom_device().
+ a region of host memory), and forbids writes. You initialize these with
+ memory_region_init_rom().
+
+- ROM device: a ROM device memory region works like RAM for reads
+ (directly accessing a region of host memory), but like MMIO for
+ writes (invoking a callback). You initialize these with
+ memory_region_init_rom_device().
- IOMMU region: an IOMMU region translates addresses of accesses made to it
and forwards them to some other target memory region. As the name suggests,
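
A short sketch of how a device model might create the two region types just described; the device state, the ops table and the sizes are made up for illustration, and the signatures are as used by this QEMU version (errors reported via &error_fatal):

    #include "qemu/osdep.h"
    #include "exec/memory.h"
    #include "qapi/error.h"

    /* Hypothetical device state; only the regions matter for the example. */
    typedef struct {
        MemoryRegion boot_rom;   /* plain ROM: guest writes are forbidden  */
        MemoryRegion flash;      /* ROM device: guest writes hit callbacks */
    } MyDevState;

    extern const MemoryRegionOps mydev_flash_ops;  /* write callbacks, not shown */

    static void mydev_init_regions(Object *owner, MyDevState *s)
    {
        /* Reads hit host RAM directly; writes are rejected. */
        memory_region_init_rom(&s->boot_rom, owner, "mydev.boot-rom",
                               64 * 1024, &error_fatal);

        /* Reads hit host RAM; writes invoke mydev_flash_ops callbacks,
         * e.g. for flash command emulation. */
        memory_region_init_rom_device(&s->flash, owner, &mydev_flash_ops, s,
                                      "mydev.flash", 1024 * 1024, &error_fatal);
    }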
diff --git a/docs/specs/acpi_cpu_hotplug.txt b/docs/specs/acpi_cpu_hotplug.txt
index 340b751a95..ee219c8358 100644
--- a/docs/specs/acpi_cpu_hotplug.txt
+++ b/docs/specs/acpi_cpu_hotplug.txt
@@ -4,21 +4,91 @@ QEMU<->ACPI BIOS CPU hotplug interface
QEMU supports CPU hotplug via ACPI. This document
describes the interface between QEMU and the ACPI BIOS.
-ACPI GPE block (IO ports 0xafe0-0xafe3, byte access):
------------------------------------------
-
-Generic ACPI GPE block. Bit 2 (GPE.2) used to notify CPU
-hot-add/remove event to ACPI BIOS, via SCI interrupt.
+The ACPI BIOS GPE.2 handler is dedicated to notifying the OS about CPU hot-add
+and hot-remove events.
+============================================
+Legacy ACPI CPU hotplug interface registers:
+--------------------------------------------
CPU present bitmap for:
ICH9-LPC (IO port 0x0cd8-0xcf7, 1-byte access)
PIIX-PM (IO port 0xaf00-0xaf1f, 1-byte access)
+ One bit per CPU. Bit position reflects the corresponding CPU's APIC ID. Read-only.
+ The first DWORD in the bitmap is also writable: writing 0 to it switches from
+ the legacy to the new CPU hotplug interface.
---------------------------------------------------------------
-One bit per CPU. Bit position reflects corresponding CPU APIC ID.
-Read-only.
+QEMU sets the corresponding CPU bit on a hot-add event and issues an SCI
+with the GPE.2 event set. The CPU present map is read by the ACPI BIOS GPE.2
+handler to notify the OS about CPU hot-add events. CPU hot-remove isn't supported.
+
+=====================================
+ACPI CPU hotplug interface registers:
+-------------------------------------
+Register block base address:
+ ICH9-LPC IO port 0x0cd8
+ PIIX-PM IO port 0xaf00
+Register block size:
+ ACPI_CPU_HOTPLUG_REG_LEN = 12
+
+read access:
+ offset:
+ [0x0-0x3] reserved
+ [0x4] CPU device status fields: (1 byte access)
+ bits:
+ 0: Device is enabled and may be used by guest
+ 1: Device insert event, used to distinguish a device for which
+ no device check event has been issued to OSPM.
+ It's valid only when bit 0 is set.
+ 2: Device remove event, used to distinguish a device for which
+ no device eject request has been issued to OSPM.
+ 3-7: reserved and should be ignored by OSPM
+ [0x5-0x7] reserved
+ [0x8] Command data: (DWORD access)
+ in case of error or an unsupported command, reads return 0xFFFFFFFF
+ current 'Command field' value:
+ 0: returns PXM value corresponding to device
+
+write access:
+ offset:
+ [0x0-0x3] CPU selector: (DWORD access)
+ selects active CPU device. All following accesses to other
+ registers will read/store data from/to selected CPU.
+ [0x4] CPU device control fields: (1 byte access)
+ bits:
+ 0: reserved, OSPM must clear it before writing to register.
+ 1: if set to 1 clears device insert event, set by OSPM
+ after it has emitted device check event for the
+ selected CPU device
+ 2: if set to 1 clears device remove event, set by OSPM
+ after it has emitted device eject request for the
+ selected CPU device
+ 3: if set to 1 initiates device eject, set by OSPM when it
+ triggers CPU device removal and calls _EJ0 method
+ 4-7: reserved, OSPM must clear them before writing to register
+ [0x5] Command field: (1 byte access)
+ value:
+ 0: selects a CPU device with a pending insert/remove event;
+ subsequent reads from the 'Command data' register return the
+ selected CPU (CPU selector value). If no CPU with pending
+ events is found, the current CPU selector doesn't change and
+ the corresponding insert/remove event flags are not set.
+ 1: following writes to 'Command data' register set OST event
+ register in QEMU
+ 2: following writes to 'Command data' register set OST status
+ register in QEMU
+ other values: reserved
+ [0x6-0x7] reserved
+ [0x8] Command data: (DWORD access)
+ current 'Command field' value:
+ 0: OSPM reads value of CPU selector
+ 1: stores value into OST event register
+ 2: stores value into OST status register, triggers
+ ACPI_DEVICE_OST QMP event from QEMU to external applications
+ with current values of OST event and status registers.
+ other values: reserved
-CPU hot-add/remove notification:
------------------------------------------------------
-QEMU sets/clears corresponding CPU bit on hot-add/remove event.
-CPU present map read by ACPI BIOS GPE.2 handler to notify OS of CPU
-hot-(un)plug events.
+Selecting a CPU device beyond the possible range has no effect on the platform:
+ - write accesses to CPU hot-plug registers not documented above are
+ ignored
+ - read accesses to CPU hot-plug registers not documented above return
+ all bits set to 0.
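
As a guest-side illustration of the register protocol above (not QEMU code): select a CPU by writing its index to the selector DWORD, then read the status byte. The sketch assumes the PIIX-PM base 0xaf00 documented above and x86 port I/O helpers from <sys/io.h> (ioperm() required); all names are made up:

    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/io.h>

    #define CPUHP_BASE       0xaf00               /* PIIX-PM register block base */
    #define CPUHP_SELECTOR   (CPUHP_BASE + 0x0)   /* write, DWORD: CPU selector  */
    #define CPUHP_STATUS     (CPUHP_BASE + 0x4)   /* read, byte: device status   */

    #define CPUHP_STS_ENABLED  (1u << 0)          /* bit 0: device enabled       */
    #define CPUHP_STS_INSERT   (1u << 1)          /* bit 1: insert event pending */

    static bool cpu_enabled(uint32_t cpu_index)
    {
        outl(cpu_index, CPUHP_SELECTOR);          /* select the CPU device */
        return inb(CPUHP_STATUS) & CPUHP_STS_ENABLED;
    }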
diff --git a/docs/specs/acpi_nvdimm.txt b/docs/specs/acpi_nvdimm.txt
new file mode 100644
index 0000000000..0fdd251fc0
--- /dev/null
+++ b/docs/specs/acpi_nvdimm.txt
@@ -0,0 +1,132 @@
+QEMU<->ACPI BIOS NVDIMM interface
+---------------------------------
+
+QEMU supports NVDIMM via ACPI. This document describes the basic concepts of
+NVDIMM ACPI and the interface between QEMU and the ACPI BIOS.
+
+NVDIMM ACPI Background
+----------------------
+NVDIMM is introduced in ACPI 6.0, which defines an NVDIMM root device under
+the _SB scope with a _HID of “ACPI0012”. For each NVDIMM present or intended
+to be supported by the platform, the platform firmware also exposes an ACPI
+Namespace Device under the root device.
+
+The NVDIMM child devices under the NVDIMM root device are defined with _ADR
+corresponding to the NFIT device handle. The NVDIMM root device and the
+NVDIMM devices can have device specific methods (_DSM) to provide additional
+functions specific to a particular NVDIMM implementation.
+
+This is an example from ACPI 6.0, a platform contains one NVDIMM:
+
+Scope (\_SB){
+ Device (NVDR) // Root device
+ {
+ Name (_HID, “ACPI0012”)
+ Method (_STA) {...}
+ Method (_FIT) {...}
+ Method (_DSM, ...) {...}
+ Device (NVD)
+ {
+ Name(_ADR, h) //where h is NFIT Device Handle for this NVDIMM
+ Method (_DSM, ...) {...}
+ }
+ }
+}
+
+Method supported on both NVDIMM root device and NVDIMM device
+_DSM (Device Specific Method)
+ It is a control method that enables devices to provide device specific
+ control functions that are consumed by the device driver.
+ The NVDIMM DSM specification can be found at:
+ http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
+
+ Arguments:
+ Arg0 – A Buffer containing a UUID (16 Bytes)
+ Arg1 – An Integer containing the Revision ID (4 Bytes)
+ Arg2 – An Integer containing the Function Index (4 Bytes)
+ Arg3 – A package containing parameters for the function specified by the
+ UUID, Revision ID, and Function Index
+
+ Return Value:
+ If Function Index = 0, a Buffer containing a function index bitfield.
+ Otherwise, the return value and type depends on the UUID, revision ID
+ and function index which are described in the DSM specification.
+
+Methods on NVDIMM ROOT Device
+_FIT(Firmware Interface Table)
+ It evaluates to a buffer returning data in the format of a series of NFIT
+ Type Structure.
+
+ Arguments: None
+
+ Return Value:
+ A Buffer containing a list of NFIT Type structure entries.
+
+ The detailed definition of the structure can be found at ACPI 6.0: 5.2.25
+ NVDIMM Firmware Interface Table (NFIT).
+
+QEMU NVDIMM Implementation
+==========================
+QEMU uses a 4-byte IO port starting at 0x0a18 and a RAM-based memory page
+for NVDIMM ACPI.
+
+Memory:
+ QEMU uses the BIOS Linker/loader feature to ask the BIOS to allocate a
+ memory page and dynamically patch its address into an Int32 object named
+ "MEMA" in ACPI.
+
+ This page is RAM-based and is used to transfer data between the _DSM method
+ and QEMU. If ACPI has control, the page is owned by ACPI, which writes _DSM
+ input data to it; otherwise it is owned by QEMU, which emulates the _DSM
+ access and writes the output data to it.
+
+ ACPI writes _DSM Input Data (based on the offset in the page):
+ [0x0 - 0x3]: 4 bytes, NVDIMM Device Handle, 0 is reserved for NVDIMM
+ Root device.
+ [0x4 - 0x7]: 4 bytes, Revision ID, that is the Arg1 of _DSM method.
+ [0x8 - 0xB]: 4 bytes, Function Index, that is the Arg2 of _DSM method.
+ [0xC - 0xFFF]: 4084 bytes, the Arg3 of _DSM method.
+
+ QEMU Writes Output Data (based on the offset in the page):
+ [0x0 - 0x3]: 4 bytes, the length of result
+ [0x4 - 0xFFF]: 4092 bytes, the DSM result filled by QEMU
+
+IO Port 0x0a18 - 0xa1b:
+ ACPI writes the address of the memory page allocated by the BIOS to this
+ port; QEMU then takes control and fills in the result in the memory page.
+
+ Write access:
+ [0x0a18 - 0xa1b]: 4 bytes, the address of the memory page allocated
+ by BIOS.
+
+_DSM process diagram:
+---------------------
+"MEMA" indicates the address of memory page allocated by BIOS.
+
+ +----------------------+      +-----------------------+
+ |       1. OSPM        |      |        2. OSPM        |
+ | save _DSM input data |      | write "MEMA" to       | Exit to QEMU
+ | to the page          +----->| IO port 0x0a18        +------------+
+ | indicated by "MEMA"  |      |                       |            |
+ +----------------------+      +-----------------------+            |
+                                                                    |
+                                                                    v
+ +------------------+       +-----------+       +-------------------+-------+
+ |     5. QEMU      |       |  4. QEMU  |       |         3. QEMU           |
+ | write _DSM result|       |  emulate  |       | get _DSM input data from  |
+ | to the page      +<------+   _DSM    +<------+ the page indicated by the |
+ |                  |       |           |       | value from the IO port    |
+ +--------+---------+       +-----------+       +---------------------------+
+          |
+          | Enter Guest
+          |
+          v
+ +--------------------------+      +--------------+
+ |         6. OSPM          |      |   7. OSPM    |
+ | result size is returned  |      | _DSM return  |
+ | by reading DSM           +----->+              |
+ | result from the page     |      |              |
+ +--------------------------+      +--------------+
+
+ _FIT implementation
+ -------------------
+ TODO (will fill it when nvdimm hotplug is introduced)
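
The shared-page layout described above can be summarised as two C views of the same 4 KiB page; the struct and field names below are illustrative, not necessarily those used in hw/acpi/nvdimm.c:

    #include <stdint.h>

    #define NVDIMM_DSM_PAGE_SIZE 4096

    /* What ACPI writes into the page before poking IO port 0x0a18. */
    typedef struct {
        uint32_t handle;     /* [0x0-0x3] NFIT device handle, 0 = root device */
        uint32_t revision;   /* [0x4-0x7] _DSM Arg1 */
        uint32_t function;   /* [0x8-0xB] _DSM Arg2 */
        uint8_t  arg3[NVDIMM_DSM_PAGE_SIZE - 12];   /* [0xC-0xFFF] _DSM Arg3 */
    } NvdimmDsmInput;

    /* What QEMU writes back into the same page. */
    typedef struct {
        uint32_t length;     /* [0x0-0x3] length of the result data */
        uint8_t  data[NVDIMM_DSM_PAGE_SIZE - 4];    /* [0x4-0xFFF] result */
    } NvdimmDsmOutput;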
diff --git a/fpu/softfloat-specialize.h b/fpu/softfloat-specialize.h
index a4cbdad452..43d08903a5 100644
--- a/fpu/softfloat-specialize.h
+++ b/fpu/softfloat-specialize.h
@@ -79,16 +79,6 @@ this code that are retained.
* version 2 or later. See the COPYING file in the top-level directory.
*/
-/* Does the target distinguish signaling NaNs from non-signaling NaNs
- * by setting the most significant bit of the mantissa for a signaling NaN?
- * (The more common choice is to have it be zero for SNaN and one for QNaN.)
- */
-#if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32)
-#define SNAN_BIT_IS_ONE 1
-#else
-#define SNAN_BIT_IS_ONE 0
-#endif
-
#if defined(TARGET_XTENSA)
/* Define for architectures which deviate from IEEE in not supporting
* signaling NaNs (so all NaNs are treated as quiet).
@@ -99,73 +89,106 @@ this code that are retained.
/*----------------------------------------------------------------------------
| The pattern for a default generated half-precision NaN.
*----------------------------------------------------------------------------*/
+float16 float16_default_nan(float_status *status)
+{
#if defined(TARGET_ARM)
-const float16 float16_default_nan = const_float16(0x7E00);
-#elif SNAN_BIT_IS_ONE
-const float16 float16_default_nan = const_float16(0x7DFF);
+ return const_float16(0x7E00);
+#else
+ if (status->snan_bit_is_one) {
+ return const_float16(0x7DFF);
+ } else {
+#if defined(TARGET_MIPS)
+ return const_float16(0x7E00);
#else
-const float16 float16_default_nan = const_float16(0xFE00);
+ return const_float16(0xFE00);
#endif
+ }
+#endif
+}
/*----------------------------------------------------------------------------
| The pattern for a default generated single-precision NaN.
*----------------------------------------------------------------------------*/
+float32 float32_default_nan(float_status *status)
+{
#if defined(TARGET_SPARC)
-const float32 float32_default_nan = const_float32(0x7FFFFFFF);
+ return const_float32(0x7FFFFFFF);
#elif defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) || \
defined(TARGET_XTENSA) || defined(TARGET_S390X) || defined(TARGET_TRICORE)
-const float32 float32_default_nan = const_float32(0x7FC00000);
-#elif SNAN_BIT_IS_ONE
-const float32 float32_default_nan = const_float32(0x7FBFFFFF);
+ return const_float32(0x7FC00000);
#else
-const float32 float32_default_nan = const_float32(0xFFC00000);
+ if (status->snan_bit_is_one) {
+ return const_float32(0x7FBFFFFF);
+ } else {
+#if defined(TARGET_MIPS)
+ return const_float32(0x7FC00000);
+#else
+ return const_float32(0xFFC00000);
+#endif
+ }
#endif
+}
/*----------------------------------------------------------------------------
| The pattern for a default generated double-precision NaN.
*----------------------------------------------------------------------------*/
+float64 float64_default_nan(float_status *status)
+{
#if defined(TARGET_SPARC)
-const float64 float64_default_nan = const_float64(LIT64( 0x7FFFFFFFFFFFFFFF ));
+ return const_float64(LIT64(0x7FFFFFFFFFFFFFFF));
#elif defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) || \
defined(TARGET_S390X)
-const float64 float64_default_nan = const_float64(LIT64( 0x7FF8000000000000 ));
-#elif SNAN_BIT_IS_ONE
-const float64 float64_default_nan = const_float64(LIT64(0x7FF7FFFFFFFFFFFF));
+ return const_float64(LIT64(0x7FF8000000000000));
+#else
+ if (status->snan_bit_is_one) {
+ return const_float64(LIT64(0x7FF7FFFFFFFFFFFF));
+ } else {
+#if defined(TARGET_MIPS)
+ return const_float64(LIT64(0x7FF8000000000000));
#else
-const float64 float64_default_nan = const_float64(LIT64( 0xFFF8000000000000 ));
+ return const_float64(LIT64(0xFFF8000000000000));
#endif
+ }
+#endif
+}
/*----------------------------------------------------------------------------
| The pattern for a default generated extended double-precision NaN.
*----------------------------------------------------------------------------*/
-#if SNAN_BIT_IS_ONE
-#define floatx80_default_nan_high 0x7FFF
-#define floatx80_default_nan_low LIT64(0xBFFFFFFFFFFFFFFF)
-#else
-#define floatx80_default_nan_high 0xFFFF
-#define floatx80_default_nan_low LIT64( 0xC000000000000000 )
-#endif
+floatx80 floatx80_default_nan(float_status *status)
+{
+ floatx80 r;
-const floatx80 floatx80_default_nan
- = make_floatx80_init(floatx80_default_nan_high, floatx80_default_nan_low);
+ if (status->snan_bit_is_one) {
+ r.low = LIT64(0xBFFFFFFFFFFFFFFF);
+ r.high = 0x7FFF;
+ } else {
+ r.low = LIT64(0xC000000000000000);
+ r.high = 0xFFFF;
+ }
+ return r;
+}
/*----------------------------------------------------------------------------
-| The pattern for a default generated quadruple-precision NaN. The `high' and
-| `low' values hold the most- and least-significant bits, respectively.
+| The pattern for a default generated quadruple-precision NaN.
*----------------------------------------------------------------------------*/
-#if SNAN_BIT_IS_ONE
-#define float128_default_nan_high LIT64(0x7FFF7FFFFFFFFFFF)
-#define float128_default_nan_low LIT64(0xFFFFFFFFFFFFFFFF)
-#elif defined(TARGET_S390X)
-#define float128_default_nan_high LIT64( 0x7FFF800000000000 )
-#define float128_default_nan_low LIT64( 0x0000000000000000 )
+float128 float128_default_nan(float_status *status)
+{
+ float128 r;
+
+ if (status->snan_bit_is_one) {
+ r.low = LIT64(0xFFFFFFFFFFFFFFFF);
+ r.high = LIT64(0x7FFF7FFFFFFFFFFF);
+ } else {
+ r.low = LIT64(0x0000000000000000);
+#if defined(TARGET_S390X)
+ r.high = LIT64(0x7FFF800000000000);
#else
-#define float128_default_nan_high LIT64( 0xFFFF800000000000 )
-#define float128_default_nan_low LIT64( 0x0000000000000000 )
+ r.high = LIT64(0xFFFF800000000000);
#endif
-
-const float128 float128_default_nan
- = make_float128_init(float128_default_nan_high, float128_default_nan_low);
+ }
+ return r;
+}
/*----------------------------------------------------------------------------
| Raises the exceptions specified by `flags'. Floating-point traps can be
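
With the compile-time SNAN_BIT_IS_ONE gone, the NaN encoding becomes a property of the float_status word, so a target can choose it at run time. A minimal sketch of the idea; the reset function and its placement are hypothetical, and on targets listed in the #ifdefs above (ARM, SPARC, PPC, ...) those cases still take precedence over the flag:

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    /* Hypothetical FPU reset hook for a target using the legacy
     * MIPS/SH4-style encoding, where the signaling-NaN bit is one. */
    static void mycpu_fpu_reset(float_status *fp_status)
    {
        fp_status->snan_bit_is_one = 1;

        /* The default NaN and the is_quiet/is_signaling predicates now
         * consult the status word instead of compile-time target ifdefs. */
        float32 dnan = float32_default_nan(fp_status);
        (void)dnan;
    }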
@@ -188,12 +211,12 @@ typedef struct {
} commonNaNT;
#ifdef NO_SIGNALING_NANS
-int float16_is_quiet_nan(float16 a_)
+int float16_is_quiet_nan(float16 a_, float_status *status)
{
return float16_is_any_nan(a_);
}
-int float16_is_signaling_nan(float16 a_)
+int float16_is_signaling_nan(float16 a_, float_status *status)
{
return 0;
}
@@ -203,14 +226,14 @@ int float16_is_signaling_nan(float16 a_)
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
-int float16_is_quiet_nan(float16 a_)
+int float16_is_quiet_nan(float16 a_, float_status *status)
{
uint16_t a = float16_val(a_);
-#if SNAN_BIT_IS_ONE
- return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF);
-#else
- return ((a & ~0x8000) >= 0x7c80);
-#endif
+ if (status->snan_bit_is_one) {
+ return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF);
+ } else {
+ return ((a & ~0x8000) >= 0x7C80);
+ }
}
/*----------------------------------------------------------------------------
@@ -218,14 +241,14 @@ int float16_is_quiet_nan(float16 a_)
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
-int float16_is_signaling_nan(float16 a_)
+int float16_is_signaling_nan(float16 a_, float_status *status)
{
uint16_t a = float16_val(a_);
-#if SNAN_BIT_IS_ONE
- return ((a & ~0x8000) >= 0x7c80);
-#else
- return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF);
-#endif
+ if (status->snan_bit_is_one) {
+ return ((a & ~0x8000) >= 0x7C80);
+ } else {
+ return (((a >> 9) & 0x3F) == 0x3E) && (a & 0x1FF);
+ }
}
#endif
@@ -233,20 +256,16 @@ int float16_is_signaling_nan(float16 a_)
| Returns a quiet NaN if the half-precision floating point value `a' is a
| signaling NaN; otherwise returns `a'.
*----------------------------------------------------------------------------*/
-float16 float16_maybe_silence_nan(float16 a_)
+float16 float16_maybe_silence_nan(float16 a_, float_status *status)
{
- if (float16_is_signaling_nan(a_)) {
-#if SNAN_BIT_IS_ONE
-# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32)
- return float16_default_nan;
-# else
-# error Rules for silencing a signaling NaN are target-specific
-# endif
-#else
- uint16_t a = float16_val(a_);
- a |= (1 << 9);
- return make_float16(a);
-#endif
+ if (float16_is_signaling_nan(a_, status)) {
+ if (status->snan_bit_is_one) {
+ return float16_default_nan(status);
+ } else {
+ uint16_t a = float16_val(a_);
+ a |= (1 << 9);
+ return make_float16(a);
+ }
}
return a_;
}
@@ -261,12 +280,12 @@ static commonNaNT float16ToCommonNaN(float16 a, float_status *status)
{
commonNaNT z;
- if (float16_is_signaling_nan(a)) {
+ if (float16_is_signaling_nan(a, status)) {
float_raise(float_flag_invalid, status);
}
z.sign = float16_val(a) >> 15;
z.low = 0;
- z.high = ((uint64_t) float16_val(a))<<54;
+ z.high = ((uint64_t) float16_val(a)) << 54;
return z;
}
@@ -277,27 +296,27 @@ static commonNaNT float16ToCommonNaN(float16 a, float_status *status)
static float16 commonNaNToFloat16(commonNaNT a, float_status *status)
{
- uint16_t mantissa = a.high>>54;
+ uint16_t mantissa = a.high >> 54;
if (status->default_nan_mode) {
- return float16_default_nan;
+ return float16_default_nan(status);
}
if (mantissa) {
return make_float16(((((uint16_t) a.sign) << 15)
| (0x1F << 10) | mantissa));
} else {
- return float16_default_nan;
+ return float16_default_nan(status);
}
}
#ifdef NO_SIGNALING_NANS
-int float32_is_quiet_nan(float32 a_)
+int float32_is_quiet_nan(float32 a_, float_status *status)
{
return float32_is_any_nan(a_);
}
-int float32_is_signaling_nan(float32 a_)
+int float32_is_signaling_nan(float32 a_, float_status *status)
{
return 0;
}
@@ -307,14 +326,14 @@ int float32_is_signaling_nan(float32 a_)
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
-int float32_is_quiet_nan( float32 a_ )
+int float32_is_quiet_nan(float32 a_, float_status *status)
{
uint32_t a = float32_val(a_);
-#if SNAN_BIT_IS_ONE
- return (((a >> 22) & 0x1ff) == 0x1fe) && (a & 0x003fffff);
-#else
- return ((uint32_t)(a << 1) >= 0xff800000);
-#endif
+ if (status->snan_bit_is_one) {
+ return (((a >> 22) & 0x1FF) == 0x1FE) && (a & 0x003FFFFF);
+ } else {
+ return ((uint32_t)(a << 1) >= 0xFF800000);
+ }
}
/*----------------------------------------------------------------------------
@@ -322,14 +341,14 @@ int float32_is_quiet_nan( float32 a_ )
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
-int float32_is_signaling_nan( float32 a_ )
+int float32_is_signaling_nan(float32 a_, float_status *status)
{
uint32_t a = float32_val(a_);
-#if SNAN_BIT_IS_ONE
- return ((uint32_t)(a << 1) >= 0xff800000);
-#else
- return ( ( ( a>>22 ) & 0x1FF ) == 0x1FE ) && ( a & 0x003FFFFF );
-#endif
+ if (status->snan_bit_is_one) {
+ return ((uint32_t)(a << 1) >= 0xFF800000);
+ } else {
+ return (((a >> 22) & 0x1FF) == 0x1FE) && (a & 0x003FFFFF);
+ }
}
#endif
@@ -338,20 +357,16 @@ int float32_is_signaling_nan( float32 a_ )
| signaling NaN; otherwise returns `a'.
*----------------------------------------------------------------------------*/
-float32 float32_maybe_silence_nan( float32 a_ )
+float32 float32_maybe_silence_nan(float32 a_, float_status *status)
{
- if (float32_is_signaling_nan(a_)) {
-#if SNAN_BIT_IS_ONE
-# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32)
- return float32_default_nan;
-# else
-# error Rules for silencing a signaling NaN are target-specific
-# endif
-#else
- uint32_t a = float32_val(a_);
- a |= (1 << 22);
- return make_float32(a);
-#endif
+ if (float32_is_signaling_nan(a_, status)) {
+ if (status->snan_bit_is_one) {
+ return float32_default_nan(status);
+ } else {
+ uint32_t a = float32_val(a_);
+ a |= (1 << 22);
+ return make_float32(a);
+ }
}
return a_;
}
@@ -366,12 +381,12 @@ static commonNaNT float32ToCommonNaN(float32 a, float_status *status)
{
commonNaNT z;
- if (float32_is_signaling_nan(a)) {
+ if (float32_is_signaling_nan(a, status)) {
float_raise(float_flag_invalid, status);
}
- z.sign = float32_val(a)>>31;
+ z.sign = float32_val(a) >> 31;
z.low = 0;
- z.high = ( (uint64_t) float32_val(a) )<<41;
+ z.high = ((uint64_t)float32_val(a)) << 41;
return z;
}
@@ -382,17 +397,18 @@ static commonNaNT float32ToCommonNaN(float32 a, float_status *status)
static float32 commonNaNToFloat32(commonNaNT a, float_status *status)
{
- uint32_t mantissa = a.high>>41;
+ uint32_t mantissa = a.high >> 41;
if (status->default_nan_mode) {
- return float32_default_nan;
+ return float32_default_nan(status);
}
- if ( mantissa )
+ if (mantissa) {
return make_float32(
- ( ( (uint32_t) a.sign )<<31 ) | 0x7F800000 | ( a.high>>41 ) );
- else
- return float32_default_nan;
+ (((uint32_t)a.sign) << 31) | 0x7F800000 | (a.high >> 41));
+ } else {
+ return float32_default_nan(status);
+ }
}
/*----------------------------------------------------------------------------
@@ -494,11 +510,10 @@ static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN,
return aIsLargerSignificand ? 0 : 1;
}
return bIsQNaN ? 1 : 0;
- }
- else if (aIsQNaN) {
- if (bIsSNaN || !bIsQNaN)
+ } else if (aIsQNaN) {
+ if (bIsSNaN || !bIsQNaN) {
return 0;
- else {
+ } else {
return aIsLargerSignificand ? 0 : 1;
}
} else {
@@ -556,19 +571,36 @@ static int pickNaNMulAdd(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN,
return 3;
}
- /* Prefer sNaN over qNaN, in the a, b, c order. */
- if (aIsSNaN) {
- return 0;
- } else if (bIsSNaN) {
- return 1;
- } else if (cIsSNaN) {
- return 2;
- } else if (aIsQNaN) {
- return 0;
- } else if (bIsQNaN) {
- return 1;
+ if (status->snan_bit_is_one) {
+ /* Prefer sNaN over qNaN, in the a, b, c order. */
+ if (aIsSNaN) {
+ return 0;
+ } else if (bIsSNaN) {
+ return 1;
+ } else if (cIsSNaN) {
+ return 2;
+ } else if (aIsQNaN) {
+ return 0;
+ } else if (bIsQNaN) {
+ return 1;
+ } else {
+ return 2;
+ }
} else {
- return 2;
+ /* Prefer sNaN over qNaN, in the c, a, b order. */
+ if (cIsSNaN) {
+ return 2;
+ } else if (aIsSNaN) {
+ return 0;
+ } else if (bIsSNaN) {
+ return 1;
+ } else if (cIsQNaN) {
+ return 2;
+ } else if (aIsQNaN) {
+ return 0;
+ } else {
+ return 1;
+ }
}
}
#elif defined(TARGET_PPC)
@@ -626,10 +658,10 @@ static float32 propagateFloat32NaN(float32 a, float32 b, float_status *status)
flag aIsLargerSignificand;
uint32_t av, bv;
- aIsQuietNaN = float32_is_quiet_nan( a );
- aIsSignalingNaN = float32_is_signaling_nan( a );
- bIsQuietNaN = float32_is_quiet_nan( b );
- bIsSignalingNaN = float32_is_signaling_nan( b );
+ aIsQuietNaN = float32_is_quiet_nan(a, status);
+ aIsSignalingNaN = float32_is_signaling_nan(a, status);
+ bIsQuietNaN = float32_is_quiet_nan(b, status);
+ bIsSignalingNaN = float32_is_signaling_nan(b, status);
av = float32_val(a);
bv = float32_val(b);
@@ -637,12 +669,13 @@ static float32 propagateFloat32NaN(float32 a, float32 b, float_status *status)
float_raise(float_flag_invalid, status);
}
- if (status->default_nan_mode)
- return float32_default_nan;
+ if (status->default_nan_mode) {
+ return float32_default_nan(status);
+ }
- if ((uint32_t)(av<<1) < (uint32_t)(bv<<1)) {
+ if ((uint32_t)(av << 1) < (uint32_t)(bv << 1)) {
aIsLargerSignificand = 0;
- } else if ((uint32_t)(bv<<1) < (uint32_t)(av<<1)) {
+ } else if ((uint32_t)(bv << 1) < (uint32_t)(av << 1)) {
aIsLargerSignificand = 1;
} else {
aIsLargerSignificand = (av < bv) ? 1 : 0;
@@ -650,9 +683,9 @@ static float32 propagateFloat32NaN(float32 a, float32 b, float_status *status)
if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN,
aIsLargerSignificand)) {
- return float32_maybe_silence_nan(b);
+ return float32_maybe_silence_nan(b, status);
} else {
- return float32_maybe_silence_nan(a);
+ return float32_maybe_silence_nan(a, status);
}
}
@@ -673,12 +706,12 @@ static float32 propagateFloat32MulAddNaN(float32 a, float32 b,
cIsQuietNaN, cIsSignalingNaN;
int which;
- aIsQuietNaN = float32_is_quiet_nan(a);
- aIsSignalingNaN = float32_is_signaling_nan(a);
- bIsQuietNaN = float32_is_quiet_nan(b);
- bIsSignalingNaN = float32_is_signaling_nan(b);
- cIsQuietNaN = float32_is_quiet_nan(c);
- cIsSignalingNaN = float32_is_signaling_nan(c);
+ aIsQuietNaN = float32_is_quiet_nan(a, status);
+ aIsSignalingNaN = float32_is_signaling_nan(a, status);
+ bIsQuietNaN = float32_is_quiet_nan(b, status);
+ bIsSignalingNaN = float32_is_signaling_nan(b, status);
+ cIsQuietNaN = float32_is_quiet_nan(c, status);
+ cIsSignalingNaN = float32_is_signaling_nan(c, status);
if (aIsSignalingNaN | bIsSignalingNaN | cIsSignalingNaN) {
float_raise(float_flag_invalid, status);
@@ -692,29 +725,29 @@ static float32 propagateFloat32MulAddNaN(float32 a, float32 b,
/* Note that this check is after pickNaNMulAdd so that function
* has an opportunity to set the Invalid flag.
*/
- return float32_default_nan;
+ return float32_default_nan(status);
}
switch (which) {
case 0:
- return float32_maybe_silence_nan(a);
+ return float32_maybe_silence_nan(a, status);
case 1:
- return float32_maybe_silence_nan(b);
+ return float32_maybe_silence_nan(b, status);
case 2:
- return float32_maybe_silence_nan(c);
+ return float32_maybe_silence_nan(c, status);
case 3:
default:
- return float32_default_nan;
+ return float32_default_nan(status);
}
}
#ifdef NO_SIGNALING_NANS
-int float64_is_quiet_nan(float64 a_)
+int float64_is_quiet_nan(float64 a_, float_status *status)
{
return float64_is_any_nan(a_);
}
-int float64_is_signaling_nan(float64 a_)
+int float64_is_signaling_nan(float64 a_, float_status *status)
{
return 0;
}
@@ -724,15 +757,15 @@ int float64_is_signaling_nan(float64 a_)
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
-int float64_is_quiet_nan( float64 a_ )
+int float64_is_quiet_nan(float64 a_, float_status *status)
{
uint64_t a = float64_val(a_);
-#if SNAN_BIT_IS_ONE
- return (((a >> 51) & 0xfff) == 0xffe)
- && (a & 0x0007ffffffffffffULL);
-#else
- return ((a << 1) >= 0xfff0000000000000ULL);
-#endif
+ if (status->snan_bit_is_one) {
+ return (((a >> 51) & 0xFFF) == 0xFFE)
+ && (a & 0x0007FFFFFFFFFFFFULL);
+ } else {
+ return ((a << 1) >= 0xFFF0000000000000ULL);
+ }
}
/*----------------------------------------------------------------------------
@@ -740,16 +773,15 @@ int float64_is_quiet_nan( float64 a_ )
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
-int float64_is_signaling_nan( float64 a_ )
+int float64_is_signaling_nan(float64 a_, float_status *status)
{
uint64_t a = float64_val(a_);
-#if SNAN_BIT_IS_ONE
- return ((a << 1) >= 0xfff0000000000000ULL);
-#else
- return
- ( ( ( a>>51 ) & 0xFFF ) == 0xFFE )
- && ( a & LIT64( 0x0007FFFFFFFFFFFF ) );
-#endif
+ if (status->snan_bit_is_one) {
+ return ((a << 1) >= 0xFFF0000000000000ULL);
+ } else {
+ return (((a >> 51) & 0xFFF) == 0xFFE)
+ && (a & LIT64(0x0007FFFFFFFFFFFF));
+ }
}
#endif
@@ -758,20 +790,16 @@ int float64_is_signaling_nan( float64 a_ )
| signaling NaN; otherwise returns `a'.
*----------------------------------------------------------------------------*/
-float64 float64_maybe_silence_nan( float64 a_ )
+float64 float64_maybe_silence_nan(float64 a_, float_status *status)
{
- if (float64_is_signaling_nan(a_)) {
-#if SNAN_BIT_IS_ONE
-# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32)
- return float64_default_nan;
-# else
-# error Rules for silencing a signaling NaN are target-specific
-# endif
-#else
- uint64_t a = float64_val(a_);
- a |= LIT64( 0x0008000000000000 );
- return make_float64(a);
-#endif
+ if (float64_is_signaling_nan(a_, status)) {
+ if (status->snan_bit_is_one) {
+ return float64_default_nan(status);
+ } else {
+ uint64_t a = float64_val(a_);
+ a |= LIT64(0x0008000000000000);
+ return make_float64(a);
+ }
}
return a_;
}
@@ -786,12 +814,12 @@ static commonNaNT float64ToCommonNaN(float64 a, float_status *status)
{
commonNaNT z;
- if (float64_is_signaling_nan(a)) {
+ if (float64_is_signaling_nan(a, status)) {
float_raise(float_flag_invalid, status);
}
- z.sign = float64_val(a)>>63;
+ z.sign = float64_val(a) >> 63;
z.low = 0;
- z.high = float64_val(a)<<12;
+ z.high = float64_val(a) << 12;
return z;
}
@@ -802,19 +830,20 @@ static commonNaNT float64ToCommonNaN(float64 a, float_status *status)
static float64 commonNaNToFloat64(commonNaNT a, float_status *status)
{
- uint64_t mantissa = a.high>>12;
+ uint64_t mantissa = a.high >> 12;
if (status->default_nan_mode) {
- return float64_default_nan;
+ return float64_default_nan(status);
}
- if ( mantissa )
+ if (mantissa) {
return make_float64(
- ( ( (uint64_t) a.sign )<<63 )
- | LIT64( 0x7FF0000000000000 )
- | ( a.high>>12 ));
- else
- return float64_default_nan;
+ (((uint64_t) a.sign) << 63)
+ | LIT64(0x7FF0000000000000)
+ | (a.high >> 12));
+ } else {
+ return float64_default_nan(status);
+ }
}
/*----------------------------------------------------------------------------
@@ -829,10 +858,10 @@ static float64 propagateFloat64NaN(float64 a, float64 b, float_status *status)
flag aIsLargerSignificand;
uint64_t av, bv;
- aIsQuietNaN = float64_is_quiet_nan( a );
- aIsSignalingNaN = float64_is_signaling_nan( a );
- bIsQuietNaN = float64_is_quiet_nan( b );
- bIsSignalingNaN = float64_is_signaling_nan( b );
+ aIsQuietNaN = float64_is_quiet_nan(a, status);
+ aIsSignalingNaN = float64_is_signaling_nan(a, status);
+ bIsQuietNaN = float64_is_quiet_nan(b, status);
+ bIsSignalingNaN = float64_is_signaling_nan(b, status);
av = float64_val(a);
bv = float64_val(b);
@@ -840,12 +869,13 @@ static float64 propagateFloat64NaN(float64 a, float64 b, float_status *status)
float_raise(float_flag_invalid, status);
}
- if (status->default_nan_mode)
- return float64_default_nan;
+ if (status->default_nan_mode) {
+ return float64_default_nan(status);
+ }
- if ((uint64_t)(av<<1) < (uint64_t)(bv<<1)) {
+ if ((uint64_t)(av << 1) < (uint64_t)(bv << 1)) {
aIsLargerSignificand = 0;
- } else if ((uint64_t)(bv<<1) < (uint64_t)(av<<1)) {
+ } else if ((uint64_t)(bv << 1) < (uint64_t)(av << 1)) {
aIsLargerSignificand = 1;
} else {
aIsLargerSignificand = (av < bv) ? 1 : 0;
@@ -853,9 +883,9 @@ static float64 propagateFloat64NaN(float64 a, float64 b, float_status *status)
if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN,
aIsLargerSignificand)) {
- return float64_maybe_silence_nan(b);
+ return float64_maybe_silence_nan(b, status);
} else {
- return float64_maybe_silence_nan(a);
+ return float64_maybe_silence_nan(a, status);
}
}
@@ -876,12 +906,12 @@ static float64 propagateFloat64MulAddNaN(float64 a, float64 b,
cIsQuietNaN, cIsSignalingNaN;
int which;
- aIsQuietNaN = float64_is_quiet_nan(a);
- aIsSignalingNaN = float64_is_signaling_nan(a);
- bIsQuietNaN = float64_is_quiet_nan(b);
- bIsSignalingNaN = float64_is_signaling_nan(b);
- cIsQuietNaN = float64_is_quiet_nan(c);
- cIsSignalingNaN = float64_is_signaling_nan(c);
+ aIsQuietNaN = float64_is_quiet_nan(a, status);
+ aIsSignalingNaN = float64_is_signaling_nan(a, status);
+ bIsQuietNaN = float64_is_quiet_nan(b, status);
+ bIsSignalingNaN = float64_is_signaling_nan(b, status);
+ cIsQuietNaN = float64_is_quiet_nan(c, status);
+ cIsSignalingNaN = float64_is_signaling_nan(c, status);
if (aIsSignalingNaN | bIsSignalingNaN | cIsSignalingNaN) {
float_raise(float_flag_invalid, status);
@@ -895,29 +925,29 @@ static float64 propagateFloat64MulAddNaN(float64 a, float64 b,
/* Note that this check is after pickNaNMulAdd so that function
* has an opportunity to set the Invalid flag.
*/
- return float64_default_nan;
+ return float64_default_nan(status);
}
switch (which) {
case 0:
- return float64_maybe_silence_nan(a);
+ return float64_maybe_silence_nan(a, status);
case 1:
- return float64_maybe_silence_nan(b);
+ return float64_maybe_silence_nan(b, status);
case 2:
- return float64_maybe_silence_nan(c);
+ return float64_maybe_silence_nan(c, status);
case 3:
default:
- return float64_default_nan;
+ return float64_default_nan(status);
}
}
#ifdef NO_SIGNALING_NANS
-int floatx80_is_quiet_nan(floatx80 a_)
+int floatx80_is_quiet_nan(floatx80 a_, float_status *status)
{
return floatx80_is_any_nan(a_);
}
-int floatx80_is_signaling_nan(floatx80 a_)
+int floatx80_is_signaling_nan(floatx80 a_, float_status *status)
{
return 0;
}
@@ -928,19 +958,19 @@ int floatx80_is_signaling_nan(floatx80 a_)
| function for other types as floatx80 has an explicit bit.
*----------------------------------------------------------------------------*/
-int floatx80_is_quiet_nan( floatx80 a )
+int floatx80_is_quiet_nan(floatx80 a, float_status *status)
{
-#if SNAN_BIT_IS_ONE
- uint64_t aLow;
+ if (status->snan_bit_is_one) {
+ uint64_t aLow;
- aLow = a.low & ~0x4000000000000000ULL;
- return ((a.high & 0x7fff) == 0x7fff)
- && (aLow << 1)
- && (a.low == aLow);
-#else
- return ( ( a.high & 0x7FFF ) == 0x7FFF )
- && (LIT64( 0x8000000000000000 ) <= ((uint64_t) ( a.low<<1 )));
-#endif
+ aLow = a.low & ~0x4000000000000000ULL;
+ return ((a.high & 0x7FFF) == 0x7FFF)
+ && (aLow << 1)
+ && (a.low == aLow);
+ } else {
+ return ((a.high & 0x7FFF) == 0x7FFF)
+ && (LIT64(0x8000000000000000) <= ((uint64_t)(a.low << 1)));
+ }
}
/*----------------------------------------------------------------------------
@@ -949,20 +979,19 @@ int floatx80_is_quiet_nan( floatx80 a )
| function for other types as floatx80 has an explicit bit.
*----------------------------------------------------------------------------*/
-int floatx80_is_signaling_nan( floatx80 a )
+int floatx80_is_signaling_nan(floatx80 a, float_status *status)
{
-#if SNAN_BIT_IS_ONE
- return ((a.high & 0x7fff) == 0x7fff)
- && ((a.low << 1) >= 0x8000000000000000ULL);
-#else
- uint64_t aLow;
+ if (status->snan_bit_is_one) {
+ return ((a.high & 0x7FFF) == 0x7FFF)
+ && ((a.low << 1) >= 0x8000000000000000ULL);
+ } else {
+ uint64_t aLow;
- aLow = a.low & ~ LIT64( 0x4000000000000000 );
- return
- ( ( a.high & 0x7FFF ) == 0x7FFF )
- && (uint64_t) ( aLow<<1 )
- && ( a.low == aLow );
-#endif
+ aLow = a.low & ~LIT64(0x4000000000000000);
+ return ((a.high & 0x7FFF) == 0x7FFF)
+ && (uint64_t)(aLow << 1)
+ && (a.low == aLow);
+ }
}
#endif
@@ -971,20 +1000,15 @@ int floatx80_is_signaling_nan( floatx80 a )
| `a' is a signaling NaN; otherwise returns `a'.
*----------------------------------------------------------------------------*/
-floatx80 floatx80_maybe_silence_nan( floatx80 a )
+floatx80 floatx80_maybe_silence_nan(floatx80 a, float_status *status)
{
- if (floatx80_is_signaling_nan(a)) {
-#if SNAN_BIT_IS_ONE
-# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32)
- a.low = floatx80_default_nan_low;
- a.high = floatx80_default_nan_high;
-# else
-# error Rules for silencing a signaling NaN are target-specific
-# endif
-#else
- a.low |= LIT64( 0xC000000000000000 );
- return a;
-#endif
+ if (floatx80_is_signaling_nan(a, status)) {
+ if (status->snan_bit_is_one) {
+ a = floatx80_default_nan(status);
+ } else {
+ a.low |= LIT64(0xC000000000000000);
+ return a;
+ }
}
return a;
}
@@ -997,19 +1021,21 @@ floatx80 floatx80_maybe_silence_nan( floatx80 a )
static commonNaNT floatx80ToCommonNaN(floatx80 a, float_status *status)
{
+ floatx80 dflt;
commonNaNT z;
- if (floatx80_is_signaling_nan(a)) {
+ if (floatx80_is_signaling_nan(a, status)) {
float_raise(float_flag_invalid, status);
}
- if ( a.low >> 63 ) {
+ if (a.low >> 63) {
z.sign = a.high >> 15;
z.low = 0;
z.high = a.low << 1;
} else {
- z.sign = floatx80_default_nan_high >> 15;
+ dflt = floatx80_default_nan(status);
+ z.sign = dflt.high >> 15;
z.low = 0;
- z.high = floatx80_default_nan_low << 1;
+ z.high = dflt.low << 1;
}
return z;
}
@@ -1024,19 +1050,15 @@ static floatx80 commonNaNToFloatx80(commonNaNT a, float_status *status)
floatx80 z;
if (status->default_nan_mode) {
- z.low = floatx80_default_nan_low;
- z.high = floatx80_default_nan_high;
- return z;
+ return floatx80_default_nan(status);
}
if (a.high >> 1) {
- z.low = LIT64( 0x8000000000000000 ) | a.high >> 1;
- z.high = ( ( (uint16_t) a.sign )<<15 ) | 0x7FFF;
+ z.low = LIT64(0x8000000000000000) | a.high >> 1;
+ z.high = (((uint16_t)a.sign) << 15) | 0x7FFF;
} else {
- z.low = floatx80_default_nan_low;
- z.high = floatx80_default_nan_high;
+ z = floatx80_default_nan(status);
}
-
return z;
}
@@ -1052,19 +1074,17 @@ static floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b,
flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN;
flag aIsLargerSignificand;
- aIsQuietNaN = floatx80_is_quiet_nan( a );
- aIsSignalingNaN = floatx80_is_signaling_nan( a );
- bIsQuietNaN = floatx80_is_quiet_nan( b );
- bIsSignalingNaN = floatx80_is_signaling_nan( b );
+ aIsQuietNaN = floatx80_is_quiet_nan(a, status);
+ aIsSignalingNaN = floatx80_is_signaling_nan(a, status);
+ bIsQuietNaN = floatx80_is_quiet_nan(b, status);
+ bIsSignalingNaN = floatx80_is_signaling_nan(b, status);
if (aIsSignalingNaN | bIsSignalingNaN) {
float_raise(float_flag_invalid, status);
}
if (status->default_nan_mode) {
- a.low = floatx80_default_nan_low;
- a.high = floatx80_default_nan_high;
- return a;
+ return floatx80_default_nan(status);
}
if (a.low < b.low) {
@@ -1077,19 +1097,19 @@ static floatx80 propagateFloatx80NaN(floatx80 a, floatx80 b,
if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN,
aIsLargerSignificand)) {
- return floatx80_maybe_silence_nan(b);
+ return floatx80_maybe_silence_nan(b, status);
} else {
- return floatx80_maybe_silence_nan(a);
+ return floatx80_maybe_silence_nan(a, status);
}
}
#ifdef NO_SIGNALING_NANS
-int float128_is_quiet_nan(float128 a_)
+int float128_is_quiet_nan(float128 a_, float_status *status)
{
return float128_is_any_nan(a_);
}
-int float128_is_signaling_nan(float128 a_)
+int float128_is_signaling_nan(float128 a_, float_status *status)
{
return 0;
}
@@ -1099,16 +1119,15 @@ int float128_is_signaling_nan(float128 a_)
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
-int float128_is_quiet_nan( float128 a )
+int float128_is_quiet_nan(float128 a, float_status *status)
{
-#if SNAN_BIT_IS_ONE
- return (((a.high >> 47) & 0xffff) == 0xfffe)
- && (a.low || (a.high & 0x00007fffffffffffULL));
-#else
- return
- ((a.high << 1) >= 0xffff000000000000ULL)
- && (a.low || (a.high & 0x0000ffffffffffffULL));
-#endif
+ if (status->snan_bit_is_one) {
+ return (((a.high >> 47) & 0xFFFF) == 0xFFFE)
+ && (a.low || (a.high & 0x00007FFFFFFFFFFFULL));
+ } else {
+ return ((a.high << 1) >= 0xFFFF000000000000ULL)
+ && (a.low || (a.high & 0x0000FFFFFFFFFFFFULL));
+ }
}
/*----------------------------------------------------------------------------
@@ -1116,17 +1135,15 @@ int float128_is_quiet_nan( float128 a )
| signaling NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
-int float128_is_signaling_nan( float128 a )
+int float128_is_signaling_nan(float128 a, float_status *status)
{
-#if SNAN_BIT_IS_ONE
- return
- ((a.high << 1) >= 0xffff000000000000ULL)
- && (a.low || (a.high & 0x0000ffffffffffffULL));
-#else
- return
- ( ( ( a.high>>47 ) & 0xFFFF ) == 0xFFFE )
- && ( a.low || ( a.high & LIT64( 0x00007FFFFFFFFFFF ) ) );
-#endif
+ if (status->snan_bit_is_one) {
+ return ((a.high << 1) >= 0xFFFF000000000000ULL)
+ && (a.low || (a.high & 0x0000FFFFFFFFFFFFULL));
+ } else {
+ return (((a.high >> 47) & 0xFFFF) == 0xFFFE)
+ && (a.low || (a.high & LIT64(0x00007FFFFFFFFFFF)));
+ }
}
#endif
@@ -1135,20 +1152,15 @@ int float128_is_signaling_nan( float128 a )
| a signaling NaN; otherwise returns `a'.
*----------------------------------------------------------------------------*/
-float128 float128_maybe_silence_nan( float128 a )
+float128 float128_maybe_silence_nan(float128 a, float_status *status)
{
- if (float128_is_signaling_nan(a)) {
-#if SNAN_BIT_IS_ONE
-# if defined(TARGET_MIPS) || defined(TARGET_SH4) || defined(TARGET_UNICORE32)
- a.low = float128_default_nan_low;
- a.high = float128_default_nan_high;
-# else
-# error Rules for silencing a signaling NaN are target-specific
-# endif
-#else
- a.high |= LIT64( 0x0000800000000000 );
- return a;
-#endif
+ if (float128_is_signaling_nan(a, status)) {
+ if (status->snan_bit_is_one) {
+ a = float128_default_nan(status);
+ } else {
+ a.high |= LIT64(0x0000800000000000);
+ return a;
+ }
}
return a;
}
@@ -1163,11 +1175,11 @@ static commonNaNT float128ToCommonNaN(float128 a, float_status *status)
{
commonNaNT z;
- if (float128_is_signaling_nan(a)) {
+ if (float128_is_signaling_nan(a, status)) {
float_raise(float_flag_invalid, status);
}
- z.sign = a.high>>63;
- shortShift128Left( a.high, a.low, 16, &z.high, &z.low );
+ z.sign = a.high >> 63;
+ shortShift128Left(a.high, a.low, 16, &z.high, &z.low);
return z;
}
@@ -1181,13 +1193,11 @@ static float128 commonNaNToFloat128(commonNaNT a, float_status *status)
float128 z;
if (status->default_nan_mode) {
- z.low = float128_default_nan_low;
- z.high = float128_default_nan_high;
- return z;
+ return float128_default_nan(status);
}
- shift128Right( a.high, a.low, 16, &z.high, &z.low );
- z.high |= ( ( (uint64_t) a.sign )<<63 ) | LIT64( 0x7FFF000000000000 );
+ shift128Right(a.high, a.low, 16, &z.high, &z.low);
+ z.high |= (((uint64_t)a.sign) << 63) | LIT64(0x7FFF000000000000);
return z;
}
@@ -1203,24 +1213,22 @@ static float128 propagateFloat128NaN(float128 a, float128 b,
flag aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN;
flag aIsLargerSignificand;
- aIsQuietNaN = float128_is_quiet_nan( a );
- aIsSignalingNaN = float128_is_signaling_nan( a );
- bIsQuietNaN = float128_is_quiet_nan( b );
- bIsSignalingNaN = float128_is_signaling_nan( b );
+ aIsQuietNaN = float128_is_quiet_nan(a, status);
+ aIsSignalingNaN = float128_is_signaling_nan(a, status);
+ bIsQuietNaN = float128_is_quiet_nan(b, status);
+ bIsSignalingNaN = float128_is_signaling_nan(b, status);
if (aIsSignalingNaN | bIsSignalingNaN) {
float_raise(float_flag_invalid, status);
}
if (status->default_nan_mode) {
- a.low = float128_default_nan_low;
- a.high = float128_default_nan_high;
- return a;
+ return float128_default_nan(status);
}
- if (lt128(a.high<<1, a.low, b.high<<1, b.low)) {
+ if (lt128(a.high << 1, a.low, b.high << 1, b.low)) {
aIsLargerSignificand = 0;
- } else if (lt128(b.high<<1, b.low, a.high<<1, a.low)) {
+ } else if (lt128(b.high << 1, b.low, a.high << 1, a.low)) {
aIsLargerSignificand = 1;
} else {
aIsLargerSignificand = (a.high < b.high) ? 1 : 0;
@@ -1228,9 +1236,8 @@ static float128 propagateFloat128NaN(float128 a, float128 b,
if (pickNaN(aIsQuietNaN, aIsSignalingNaN, bIsQuietNaN, bIsSignalingNaN,
aIsLargerSignificand)) {
- return float128_maybe_silence_nan(b);
+ return float128_maybe_silence_nan(b, status);
} else {
- return float128_maybe_silence_nan(a);
+ return float128_maybe_silence_nan(a, status);
}
}
-
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 166c48e434..9b1eccff24 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -2105,7 +2105,7 @@ static float32 subFloat32Sigs(float32 a, float32 b, flag zSign,
return propagateFloat32NaN(a, b, status);
}
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
if ( aExp == 0 ) {
aExp = 1;
@@ -2234,7 +2234,7 @@ float32 float32_mul(float32 a, float32 b, float_status *status)
}
if ( ( bExp | bSig ) == 0 ) {
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
return packFloat32( zSign, 0xFF, 0 );
}
@@ -2244,7 +2244,7 @@ float32 float32_mul(float32 a, float32 b, float_status *status)
}
if ( ( aExp | aSig ) == 0 ) {
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
return packFloat32( zSign, 0xFF, 0 );
}
@@ -2299,7 +2299,7 @@ float32 float32_div(float32 a, float32 b, float_status *status)
return propagateFloat32NaN(a, b, status);
}
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
return packFloat32( zSign, 0xFF, 0 );
}
@@ -2313,7 +2313,7 @@ float32 float32_div(float32 a, float32 b, float_status *status)
if ( bSig == 0 ) {
if ( ( aExp | aSig ) == 0 ) {
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
float_raise(float_flag_divbyzero, status);
return packFloat32( zSign, 0xFF, 0 );
@@ -2367,7 +2367,7 @@ float32 float32_rem(float32 a, float32 b, float_status *status)
return propagateFloat32NaN(a, b, status);
}
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
if ( bExp == 0xFF ) {
if (bSig) {
@@ -2378,7 +2378,7 @@ float32 float32_rem(float32 a, float32 b, float_status *status)
if ( bExp == 0 ) {
if ( bSig == 0 ) {
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
normalizeFloat32Subnormal( bSig, &bExp, &bSig );
}
@@ -2493,7 +2493,7 @@ float32 float32_muladd(float32 a, float32 b, float32 c, int flags,
if (infzero) {
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
if (flags & float_muladd_negate_c) {
@@ -2514,7 +2514,7 @@ float32 float32_muladd(float32 a, float32 b, float32 c, int flags,
if (pInf && (pSign ^ cSign)) {
/* addition of opposite-signed infinities => InvalidOperation */
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
/* Otherwise generate an infinity of the same sign */
return packFloat32(cSign ^ signflip, 0xff, 0);
@@ -2690,12 +2690,12 @@ float32 float32_sqrt(float32 a, float_status *status)
}
if ( ! aSign ) return a;
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
if ( aSign ) {
if ( ( aExp | aSig ) == 0 ) return a;
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
if ( aExp == 0 ) {
if ( aSig == 0 ) return float32_zero;
@@ -2828,7 +2828,7 @@ float32 float32_log2(float32 a, float_status *status)
}
if ( aSign ) {
float_raise(float_flag_invalid, status);
- return float32_default_nan;
+ return float32_default_nan(status);
}
if ( aExp == 0xFF ) {
if (aSig) {
@@ -2974,7 +2974,8 @@ int float32_eq_quiet(float32 a, float32 b, float_status *status)
if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
|| ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
) {
- if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) {
+ if (float32_is_signaling_nan(a, status)
+ || float32_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -3000,7 +3001,8 @@ int float32_le_quiet(float32 a, float32 b, float_status *status)
if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
|| ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
) {
- if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) {
+ if (float32_is_signaling_nan(a, status)
+ || float32_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -3031,7 +3033,8 @@ int float32_lt_quiet(float32 a, float32 b, float_status *status)
if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
|| ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
) {
- if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) {
+ if (float32_is_signaling_nan(a, status)
+ || float32_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -3060,7 +3063,8 @@ int float32_unordered_quiet(float32 a, float32 b, float_status *status)
if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
|| ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
) {
- if ( float32_is_signaling_nan( a ) || float32_is_signaling_nan( b ) ) {
+ if (float32_is_signaling_nan(a, status)
+ || float32_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 1;
@@ -3896,7 +3900,7 @@ static float64 subFloat64Sigs(float64 a, float64 b, flag zSign,
return propagateFloat64NaN(a, b, status);
}
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
if ( aExp == 0 ) {
aExp = 1;
@@ -4023,7 +4027,7 @@ float64 float64_mul(float64 a, float64 b, float_status *status)
}
if ( ( bExp | bSig ) == 0 ) {
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
return packFloat64( zSign, 0x7FF, 0 );
}
@@ -4033,7 +4037,7 @@ float64 float64_mul(float64 a, float64 b, float_status *status)
}
if ( ( aExp | aSig ) == 0 ) {
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
return packFloat64( zSign, 0x7FF, 0 );
}
@@ -4090,7 +4094,7 @@ float64 float64_div(float64 a, float64 b, float_status *status)
return propagateFloat64NaN(a, b, status);
}
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
return packFloat64( zSign, 0x7FF, 0 );
}
@@ -4104,7 +4108,7 @@ float64 float64_div(float64 a, float64 b, float_status *status)
if ( bSig == 0 ) {
if ( ( aExp | aSig ) == 0 ) {
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
float_raise(float_flag_divbyzero, status);
return packFloat64( zSign, 0x7FF, 0 );
@@ -4162,7 +4166,7 @@ float64 float64_rem(float64 a, float64 b, float_status *status)
return propagateFloat64NaN(a, b, status);
}
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
if ( bExp == 0x7FF ) {
if (bSig) {
@@ -4173,7 +4177,7 @@ float64 float64_rem(float64 a, float64 b, float_status *status)
if ( bExp == 0 ) {
if ( bSig == 0 ) {
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
normalizeFloat64Subnormal( bSig, &bExp, &bSig );
}
@@ -4275,7 +4279,7 @@ float64 float64_muladd(float64 a, float64 b, float64 c, int flags,
if (infzero) {
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
if (flags & float_muladd_negate_c) {
@@ -4296,7 +4300,7 @@ float64 float64_muladd(float64 a, float64 b, float64 c, int flags,
if (pInf && (pSign ^ cSign)) {
/* addition of opposite-signed infinities => InvalidOperation */
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
/* Otherwise generate an infinity of the same sign */
return packFloat64(cSign ^ signflip, 0x7ff, 0);
@@ -4494,12 +4498,12 @@ float64 float64_sqrt(float64 a, float_status *status)
}
if ( ! aSign ) return a;
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
if ( aSign ) {
if ( ( aExp | aSig ) == 0 ) return a;
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
if ( aExp == 0 ) {
if ( aSig == 0 ) return float64_zero;
@@ -4547,7 +4551,7 @@ float64 float64_log2(float64 a, float_status *status)
}
if ( aSign ) {
float_raise(float_flag_invalid, status);
- return float64_default_nan;
+ return float64_default_nan(status);
}
if ( aExp == 0x7FF ) {
if (aSig) {
@@ -4694,7 +4698,8 @@ int float64_eq_quiet(float64 a, float64 b, float_status *status)
if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
|| ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
) {
- if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) {
+ if (float64_is_signaling_nan(a, status)
+ || float64_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -4722,7 +4727,8 @@ int float64_le_quiet(float64 a, float64 b, float_status *status)
if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
|| ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
) {
- if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) {
+ if (float64_is_signaling_nan(a, status)
+ || float64_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -4753,7 +4759,8 @@ int float64_lt_quiet(float64 a, float64 b, float_status *status)
if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
|| ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
) {
- if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) {
+ if (float64_is_signaling_nan(a, status)
+ || float64_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -4782,7 +4789,8 @@ int float64_unordered_quiet(float64 a, float64 b, float_status *status)
if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
|| ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
) {
- if ( float64_is_signaling_nan( a ) || float64_is_signaling_nan( b ) ) {
+ if (float64_is_signaling_nan(a, status)
+ || float64_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 1;
@@ -5207,7 +5215,6 @@ static floatx80 subFloatx80Sigs(floatx80 a, floatx80 b, flag zSign,
int32_t aExp, bExp, zExp;
uint64_t aSig, bSig, zSig0, zSig1;
int32_t expDiff;
- floatx80 z;
aSig = extractFloatx80Frac( a );
aExp = extractFloatx80Exp( a );
@@ -5221,9 +5228,7 @@ static floatx80 subFloatx80Sigs(floatx80 a, floatx80 b, flag zSign,
return propagateFloatx80NaN(a, b, status);
}
float_raise(float_flag_invalid, status);
- z.low = floatx80_default_nan_low;
- z.high = floatx80_default_nan_high;
- return z;
+ return floatx80_default_nan(status);
}
if ( aExp == 0 ) {
aExp = 1;
@@ -5317,7 +5322,6 @@ floatx80 floatx80_mul(floatx80 a, floatx80 b, float_status *status)
flag aSign, bSign, zSign;
int32_t aExp, bExp, zExp;
uint64_t aSig, bSig, zSig0, zSig1;
- floatx80 z;
aSig = extractFloatx80Frac( a );
aExp = extractFloatx80Exp( a );
@@ -5341,9 +5345,7 @@ floatx80 floatx80_mul(floatx80 a, floatx80 b, float_status *status)
if ( ( aExp | aSig ) == 0 ) {
invalid:
float_raise(float_flag_invalid, status);
- z.low = floatx80_default_nan_low;
- z.high = floatx80_default_nan_high;
- return z;
+ return floatx80_default_nan(status);
}
return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
}
@@ -5377,7 +5379,6 @@ floatx80 floatx80_div(floatx80 a, floatx80 b, float_status *status)
int32_t aExp, bExp, zExp;
uint64_t aSig, bSig, zSig0, zSig1;
uint64_t rem0, rem1, rem2, term0, term1, term2;
- floatx80 z;
aSig = extractFloatx80Frac( a );
aExp = extractFloatx80Exp( a );
@@ -5409,9 +5410,7 @@ floatx80 floatx80_div(floatx80 a, floatx80 b, float_status *status)
if ( ( aExp | aSig ) == 0 ) {
invalid:
float_raise(float_flag_invalid, status);
- z.low = floatx80_default_nan_low;
- z.high = floatx80_default_nan_high;
- return z;
+ return floatx80_default_nan(status);
}
float_raise(float_flag_divbyzero, status);
return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
@@ -5461,7 +5460,6 @@ floatx80 floatx80_rem(floatx80 a, floatx80 b, float_status *status)
int32_t aExp, bExp, expDiff;
uint64_t aSig0, aSig1, bSig;
uint64_t q, term0, term1, alternateASig0, alternateASig1;
- floatx80 z;
aSig0 = extractFloatx80Frac( a );
aExp = extractFloatx80Exp( a );
@@ -5485,9 +5483,7 @@ floatx80 floatx80_rem(floatx80 a, floatx80 b, float_status *status)
if ( bSig == 0 ) {
invalid:
float_raise(float_flag_invalid, status);
- z.low = floatx80_default_nan_low;
- z.high = floatx80_default_nan_high;
- return z;
+ return floatx80_default_nan(status);
}
normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
}
@@ -5559,7 +5555,6 @@ floatx80 floatx80_sqrt(floatx80 a, float_status *status)
int32_t aExp, zExp;
uint64_t aSig0, aSig1, zSig0, zSig1, doubleZSig0;
uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3;
- floatx80 z;
aSig0 = extractFloatx80Frac( a );
aExp = extractFloatx80Exp( a );
@@ -5575,9 +5570,7 @@ floatx80 floatx80_sqrt(floatx80 a, float_status *status)
if ( ( aExp | aSig0 ) == 0 ) return a;
invalid:
float_raise(float_flag_invalid, status);
- z.low = floatx80_default_nan_low;
- z.high = floatx80_default_nan_high;
- return z;
+ return floatx80_default_nan(status);
}
if ( aExp == 0 ) {
if ( aSig0 == 0 ) return packFloatx80( 0, 0, 0 );
@@ -5745,8 +5738,8 @@ int floatx80_eq_quiet(floatx80 a, floatx80 b, float_status *status)
|| ( ( extractFloatx80Exp( b ) == 0x7FFF )
&& (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
) {
- if ( floatx80_is_signaling_nan( a )
- || floatx80_is_signaling_nan( b ) ) {
+ if (floatx80_is_signaling_nan(a, status)
+ || floatx80_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -5776,8 +5769,8 @@ int floatx80_le_quiet(floatx80 a, floatx80 b, float_status *status)
|| ( ( extractFloatx80Exp( b ) == 0x7FFF )
&& (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
) {
- if ( floatx80_is_signaling_nan( a )
- || floatx80_is_signaling_nan( b ) ) {
+ if (floatx80_is_signaling_nan(a, status)
+ || floatx80_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -5812,8 +5805,8 @@ int floatx80_lt_quiet(floatx80 a, floatx80 b, float_status *status)
|| ( ( extractFloatx80Exp( b ) == 0x7FFF )
&& (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
) {
- if ( floatx80_is_signaling_nan( a )
- || floatx80_is_signaling_nan( b ) ) {
+ if (floatx80_is_signaling_nan(a, status)
+ || floatx80_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -5845,8 +5838,8 @@ int floatx80_unordered_quiet(floatx80 a, floatx80 b, float_status *status)
|| ( ( extractFloatx80Exp( b ) == 0x7FFF )
&& (uint64_t) ( extractFloatx80Frac( b )<<1 ) )
) {
- if ( floatx80_is_signaling_nan( a )
- || floatx80_is_signaling_nan( b ) ) {
+ if (floatx80_is_signaling_nan(a, status)
+ || floatx80_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 1;
@@ -6385,7 +6378,6 @@ static float128 subFloat128Sigs(float128 a, float128 b, flag zSign,
int32_t aExp, bExp, zExp;
uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1;
int32_t expDiff;
- float128 z;
aSig1 = extractFloat128Frac1( a );
aSig0 = extractFloat128Frac0( a );
@@ -6403,9 +6395,7 @@ static float128 subFloat128Sigs(float128 a, float128 b, flag zSign,
return propagateFloat128NaN(a, b, status);
}
float_raise(float_flag_invalid, status);
- z.low = float128_default_nan_low;
- z.high = float128_default_nan_high;
- return z;
+ return float128_default_nan(status);
}
if ( aExp == 0 ) {
aExp = 1;
@@ -6515,7 +6505,6 @@ float128 float128_mul(float128 a, float128 b, float_status *status)
flag aSign, bSign, zSign;
int32_t aExp, bExp, zExp;
uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2, zSig3;
- float128 z;
aSig1 = extractFloat128Frac1( a );
aSig0 = extractFloat128Frac0( a );
@@ -6541,9 +6530,7 @@ float128 float128_mul(float128 a, float128 b, float_status *status)
if ( ( aExp | aSig0 | aSig1 ) == 0 ) {
invalid:
float_raise(float_flag_invalid, status);
- z.low = float128_default_nan_low;
- z.high = float128_default_nan_high;
- return z;
+ return float128_default_nan(status);
}
return packFloat128( zSign, 0x7FFF, 0, 0 );
}
@@ -6582,7 +6569,6 @@ float128 float128_div(float128 a, float128 b, float_status *status)
int32_t aExp, bExp, zExp;
uint64_t aSig0, aSig1, bSig0, bSig1, zSig0, zSig1, zSig2;
uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3;
- float128 z;
aSig1 = extractFloat128Frac1( a );
aSig0 = extractFloat128Frac0( a );
@@ -6616,9 +6602,7 @@ float128 float128_div(float128 a, float128 b, float_status *status)
if ( ( aExp | aSig0 | aSig1 ) == 0 ) {
invalid:
float_raise(float_flag_invalid, status);
- z.low = float128_default_nan_low;
- z.high = float128_default_nan_high;
- return z;
+ return float128_default_nan(status);
}
float_raise(float_flag_divbyzero, status);
return packFloat128( zSign, 0x7FFF, 0, 0 );
@@ -6673,7 +6657,6 @@ float128 float128_rem(float128 a, float128 b, float_status *status)
uint64_t aSig0, aSig1, bSig0, bSig1, q, term0, term1, term2;
uint64_t allZero, alternateASig0, alternateASig1, sigMean1;
int64_t sigMean0;
- float128 z;
aSig1 = extractFloat128Frac1( a );
aSig0 = extractFloat128Frac0( a );
@@ -6699,9 +6682,7 @@ float128 float128_rem(float128 a, float128 b, float_status *status)
if ( ( bSig0 | bSig1 ) == 0 ) {
invalid:
float_raise(float_flag_invalid, status);
- z.low = float128_default_nan_low;
- z.high = float128_default_nan_high;
- return z;
+ return float128_default_nan(status);
}
normalizeFloat128Subnormal( bSig0, bSig1, &bExp, &bSig0, &bSig1 );
}
@@ -6782,7 +6763,6 @@ float128 float128_sqrt(float128 a, float_status *status)
int32_t aExp, zExp;
uint64_t aSig0, aSig1, zSig0, zSig1, zSig2, doubleZSig0;
uint64_t rem0, rem1, rem2, rem3, term0, term1, term2, term3;
- float128 z;
aSig1 = extractFloat128Frac1( a );
aSig0 = extractFloat128Frac0( a );
@@ -6799,9 +6779,7 @@ float128 float128_sqrt(float128 a, float_status *status)
if ( ( aExp | aSig0 | aSig1 ) == 0 ) return a;
invalid:
float_raise(float_flag_invalid, status);
- z.low = float128_default_nan_low;
- z.high = float128_default_nan_high;
- return z;
+ return float128_default_nan(status);
}
if ( aExp == 0 ) {
if ( ( aSig0 | aSig1 ) == 0 ) return packFloat128( 0, 0, 0, 0 );
@@ -6969,8 +6947,8 @@ int float128_eq_quiet(float128 a, float128 b, float_status *status)
|| ( ( extractFloat128Exp( b ) == 0x7FFF )
&& ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) )
) {
- if ( float128_is_signaling_nan( a )
- || float128_is_signaling_nan( b ) ) {
+ if (float128_is_signaling_nan(a, status)
+ || float128_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -7000,8 +6978,8 @@ int float128_le_quiet(float128 a, float128 b, float_status *status)
|| ( ( extractFloat128Exp( b ) == 0x7FFF )
&& ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) )
) {
- if ( float128_is_signaling_nan( a )
- || float128_is_signaling_nan( b ) ) {
+ if (float128_is_signaling_nan(a, status)
+ || float128_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -7036,8 +7014,8 @@ int float128_lt_quiet(float128 a, float128 b, float_status *status)
|| ( ( extractFloat128Exp( b ) == 0x7FFF )
&& ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) )
) {
- if ( float128_is_signaling_nan( a )
- || float128_is_signaling_nan( b ) ) {
+ if (float128_is_signaling_nan(a, status)
+ || float128_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 0;
@@ -7070,8 +7048,8 @@ int float128_unordered_quiet(float128 a, float128 b, float_status *status)
|| ( ( extractFloat128Exp( b ) == 0x7FFF )
&& ( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) )
) {
- if ( float128_is_signaling_nan( a )
- || float128_is_signaling_nan( b ) ) {
+ if (float128_is_signaling_nan(a, status)
+ || float128_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return 1;
@@ -7351,8 +7329,8 @@ static inline int float ## s ## _compare_internal(float ## s a, float ## s b,\
( ( extractFloat ## s ## Exp( b ) == nan_exp ) && \
extractFloat ## s ## Frac( b ) )) { \
if (!is_quiet || \
- float ## s ## _is_signaling_nan( a ) || \
- float ## s ## _is_signaling_nan( b ) ) { \
+ float ## s ## _is_signaling_nan(a, status) || \
+ float ## s ## _is_signaling_nan(b, status)) { \
float_raise(float_flag_invalid, status); \
} \
return float_relation_unordered; \
@@ -7401,8 +7379,8 @@ static inline int floatx80_compare_internal(floatx80 a, floatx80 b,
( ( extractFloatx80Exp( b ) == 0x7fff ) &&
( extractFloatx80Frac( b )<<1 ) )) {
if (!is_quiet ||
- floatx80_is_signaling_nan( a ) ||
- floatx80_is_signaling_nan( b ) ) {
+ floatx80_is_signaling_nan(a, status) ||
+ floatx80_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return float_relation_unordered;
@@ -7447,8 +7425,8 @@ static inline int float128_compare_internal(float128 a, float128 b,
( ( extractFloat128Exp( b ) == 0x7fff ) &&
( extractFloat128Frac0( b ) | extractFloat128Frac1( b ) ) )) {
if (!is_quiet ||
- float128_is_signaling_nan( a ) ||
- float128_is_signaling_nan( b ) ) {
+ float128_is_signaling_nan(a, status) ||
+ float128_is_signaling_nan(b, status)) {
float_raise(float_flag_invalid, status);
}
return float_relation_unordered;
@@ -7508,11 +7486,11 @@ static inline float ## s float ## s ## _minmax(float ## s a, float ## s b, \
if (float ## s ## _is_any_nan(a) || \
float ## s ## _is_any_nan(b)) { \
if (isieee) { \
- if (float ## s ## _is_quiet_nan(a) && \
+ if (float ## s ## _is_quiet_nan(a, status) && \
!float ## s ##_is_any_nan(b)) { \
return b; \
- } else if (float ## s ## _is_quiet_nan(b) && \
- !float ## s ## _is_any_nan(a)) { \
+ } else if (float ## s ## _is_quiet_nan(b, status) && \
+ !float ## s ## _is_any_nan(a)) { \
return a; \
} \
} \
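All of the softfloat hunks above apply one mechanical change: the compile-time float64_default_nan / floatx80_default_nan_low/high constants and the one-argument *_is_signaling_nan() predicates are replaced by helpers that take a float_status pointer, so the NaN encoding can be picked per emulated CPU at run time instead of per build. Below is a minimal standalone sketch of that idea for float64 only, assuming a cut-down float_status with a single snan_bit_is_one flag; the real QEMU type and the exact bit patterns live in fpu/softfloat-specialize.h, so treat the constants here as illustrative.

    #include <inttypes.h>
    #include <stdio.h>

    /* Hypothetical, cut-down float_status: only the one field this sketch
     * needs. QEMU's real struct also carries rounding mode, flags, etc. */
    typedef struct {
        int snan_bit_is_one; /* set on targets where frac MSB = 1 marks an sNaN */
    } float_status;

    typedef uint64_t float64;

    /* Status-dependent default NaN, the shape of float64_default_nan(status). */
    static float64 float64_default_nan(float_status *status)
    {
        if (status->snan_bit_is_one) {
            return 0x7FF7FFFFFFFFFFFFULL; /* quiet bit clear, payload all ones */
        }
        return 0x7FF8000000000000ULL;     /* quiet bit set, payload zero */
    }

    /* Status-dependent sNaN test, the shape of float64_is_signaling_nan(a, status). */
    static int float64_is_signaling_nan(float64 a, float_status *status)
    {
        uint64_t frac = a & 0x000FFFFFFFFFFFFFULL;
        int is_nan = (((a >> 52) & 0x7FF) == 0x7FF) && frac != 0;
        int quiet_bit = (a >> 51) & 1;

        if (!is_nan) {
            return 0;
        }
        return status->snan_bit_is_one ? quiet_bit : !quiet_bit;
    }

    int main(void)
    {
        float_status ieee = { .snan_bit_is_one = 0 };
        float_status legacy_mips = { .snan_bit_is_one = 1 };

        printf("default NaN: %016" PRIx64 " vs %016" PRIx64 "\n",
               float64_default_nan(&ieee), float64_default_nan(&legacy_mips));
        printf("is_snan(0x7FF4...): %d vs %d\n",
               float64_is_signaling_nan(0x7FF4000000000000ULL, &ieee),
               float64_is_signaling_nan(0x7FF4000000000000ULL, &legacy_mips));
        return 0;
    }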
diff --git a/fsdev/9p-iov-marshal.c b/fsdev/9p-iov-marshal.c
index fce1ee9e55..584082b5d6 100644
--- a/fsdev/9p-iov-marshal.c
+++ b/fsdev/9p-iov-marshal.c
@@ -14,7 +14,6 @@
#include "qemu/osdep.h"
#include <glib/gprintf.h>
#include <utime.h>
-#include <sys/uio.h>
#include "9p-iov-marshal.h"
#include "qemu/bswap.h"
diff --git a/fsdev/9p-marshal.c b/fsdev/9p-marshal.c
index f56ef0e60c..238dbf21b1 100644
--- a/fsdev/9p-marshal.c
+++ b/fsdev/9p-marshal.c
@@ -15,7 +15,6 @@
#include <glib/gprintf.h>
#include <dirent.h>
#include <utime.h>
-#include <sys/uio.h>
#include "9p-marshal.h"
diff --git a/fsdev/file-op-9p.h b/fsdev/file-op-9p.h
index 5561494974..b1338ba06c 100644
--- a/fsdev/file-op-9p.h
+++ b/fsdev/file-op-9p.h
@@ -14,7 +14,6 @@
#define _FILEOP_H
#include <dirent.h>
#include <utime.h>
-#include <sys/uio.h>
#include <sys/vfs.h>
#define SM_LOCAL_MODE_BITS 0600
diff --git a/hmp.c b/hmp.c
index 997a768214..925601ac3e 100644
--- a/hmp.c
+++ b/hmp.c
@@ -2457,17 +2457,17 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict)
c = l->value->props;
monitor_printf(mon, " CPUInstance Properties:\n");
- if (c->has_node) {
- monitor_printf(mon, " node: \"%" PRIu64 "\"\n", c->node);
+ if (c->has_node_id) {
+ monitor_printf(mon, " node-id: \"%" PRIu64 "\"\n", c->node_id);
}
- if (c->has_socket) {
- monitor_printf(mon, " socket: \"%" PRIu64 "\"\n", c->socket);
+ if (c->has_socket_id) {
+ monitor_printf(mon, " socket-id: \"%" PRIu64 "\"\n", c->socket_id);
}
- if (c->has_core) {
- monitor_printf(mon, " core: \"%" PRIu64 "\"\n", c->core);
+ if (c->has_core_id) {
+ monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id);
}
- if (c->has_thread) {
- monitor_printf(mon, " thread: \"%" PRIu64 "\"\n", c->thread);
+ if (c->has_thread_id) {
+ monitor_printf(mon, " thread-id: \"%" PRIu64 "\"\n", c->thread_id);
}
l = l->next;
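The hmp.c hunk only follows a QAPI rename: the optional topology members of CpuInstanceProperties moved from node/socket/core/thread to node-id/socket-id/core-id/thread-id, so both the has_* guards and the printed labels change. A standalone sketch of the optional-field pattern the hunk relies on, with an assumed int64_t member type (the real struct is generated from the QAPI schema):

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the generated CpuInstanceProperties layout the hunk assumes:
     * every topology member is optional and guarded by a has_*_id flag. */
    typedef struct {
        bool has_node_id;   int64_t node_id;
        bool has_socket_id; int64_t socket_id;
        bool has_core_id;   int64_t core_id;
        bool has_thread_id; int64_t thread_id;
    } CpuInstanceProperties;

    /* Same shape as the printing in hmp_hotpluggable_cpus() above. */
    static void print_cpu_instance_props(const CpuInstanceProperties *c)
    {
        if (c->has_node_id) {
            printf("    node-id: \"%" PRId64 "\"\n", c->node_id);
        }
        if (c->has_socket_id) {
            printf("    socket-id: \"%" PRId64 "\"\n", c->socket_id);
        }
        if (c->has_core_id) {
            printf("    core-id: \"%" PRId64 "\"\n", c->core_id);
        }
        if (c->has_thread_id) {
            printf("    thread-id: \"%" PRId64 "\"\n", c->thread_id);
        }
    }

    int main(void)
    {
        CpuInstanceProperties c = {
            .has_socket_id = true, .socket_id = 1,
            .has_core_id   = true, .core_id   = 0,
        };
        print_cpu_instance_props(&c);
        return 0;
    }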
diff --git a/hw/9pfs/9p-synth.c b/hw/9pfs/9p-synth.c
index 73c8be816b..4b6d4e6a3f 100644
--- a/hw/9pfs/9p-synth.c
+++ b/hw/9pfs/9p-synth.c
@@ -21,19 +21,19 @@
#include "qemu/cutils.h"
/* Root node for synth file system */
-static V9fsSynthNode v9fs_synth_root = {
+static V9fsSynthNode synth_root = {
.name = "/",
.actual_attr = {
.mode = 0555 | S_IFDIR,
.nlink = 1,
},
- .attr = &v9fs_synth_root.actual_attr,
+ .attr = &synth_root.actual_attr,
};
-static QemuMutex v9fs_synth_mutex;
-static int v9fs_synth_node_count;
+static QemuMutex synth_mutex;
+static int synth_node_count;
/* set to 1 when the synth fs is ready */
-static int v9fs_synth_fs;
+static int synth_fs;
static V9fsSynthNode *v9fs_add_dir_node(V9fsSynthNode *parent, int mode,
const char *name,
@@ -69,16 +69,16 @@ int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode,
int ret;
V9fsSynthNode *node, *tmp;
- if (!v9fs_synth_fs) {
+ if (!synth_fs) {
return EAGAIN;
}
if (!name || (strlen(name) >= NAME_MAX)) {
return EINVAL;
}
if (!parent) {
- parent = &v9fs_synth_root;
+ parent = &synth_root;
}
- qemu_mutex_lock(&v9fs_synth_mutex);
+ qemu_mutex_lock(&synth_mutex);
QLIST_FOREACH(tmp, &parent->child, sibling) {
if (!strcmp(tmp->name, name)) {
ret = EEXIST;
@@ -86,7 +86,7 @@ int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode,
}
}
/* Add the name */
- node = v9fs_add_dir_node(parent, mode, name, NULL, v9fs_synth_node_count++);
+ node = v9fs_add_dir_node(parent, mode, name, NULL, synth_node_count++);
v9fs_add_dir_node(node, parent->attr->mode, "..",
parent->attr, parent->attr->inode);
v9fs_add_dir_node(node, node->attr->mode, ".",
@@ -94,7 +94,7 @@ int qemu_v9fs_synth_mkdir(V9fsSynthNode *parent, int mode,
*result = node;
ret = 0;
err_out:
- qemu_mutex_unlock(&v9fs_synth_mutex);
+ qemu_mutex_unlock(&synth_mutex);
return ret;
}
@@ -105,17 +105,17 @@ int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode,
int ret;
V9fsSynthNode *node, *tmp;
- if (!v9fs_synth_fs) {
+ if (!synth_fs) {
return EAGAIN;
}
if (!name || (strlen(name) >= NAME_MAX)) {
return EINVAL;
}
if (!parent) {
- parent = &v9fs_synth_root;
+ parent = &synth_root;
}
- qemu_mutex_lock(&v9fs_synth_mutex);
+ qemu_mutex_lock(&synth_mutex);
QLIST_FOREACH(tmp, &parent->child, sibling) {
if (!strcmp(tmp->name, name)) {
ret = EEXIST;
@@ -126,7 +126,7 @@ int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode,
mode = ((mode & 0777) | S_IFREG);
node = g_malloc0(sizeof(V9fsSynthNode));
node->attr = &node->actual_attr;
- node->attr->inode = v9fs_synth_node_count++;
+ node->attr->inode = synth_node_count++;
node->attr->nlink = 1;
node->attr->read = read;
node->attr->write = write;
@@ -136,11 +136,11 @@ int qemu_v9fs_synth_add_file(V9fsSynthNode *parent, int mode,
QLIST_INSERT_HEAD_RCU(&parent->child, node, sibling);
ret = 0;
err_out:
- qemu_mutex_unlock(&v9fs_synth_mutex);
+ qemu_mutex_unlock(&synth_mutex);
return ret;
}
-static void v9fs_synth_fill_statbuf(V9fsSynthNode *node, struct stat *stbuf)
+static void synth_fill_statbuf(V9fsSynthNode *node, struct stat *stbuf)
{
stbuf->st_dev = 0;
stbuf->st_ino = node->attr->inode;
@@ -157,24 +157,24 @@ static void v9fs_synth_fill_statbuf(V9fsSynthNode *node, struct stat *stbuf)
stbuf->st_ctime = 0;
}
-static int v9fs_synth_lstat(FsContext *fs_ctx,
+static int synth_lstat(FsContext *fs_ctx,
V9fsPath *fs_path, struct stat *stbuf)
{
V9fsSynthNode *node = *(V9fsSynthNode **)fs_path->data;
- v9fs_synth_fill_statbuf(node, stbuf);
+ synth_fill_statbuf(node, stbuf);
return 0;
}
-static int v9fs_synth_fstat(FsContext *fs_ctx, int fid_type,
+static int synth_fstat(FsContext *fs_ctx, int fid_type,
V9fsFidOpenState *fs, struct stat *stbuf)
{
V9fsSynthOpenState *synth_open = fs->private;
- v9fs_synth_fill_statbuf(synth_open->node, stbuf);
+ synth_fill_statbuf(synth_open->node, stbuf);
return 0;
}
-static int v9fs_synth_opendir(FsContext *ctx,
+static int synth_opendir(FsContext *ctx,
V9fsPath *fs_path, V9fsFidOpenState *fs)
{
V9fsSynthOpenState *synth_open;
@@ -187,7 +187,7 @@ static int v9fs_synth_opendir(FsContext *ctx,
return 0;
}
-static int v9fs_synth_closedir(FsContext *ctx, V9fsFidOpenState *fs)
+static int synth_closedir(FsContext *ctx, V9fsFidOpenState *fs)
{
V9fsSynthOpenState *synth_open = fs->private;
V9fsSynthNode *node = synth_open->node;
@@ -198,24 +198,24 @@ static int v9fs_synth_closedir(FsContext *ctx, V9fsFidOpenState *fs)
return 0;
}
-static off_t v9fs_synth_telldir(FsContext *ctx, V9fsFidOpenState *fs)
+static off_t synth_telldir(FsContext *ctx, V9fsFidOpenState *fs)
{
V9fsSynthOpenState *synth_open = fs->private;
return synth_open->offset;
}
-static void v9fs_synth_seekdir(FsContext *ctx, V9fsFidOpenState *fs, off_t off)
+static void synth_seekdir(FsContext *ctx, V9fsFidOpenState *fs, off_t off)
{
V9fsSynthOpenState *synth_open = fs->private;
synth_open->offset = off;
}
-static void v9fs_synth_rewinddir(FsContext *ctx, V9fsFidOpenState *fs)
+static void synth_rewinddir(FsContext *ctx, V9fsFidOpenState *fs)
{
- v9fs_synth_seekdir(ctx, fs, 0);
+ synth_seekdir(ctx, fs, 0);
}
-static void v9fs_synth_direntry(V9fsSynthNode *node,
+static void synth_direntry(V9fsSynthNode *node,
struct dirent *entry, off_t off)
{
strcpy(entry->d_name, node->name);
@@ -223,7 +223,7 @@ static void v9fs_synth_direntry(V9fsSynthNode *node,
entry->d_off = off + 1;
}
-static struct dirent *v9fs_synth_get_dentry(V9fsSynthNode *dir,
+static struct dirent *synth_get_dentry(V9fsSynthNode *dir,
struct dirent *entry, off_t off)
{
int i = 0;
@@ -242,23 +242,23 @@ static struct dirent *v9fs_synth_get_dentry(V9fsSynthNode *dir,
/* end of directory */
return NULL;
}
- v9fs_synth_direntry(node, entry, off);
+ synth_direntry(node, entry, off);
return entry;
}
-static struct dirent *v9fs_synth_readdir(FsContext *ctx, V9fsFidOpenState *fs)
+static struct dirent *synth_readdir(FsContext *ctx, V9fsFidOpenState *fs)
{
struct dirent *entry;
V9fsSynthOpenState *synth_open = fs->private;
V9fsSynthNode *node = synth_open->node;
- entry = v9fs_synth_get_dentry(node, &synth_open->dent, synth_open->offset);
+ entry = synth_get_dentry(node, &synth_open->dent, synth_open->offset);
if (entry) {
synth_open->offset++;
}
return entry;
}
-static int v9fs_synth_open(FsContext *ctx, V9fsPath *fs_path,
+static int synth_open(FsContext *ctx, V9fsPath *fs_path,
int flags, V9fsFidOpenState *fs)
{
V9fsSynthOpenState *synth_open;
@@ -271,7 +271,7 @@ static int v9fs_synth_open(FsContext *ctx, V9fsPath *fs_path,
return 0;
}
-static int v9fs_synth_open2(FsContext *fs_ctx, V9fsPath *dir_path,
+static int synth_open2(FsContext *fs_ctx, V9fsPath *dir_path,
const char *name, int flags,
FsCred *credp, V9fsFidOpenState *fs)
{
@@ -279,7 +279,7 @@ static int v9fs_synth_open2(FsContext *fs_ctx, V9fsPath *dir_path,
return -1;
}
-static int v9fs_synth_close(FsContext *ctx, V9fsFidOpenState *fs)
+static int synth_close(FsContext *ctx, V9fsFidOpenState *fs)
{
V9fsSynthOpenState *synth_open = fs->private;
V9fsSynthNode *node = synth_open->node;
@@ -290,7 +290,7 @@ static int v9fs_synth_close(FsContext *ctx, V9fsFidOpenState *fs)
return 0;
}
-static ssize_t v9fs_synth_pwritev(FsContext *ctx, V9fsFidOpenState *fs,
+static ssize_t synth_pwritev(FsContext *ctx, V9fsFidOpenState *fs,
const struct iovec *iov,
int iovcnt, off_t offset)
{
@@ -314,7 +314,7 @@ static ssize_t v9fs_synth_pwritev(FsContext *ctx, V9fsFidOpenState *fs,
return count;
}
-static ssize_t v9fs_synth_preadv(FsContext *ctx, V9fsFidOpenState *fs,
+static ssize_t synth_preadv(FsContext *ctx, V9fsFidOpenState *fs,
const struct iovec *iov,
int iovcnt, off_t offset)
{
@@ -338,112 +338,112 @@ static ssize_t v9fs_synth_preadv(FsContext *ctx, V9fsFidOpenState *fs,
return count;
}
-static int v9fs_synth_truncate(FsContext *ctx, V9fsPath *path, off_t offset)
+static int synth_truncate(FsContext *ctx, V9fsPath *path, off_t offset)
{
errno = ENOSYS;
return -1;
}
-static int v9fs_synth_chmod(FsContext *fs_ctx, V9fsPath *path, FsCred *credp)
+static int synth_chmod(FsContext *fs_ctx, V9fsPath *path, FsCred *credp)
{
errno = EPERM;
return -1;
}
-static int v9fs_synth_mknod(FsContext *fs_ctx, V9fsPath *path,
+static int synth_mknod(FsContext *fs_ctx, V9fsPath *path,
const char *buf, FsCred *credp)
{
errno = EPERM;
return -1;
}
-static int v9fs_synth_mkdir(FsContext *fs_ctx, V9fsPath *path,
+static int synth_mkdir(FsContext *fs_ctx, V9fsPath *path,
const char *buf, FsCred *credp)
{
errno = EPERM;
return -1;
}
-static ssize_t v9fs_synth_readlink(FsContext *fs_ctx, V9fsPath *path,
+static ssize_t synth_readlink(FsContext *fs_ctx, V9fsPath *path,
char *buf, size_t bufsz)
{
errno = ENOSYS;
return -1;
}
-static int v9fs_synth_symlink(FsContext *fs_ctx, const char *oldpath,
+static int synth_symlink(FsContext *fs_ctx, const char *oldpath,
V9fsPath *newpath, const char *buf, FsCred *credp)
{
errno = EPERM;
return -1;
}
-static int v9fs_synth_link(FsContext *fs_ctx, V9fsPath *oldpath,
+static int synth_link(FsContext *fs_ctx, V9fsPath *oldpath,
V9fsPath *newpath, const char *buf)
{
errno = EPERM;
return -1;
}
-static int v9fs_synth_rename(FsContext *ctx, const char *oldpath,
+static int synth_rename(FsContext *ctx, const char *oldpath,
const char *newpath)
{
errno = EPERM;
return -1;
}
-static int v9fs_synth_chown(FsContext *fs_ctx, V9fsPath *path, FsCred *credp)
+static int synth_chown(FsContext *fs_ctx, V9fsPath *path, FsCred *credp)
{
errno = EPERM;
return -1;
}
-static int v9fs_synth_utimensat(FsContext *fs_ctx, V9fsPath *path,
+static int synth_utimensat(FsContext *fs_ctx, V9fsPath *path,
const struct timespec *buf)
{
errno = EPERM;
return 0;
}
-static int v9fs_synth_remove(FsContext *ctx, const char *path)
+static int synth_remove(FsContext *ctx, const char *path)
{
errno = EPERM;
return -1;
}
-static int v9fs_synth_fsync(FsContext *ctx, int fid_type,
+static int synth_fsync(FsContext *ctx, int fid_type,
V9fsFidOpenState *fs, int datasync)
{
errno = ENOSYS;
return 0;
}
-static int v9fs_synth_statfs(FsContext *s, V9fsPath *fs_path,
+static int synth_statfs(FsContext *s, V9fsPath *fs_path,
struct statfs *stbuf)
{
stbuf->f_type = 0xABCD;
stbuf->f_bsize = 512;
stbuf->f_blocks = 0;
- stbuf->f_files = v9fs_synth_node_count;
+ stbuf->f_files = synth_node_count;
stbuf->f_namelen = NAME_MAX;
return 0;
}
-static ssize_t v9fs_synth_lgetxattr(FsContext *ctx, V9fsPath *path,
+static ssize_t synth_lgetxattr(FsContext *ctx, V9fsPath *path,
const char *name, void *value, size_t size)
{
errno = ENOTSUP;
return -1;
}
-static ssize_t v9fs_synth_llistxattr(FsContext *ctx, V9fsPath *path,
+static ssize_t synth_llistxattr(FsContext *ctx, V9fsPath *path,
void *value, size_t size)
{
errno = ENOTSUP;
return -1;
}
-static int v9fs_synth_lsetxattr(FsContext *ctx, V9fsPath *path,
+static int synth_lsetxattr(FsContext *ctx, V9fsPath *path,
const char *name, void *value,
size_t size, int flags)
{
@@ -451,14 +451,14 @@ static int v9fs_synth_lsetxattr(FsContext *ctx, V9fsPath *path,
return -1;
}
-static int v9fs_synth_lremovexattr(FsContext *ctx,
+static int synth_lremovexattr(FsContext *ctx,
V9fsPath *path, const char *name)
{
errno = ENOTSUP;
return -1;
}
-static int v9fs_synth_name_to_path(FsContext *ctx, V9fsPath *dir_path,
+static int synth_name_to_path(FsContext *ctx, V9fsPath *dir_path,
const char *name, V9fsPath *target)
{
V9fsSynthNode *node;
@@ -471,7 +471,7 @@ static int v9fs_synth_name_to_path(FsContext *ctx, V9fsPath *dir_path,
}
if (!dir_path) {
- dir_node = &v9fs_synth_root;
+ dir_node = &synth_root;
} else {
dir_node = *(V9fsSynthNode **)dir_path->data;
}
@@ -500,7 +500,7 @@ out:
return 0;
}
-static int v9fs_synth_renameat(FsContext *ctx, V9fsPath *olddir,
+static int synth_renameat(FsContext *ctx, V9fsPath *olddir,
const char *old_name, V9fsPath *newdir,
const char *new_name)
{
@@ -508,62 +508,62 @@ static int v9fs_synth_renameat(FsContext *ctx, V9fsPath *olddir,
return -1;
}
-static int v9fs_synth_unlinkat(FsContext *ctx, V9fsPath *dir,
+static int synth_unlinkat(FsContext *ctx, V9fsPath *dir,
const char *name, int flags)
{
errno = EPERM;
return -1;
}
-static int v9fs_synth_init(FsContext *ctx)
+static int synth_init(FsContext *ctx)
{
- QLIST_INIT(&v9fs_synth_root.child);
- qemu_mutex_init(&v9fs_synth_mutex);
+ QLIST_INIT(&synth_root.child);
+ qemu_mutex_init(&synth_mutex);
/* Add "." and ".." entries for root */
- v9fs_add_dir_node(&v9fs_synth_root, v9fs_synth_root.attr->mode,
- "..", v9fs_synth_root.attr, v9fs_synth_root.attr->inode);
- v9fs_add_dir_node(&v9fs_synth_root, v9fs_synth_root.attr->mode,
- ".", v9fs_synth_root.attr, v9fs_synth_root.attr->inode);
+ v9fs_add_dir_node(&synth_root, synth_root.attr->mode,
+ "..", synth_root.attr, synth_root.attr->inode);
+ v9fs_add_dir_node(&synth_root, synth_root.attr->mode,
+ ".", synth_root.attr, synth_root.attr->inode);
/* Mark the subsystem is ready for use */
- v9fs_synth_fs = 1;
+ synth_fs = 1;
return 0;
}
FileOperations synth_ops = {
- .init = v9fs_synth_init,
- .lstat = v9fs_synth_lstat,
- .readlink = v9fs_synth_readlink,
- .close = v9fs_synth_close,
- .closedir = v9fs_synth_closedir,
- .open = v9fs_synth_open,
- .opendir = v9fs_synth_opendir,
- .rewinddir = v9fs_synth_rewinddir,
- .telldir = v9fs_synth_telldir,
- .readdir = v9fs_synth_readdir,
- .seekdir = v9fs_synth_seekdir,
- .preadv = v9fs_synth_preadv,
- .pwritev = v9fs_synth_pwritev,
- .chmod = v9fs_synth_chmod,
- .mknod = v9fs_synth_mknod,
- .mkdir = v9fs_synth_mkdir,
- .fstat = v9fs_synth_fstat,
- .open2 = v9fs_synth_open2,
- .symlink = v9fs_synth_symlink,
- .link = v9fs_synth_link,
- .truncate = v9fs_synth_truncate,
- .rename = v9fs_synth_rename,
- .chown = v9fs_synth_chown,
- .utimensat = v9fs_synth_utimensat,
- .remove = v9fs_synth_remove,
- .fsync = v9fs_synth_fsync,
- .statfs = v9fs_synth_statfs,
- .lgetxattr = v9fs_synth_lgetxattr,
- .llistxattr = v9fs_synth_llistxattr,
- .lsetxattr = v9fs_synth_lsetxattr,
- .lremovexattr = v9fs_synth_lremovexattr,
- .name_to_path = v9fs_synth_name_to_path,
- .renameat = v9fs_synth_renameat,
- .unlinkat = v9fs_synth_unlinkat,
+ .init = synth_init,
+ .lstat = synth_lstat,
+ .readlink = synth_readlink,
+ .close = synth_close,
+ .closedir = synth_closedir,
+ .open = synth_open,
+ .opendir = synth_opendir,
+ .rewinddir = synth_rewinddir,
+ .telldir = synth_telldir,
+ .readdir = synth_readdir,
+ .seekdir = synth_seekdir,
+ .preadv = synth_preadv,
+ .pwritev = synth_pwritev,
+ .chmod = synth_chmod,
+ .mknod = synth_mknod,
+ .mkdir = synth_mkdir,
+ .fstat = synth_fstat,
+ .open2 = synth_open2,
+ .symlink = synth_symlink,
+ .link = synth_link,
+ .truncate = synth_truncate,
+ .rename = synth_rename,
+ .chown = synth_chown,
+ .utimensat = synth_utimensat,
+ .remove = synth_remove,
+ .fsync = synth_fsync,
+ .statfs = synth_statfs,
+ .lgetxattr = synth_lgetxattr,
+ .llistxattr = synth_llistxattr,
+ .lsetxattr = synth_lsetxattr,
+ .lremovexattr = synth_lremovexattr,
+ .name_to_path = synth_name_to_path,
+ .renameat = synth_renameat,
+ .unlinkat = synth_unlinkat,
};
diff --git a/hw/acpi/Makefile.objs b/hw/acpi/Makefile.objs
index 66bd72702b..4b7da6639f 100644
--- a/hw/acpi/Makefile.objs
+++ b/hw/acpi/Makefile.objs
@@ -2,7 +2,9 @@ common-obj-$(CONFIG_ACPI_X86) += core.o piix4.o pcihp.o
common-obj-$(CONFIG_ACPI_X86_ICH) += ich9.o tco.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o
common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o memory_hotplug_acpi_table.o
+common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu.o
obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
common-obj-$(CONFIG_ACPI) += acpi_interface.o
common-obj-$(CONFIG_ACPI) += bios-linker-loader.o
common-obj-$(CONFIG_ACPI) += aml-build.o
+common-obj-$(call land,$(CONFIG_ACPI),$(CONFIG_IPMI)) += ipmi.o
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index 874e473cac..db3e914fb4 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -660,6 +660,20 @@ Aml *aml_call4(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4)
return var;
}
+/* helper to call method with 5 arguments */
+Aml *aml_call5(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4,
+ Aml *arg5)
+{
+ Aml *var = aml_alloc();
+ build_append_namestring(var->buf, "%s", method);
+ aml_append(var, arg1);
+ aml_append(var, arg2);
+ aml_append(var, arg3);
+ aml_append(var, arg4);
+ aml_append(var, arg5);
+ return var;
+}
+
/*
* ACPI 5.0: 6.4.3.8.1 GPIO Connection Descriptor
* Type 1, Large Item Name 0xC
@@ -1481,6 +1495,14 @@ Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target)
target);
}
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefObjectType */
+Aml *aml_object_type(Aml *object)
+{
+ Aml *var = aml_opcode(0x8E /* ObjectTypeOp */);
+ aml_append(var, object);
+ return var;
+}
+
void
build_header(BIOSLinker *linker, GArray *table_data,
AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
new file mode 100644
index 0000000000..c13b65c2c9
--- /dev/null
+++ b/hw/acpi/cpu.c
@@ -0,0 +1,561 @@
+#include "qemu/osdep.h"
+#include "hw/boards.h"
+#include "hw/acpi/cpu.h"
+#include "qapi/error.h"
+#include "qapi-event.h"
+#include "trace.h"
+
+#define ACPI_CPU_HOTPLUG_REG_LEN 12
+#define ACPI_CPU_SELECTOR_OFFSET_WR 0
+#define ACPI_CPU_FLAGS_OFFSET_RW 4
+#define ACPI_CPU_CMD_OFFSET_WR 5
+#define ACPI_CPU_CMD_DATA_OFFSET_RW 8
+
+enum {
+ CPHP_GET_NEXT_CPU_WITH_EVENT_CMD = 0,
+ CPHP_OST_EVENT_CMD = 1,
+ CPHP_OST_STATUS_CMD = 2,
+ CPHP_CMD_MAX
+};
+
+static ACPIOSTInfo *acpi_cpu_device_status(int idx, AcpiCpuStatus *cdev)
+{
+ ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1);
+
+ info->slot_type = ACPI_SLOT_TYPE_CPU;
+ info->slot = g_strdup_printf("%d", idx);
+ info->source = cdev->ost_event;
+ info->status = cdev->ost_status;
+ if (cdev->cpu) {
+ DeviceState *dev = DEVICE(cdev->cpu);
+ if (dev->id) {
+ info->device = g_strdup(dev->id);
+ info->has_device = true;
+ }
+ }
+ return info;
+}
+
+void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list)
+{
+ int i;
+
+ for (i = 0; i < cpu_st->dev_count; i++) {
+ ACPIOSTInfoList *elem = g_new0(ACPIOSTInfoList, 1);
+ elem->value = acpi_cpu_device_status(i, &cpu_st->devs[i]);
+ elem->next = NULL;
+ **list = elem;
+ *list = &elem->next;
+ }
+}
+
+static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size)
+{
+ uint64_t val = 0;
+ CPUHotplugState *cpu_st = opaque;
+ AcpiCpuStatus *cdev;
+
+ if (cpu_st->selector >= cpu_st->dev_count) {
+ return val;
+ }
+
+ cdev = &cpu_st->devs[cpu_st->selector];
+ switch (addr) {
+ case ACPI_CPU_FLAGS_OFFSET_RW: /* pack and return is_* fields */
+ val |= cdev->cpu ? 1 : 0;
+ val |= cdev->is_inserting ? 2 : 0;
+ val |= cdev->is_removing ? 4 : 0;
+ trace_cpuhp_acpi_read_flags(cpu_st->selector, val);
+ break;
+ case ACPI_CPU_CMD_DATA_OFFSET_RW:
+ switch (cpu_st->command) {
+ case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD:
+ val = cpu_st->selector;
+ break;
+ default:
+ break;
+ }
+ trace_cpuhp_acpi_read_cmd_data(cpu_st->selector, val);
+ break;
+ default:
+ break;
+ }
+ return val;
+}
+
+static void cpu_hotplug_wr(void *opaque, hwaddr addr, uint64_t data,
+ unsigned int size)
+{
+ CPUHotplugState *cpu_st = opaque;
+ AcpiCpuStatus *cdev;
+ ACPIOSTInfo *info;
+
+ assert(cpu_st->dev_count);
+
+ if (addr) {
+ if (cpu_st->selector >= cpu_st->dev_count) {
+ trace_cpuhp_acpi_invalid_idx_selected(cpu_st->selector);
+ return;
+ }
+ }
+
+ switch (addr) {
+ case ACPI_CPU_SELECTOR_OFFSET_WR: /* current CPU selector */
+ cpu_st->selector = data;
+ trace_cpuhp_acpi_write_idx(cpu_st->selector);
+ break;
+ case ACPI_CPU_FLAGS_OFFSET_RW: /* set is_* fields */
+ cdev = &cpu_st->devs[cpu_st->selector];
+ if (data & 2) { /* clear insert event */
+ cdev->is_inserting = false;
+ trace_cpuhp_acpi_clear_inserting_evt(cpu_st->selector);
+ } else if (data & 4) { /* clear remove event */
+ cdev->is_removing = false;
+ trace_cpuhp_acpi_clear_remove_evt(cpu_st->selector);
+ } else if (data & 8) {
+ DeviceState *dev = NULL;
+ HotplugHandler *hotplug_ctrl = NULL;
+
+ if (!cdev->cpu) {
+ trace_cpuhp_acpi_ejecting_invalid_cpu(cpu_st->selector);
+ break;
+ }
+
+ trace_cpuhp_acpi_ejecting_cpu(cpu_st->selector);
+ dev = DEVICE(cdev->cpu);
+ hotplug_ctrl = qdev_get_hotplug_handler(dev);
+ hotplug_handler_unplug(hotplug_ctrl, dev, NULL);
+ }
+ break;
+ case ACPI_CPU_CMD_OFFSET_WR:
+ trace_cpuhp_acpi_write_cmd(cpu_st->selector, data);
+ if (data < CPHP_CMD_MAX) {
+ cpu_st->command = data;
+ if (cpu_st->command == CPHP_GET_NEXT_CPU_WITH_EVENT_CMD) {
+ uint32_t iter = cpu_st->selector;
+
+ do {
+ cdev = &cpu_st->devs[iter];
+ if (cdev->is_inserting || cdev->is_removing) {
+ cpu_st->selector = iter;
+ trace_cpuhp_acpi_cpu_has_events(cpu_st->selector,
+ cdev->is_inserting, cdev->is_removing);
+ break;
+ }
+ iter = iter + 1 < cpu_st->dev_count ? iter + 1 : 0;
+ } while (iter != cpu_st->selector);
+ }
+ }
+ break;
+ case ACPI_CPU_CMD_DATA_OFFSET_RW:
+ switch (cpu_st->command) {
+ case CPHP_OST_EVENT_CMD: {
+ cdev = &cpu_st->devs[cpu_st->selector];
+ cdev->ost_event = data;
+ trace_cpuhp_acpi_write_ost_ev(cpu_st->selector, cdev->ost_event);
+ break;
+ }
+ case CPHP_OST_STATUS_CMD: {
+ cdev = &cpu_st->devs[cpu_st->selector];
+ cdev->ost_status = data;
+ info = acpi_cpu_device_status(cpu_st->selector, cdev);
+ qapi_event_send_acpi_device_ost(info, &error_abort);
+ qapi_free_ACPIOSTInfo(info);
+ trace_cpuhp_acpi_write_ost_status(cpu_st->selector,
+ cdev->ost_status);
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps cpu_hotplug_ops = {
+ .read = cpu_hotplug_rd,
+ .write = cpu_hotplug_wr,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+};
+
+void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
+ CPUHotplugState *state, hwaddr base_addr)
+{
+ MachineState *machine = MACHINE(qdev_get_machine());
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ CPUArchIdList *id_list;
+ int i;
+
+ assert(mc->possible_cpu_arch_ids);
+ id_list = mc->possible_cpu_arch_ids(machine);
+ state->dev_count = id_list->len;
+ state->devs = g_new0(typeof(*state->devs), state->dev_count);
+ for (i = 0; i < id_list->len; i++) {
+ state->devs[i].cpu = id_list->cpus[i].cpu;
+ state->devs[i].arch_id = id_list->cpus[i].arch_id;
+ }
+ g_free(id_list);
+ memory_region_init_io(&state->ctrl_reg, owner, &cpu_hotplug_ops, state,
+ "acpi-mem-hotplug", ACPI_CPU_HOTPLUG_REG_LEN);
+ memory_region_add_subregion(as, base_addr, &state->ctrl_reg);
+}
+
+static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
+{
+ CPUClass *k = CPU_GET_CLASS(dev);
+ uint64_t cpu_arch_id = k->get_arch_id(CPU(dev));
+ int i;
+
+ for (i = 0; i < cpu_st->dev_count; i++) {
+ if (cpu_arch_id == cpu_st->devs[i].arch_id) {
+ return &cpu_st->devs[i];
+ }
+ }
+ return NULL;
+}
+
+void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
+ CPUHotplugState *cpu_st, DeviceState *dev, Error **errp)
+{
+ AcpiCpuStatus *cdev;
+
+ cdev = get_cpu_status(cpu_st, dev);
+ if (!cdev) {
+ return;
+ }
+
+ cdev->cpu = CPU(dev);
+ if (dev->hotplugged) {
+ cdev->is_inserting = true;
+ acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
+ }
+}
+
+void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
+ CPUHotplugState *cpu_st,
+ DeviceState *dev, Error **errp)
+{
+ AcpiCpuStatus *cdev;
+
+ cdev = get_cpu_status(cpu_st, dev);
+ if (!cdev) {
+ return;
+ }
+
+ cdev->is_removing = true;
+ acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
+}
+
+void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st,
+ DeviceState *dev, Error **errp)
+{
+ AcpiCpuStatus *cdev;
+
+ cdev = get_cpu_status(cpu_st, dev);
+ if (!cdev) {
+ return;
+ }
+
+ cdev->cpu = NULL;
+}
+
+static const VMStateDescription vmstate_cpuhp_sts = {
+ .name = "CPU hotplug device state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(is_inserting, AcpiCpuStatus),
+ VMSTATE_BOOL(is_removing, AcpiCpuStatus),
+ VMSTATE_UINT32(ost_event, AcpiCpuStatus),
+ VMSTATE_UINT32(ost_status, AcpiCpuStatus),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_cpu_hotplug = {
+ .name = "CPU hotplug state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(selector, CPUHotplugState),
+ VMSTATE_UINT8(command, CPUHotplugState),
+ VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, CPUHotplugState, dev_count,
+ vmstate_cpuhp_sts, AcpiCpuStatus),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define CPU_NAME_FMT "C%.03X"
+#define CPUHP_RES_DEVICE "PRES"
+#define CPU_LOCK "CPLK"
+#define CPU_STS_METHOD "CSTA"
+#define CPU_SCAN_METHOD "CSCN"
+#define CPU_NOTIFY_METHOD "CTFY"
+#define CPU_EJECT_METHOD "CEJ0"
+#define CPU_OST_METHOD "COST"
+
+#define CPU_ENABLED "CPEN"
+#define CPU_SELECTOR "CSEL"
+#define CPU_COMMAND "CCMD"
+#define CPU_DATA "CDAT"
+#define CPU_INSERT_EVENT "CINS"
+#define CPU_REMOVE_EVENT "CRMV"
+#define CPU_EJECT_EVENT "CEJ0"
+
+void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
+ hwaddr io_base,
+ const char *res_root,
+ const char *event_handler_method)
+{
+ Aml *ifctx;
+ Aml *field;
+ Aml *method;
+ Aml *cpu_ctrl_dev;
+ Aml *cpus_dev;
+ Aml *zero = aml_int(0);
+ Aml *one = aml_int(1);
+ Aml *sb_scope = aml_scope("_SB");
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
+ char *cphp_res_path = g_strdup_printf("%s." CPUHP_RES_DEVICE, res_root);
+ Object *obj = object_resolve_path_type("", TYPE_ACPI_DEVICE_IF, NULL);
+ AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj);
+ AcpiDeviceIf *adev = ACPI_DEVICE_IF(obj);
+
+ cpu_ctrl_dev = aml_device("%s", cphp_res_path);
+ {
+ Aml *crs;
+
+ aml_append(cpu_ctrl_dev,
+ aml_name_decl("_HID", aml_eisaid("PNP0A06")));
+ aml_append(cpu_ctrl_dev,
+ aml_name_decl("_UID", aml_string("CPU Hotplug resources")));
+ aml_append(cpu_ctrl_dev, aml_mutex(CPU_LOCK, 0));
+
+ crs = aml_resource_template();
+ aml_append(crs, aml_io(AML_DECODE16, io_base, io_base, 1,
+ ACPI_CPU_HOTPLUG_REG_LEN));
+ aml_append(cpu_ctrl_dev, aml_name_decl("_CRS", crs));
+
+ /* declare CPU hotplug MMIO region with related access fields */
+ aml_append(cpu_ctrl_dev,
+ aml_operation_region("PRST", AML_SYSTEM_IO, aml_int(io_base),
+ ACPI_CPU_HOTPLUG_REG_LEN));
+
+ field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK,
+ AML_WRITE_AS_ZEROS);
+ aml_append(field, aml_reserved_field(ACPI_CPU_FLAGS_OFFSET_RW * 8));
+ /* 1 if enabled, read only */
+ aml_append(field, aml_named_field(CPU_ENABLED, 1));
+ /* (read) 1 if it has an insert event. (write) 1 to clear the event */
+ aml_append(field, aml_named_field(CPU_INSERT_EVENT, 1));
+ /* (read) 1 if it has a remove event. (write) 1 to clear the event */
+ aml_append(field, aml_named_field(CPU_REMOVE_EVENT, 1));
+ /* initiates device eject, write only */
+ aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1));
+ aml_append(field, aml_reserved_field(4));
+ aml_append(field, aml_named_field(CPU_COMMAND, 8));
+ aml_append(cpu_ctrl_dev, field);
+
+ field = aml_field("PRST", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
+ /* CPU selector, write only */
+ aml_append(field, aml_named_field(CPU_SELECTOR, 32));
+ /* flags + cmd + 2byte align */
+ aml_append(field, aml_reserved_field(4 * 8));
+ aml_append(field, aml_named_field(CPU_DATA, 32));
+ aml_append(cpu_ctrl_dev, field);
+
+ if (opts.has_legacy_cphp) {
+ method = aml_method("_INI", 0, AML_SERIALIZED);
+ /* Switch off the legacy CPU hotplug HW and use the new one;
+ * on reboot the system is in the new mode, and writing 0
+ * to CPU_SELECTOR selects the BSP, which is a NOP at
+ * the time _INI is called. */
+ aml_append(method, aml_store(zero, aml_name(CPU_SELECTOR)));
+ aml_append(cpu_ctrl_dev, method);
+ }
+ }
+ aml_append(sb_scope, cpu_ctrl_dev);
+
+ cpus_dev = aml_device("\\_SB.CPUS");
+ {
+ int i;
+ Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK);
+ Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR);
+ Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED);
+ Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND);
+ Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA);
+ Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT);
+ Aml *rm_evt = aml_name("%s.%s", cphp_res_path, CPU_REMOVE_EVENT);
+ Aml *ej_evt = aml_name("%s.%s", cphp_res_path, CPU_EJECT_EVENT);
+
+ aml_append(cpus_dev, aml_name_decl("_HID", aml_string("ACPI0010")));
+ aml_append(cpus_dev, aml_name_decl("_CID", aml_eisaid("PNP0A05")));
+
+ method = aml_method(CPU_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
+ for (i = 0; i < arch_ids->len; i++) {
+ Aml *cpu = aml_name(CPU_NAME_FMT, i);
+ Aml *uid = aml_arg(0);
+ Aml *event = aml_arg(1);
+
+ ifctx = aml_if(aml_equal(uid, aml_int(i)));
+ {
+ aml_append(ifctx, aml_notify(cpu, event));
+ }
+ aml_append(method, ifctx);
+ }
+ aml_append(cpus_dev, method);
+
+ method = aml_method(CPU_STS_METHOD, 1, AML_SERIALIZED);
+ {
+ Aml *idx = aml_arg(0);
+ Aml *sta = aml_local(0);
+
+ aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+ aml_append(method, aml_store(idx, cpu_selector));
+ aml_append(method, aml_store(zero, sta));
+ ifctx = aml_if(aml_equal(is_enabled, one));
+ {
+ aml_append(ifctx, aml_store(aml_int(0xF), sta));
+ }
+ aml_append(method, ifctx);
+ aml_append(method, aml_release(ctrl_lock));
+ aml_append(method, aml_return(sta));
+ }
+ aml_append(cpus_dev, method);
+
+ method = aml_method(CPU_EJECT_METHOD, 1, AML_SERIALIZED);
+ {
+ Aml *idx = aml_arg(0);
+
+ aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+ aml_append(method, aml_store(idx, cpu_selector));
+ aml_append(method, aml_store(one, ej_evt));
+ aml_append(method, aml_release(ctrl_lock));
+ }
+ aml_append(cpus_dev, method);
+
+ method = aml_method(CPU_SCAN_METHOD, 0, AML_SERIALIZED);
+ {
+ Aml *else_ctx;
+ Aml *while_ctx;
+ Aml *has_event = aml_local(0);
+ Aml *dev_chk = aml_int(1);
+ Aml *eject_req = aml_int(3);
+ Aml *next_cpu_cmd = aml_int(CPHP_GET_NEXT_CPU_WITH_EVENT_CMD);
+
+ aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+ aml_append(method, aml_store(one, has_event));
+ while_ctx = aml_while(aml_equal(has_event, one));
+ {
+ /* clear the loop exit condition; the ins_evt/rm_evt checks
+ * will set it back to 1 as long as next_cpu_cmd returns a CPU
+ * with events */
+ aml_append(while_ctx, aml_store(zero, has_event));
+ aml_append(while_ctx, aml_store(next_cpu_cmd, cpu_cmd));
+ ifctx = aml_if(aml_equal(ins_evt, one));
+ {
+ aml_append(ifctx,
+ aml_call2(CPU_NOTIFY_METHOD, cpu_data, dev_chk));
+ aml_append(ifctx, aml_store(one, ins_evt));
+ aml_append(ifctx, aml_store(one, has_event));
+ }
+ aml_append(while_ctx, ifctx);
+ else_ctx = aml_else();
+ ifctx = aml_if(aml_equal(rm_evt, one));
+ {
+ aml_append(ifctx,
+ aml_call2(CPU_NOTIFY_METHOD, cpu_data, eject_req));
+ aml_append(ifctx, aml_store(one, rm_evt));
+ aml_append(ifctx, aml_store(one, has_event));
+ }
+ aml_append(else_ctx, ifctx);
+ aml_append(while_ctx, else_ctx);
+ }
+ aml_append(method, while_ctx);
+ aml_append(method, aml_release(ctrl_lock));
+ }
+ aml_append(cpus_dev, method);
+
+ method = aml_method(CPU_OST_METHOD, 4, AML_SERIALIZED);
+ {
+ Aml *uid = aml_arg(0);
+ Aml *ev_cmd = aml_int(CPHP_OST_EVENT_CMD);
+ Aml *st_cmd = aml_int(CPHP_OST_STATUS_CMD);
+
+ aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+ aml_append(method, aml_store(uid, cpu_selector));
+ aml_append(method, aml_store(ev_cmd, cpu_cmd));
+ aml_append(method, aml_store(aml_arg(1), cpu_data));
+ aml_append(method, aml_store(st_cmd, cpu_cmd));
+ aml_append(method, aml_store(aml_arg(2), cpu_data));
+ aml_append(method, aml_release(ctrl_lock));
+ }
+ aml_append(cpus_dev, method);
+
+ /* build Processor object for each processor */
+ for (i = 0; i < arch_ids->len; i++) {
+ Aml *dev;
+ Aml *uid = aml_int(i);
+ GArray *madt_buf = g_array_new(0, 1, 1);
+ int arch_id = arch_ids->cpus[i].arch_id;
+
+ if (opts.apci_1_compatible && arch_id < 255) {
+ dev = aml_processor(i, 0, 0, CPU_NAME_FMT, i);
+ } else {
+ dev = aml_device(CPU_NAME_FMT, i);
+ aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
+ aml_append(dev, aml_name_decl("_UID", uid));
+ }
+
+ method = aml_method("_STA", 0, AML_SERIALIZED);
+ aml_append(method, aml_return(aml_call1(CPU_STS_METHOD, uid)));
+ aml_append(dev, method);
+
+ /* build _MAT object */
+ assert(adevc && adevc->madt_cpu);
+ adevc->madt_cpu(adev, i, arch_ids, madt_buf);
+ switch (madt_buf->data[0]) {
+ case ACPI_APIC_PROCESSOR: {
+ AcpiMadtProcessorApic *apic = (void *)madt_buf->data;
+ apic->flags = cpu_to_le32(1);
+ break;
+ }
+ default:
+ assert(0);
+ }
+ aml_append(dev, aml_name_decl("_MAT",
+ aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data)));
+ g_array_free(madt_buf, true);
+
+ method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
+ aml_append(method, aml_call1(CPU_EJECT_METHOD, uid));
+ aml_append(dev, method);
+
+ method = aml_method("_OST", 3, AML_SERIALIZED);
+ aml_append(method,
+ aml_call4(CPU_OST_METHOD, uid, aml_arg(0),
+ aml_arg(1), aml_arg(2))
+ );
+ aml_append(dev, method);
+ aml_append(cpus_dev, dev);
+ }
+ }
+ aml_append(sb_scope, cpus_dev);
+ aml_append(table, sb_scope);
+
+ method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED);
+ aml_append(method, aml_call0("\\_SB.CPUS." CPU_SCAN_METHOD));
+ aml_append(table, method);
+
+ g_free(cphp_res_path);
+ g_free(arch_ids);
+}
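hw/acpi/cpu.c above implements a small I/O register block (CPU selector at offset 0, flags at offset 4, command at offset 5, command data at offset 8) that the generated CSCN method walks to find CPUs with pending insert or remove events. The following standalone model folds the guest-side scan and the QEMU-side register handling into one program against an in-memory fake of the block; the offsets, flag bits and the CPHP_GET_NEXT_CPU_WITH_EVENT_CMD rotation mirror the file above, while everything else (the 4-CPU array, the printf) is purely illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Register offsets and the "find next CPU with an event" command,
     * matching the #defines in hw/acpi/cpu.c above. */
    enum { REG_SELECTOR = 0, REG_FLAGS = 4, REG_CMD = 5, REG_CMD_DATA = 8 };
    enum { CMD_GET_NEXT_CPU_WITH_EVENT = 0 };

    /* Fake device state standing in for CPUHotplugState plus port I/O. */
    typedef struct {
        bool enabled, is_inserting, is_removing;
    } FakeCpu;

    static FakeCpu cpus[4] = {
        { .enabled = true },
        { .enabled = true },
        { .enabled = true, .is_inserting = true }, /* freshly hotplugged */
        { 0 },
    };
    static uint32_t selector;

    static void io_write(unsigned reg, uint32_t data)
    {
        switch (reg) {
        case REG_SELECTOR:
            selector = data;
            break;
        case REG_FLAGS:
            if (data & 2) {           /* write 1 to clear the insert event */
                cpus[selector].is_inserting = false;
            }
            break;
        case REG_CMD:
            if (data == CMD_GET_NEXT_CPU_WITH_EVENT) {
                uint32_t iter = selector;
                do {                  /* rotate until a CPU with events is found */
                    if (cpus[iter].is_inserting || cpus[iter].is_removing) {
                        selector = iter;
                        break;
                    }
                    iter = (iter + 1) % 4;
                } while (iter != selector);
            }
            break;
        }
    }

    static uint32_t io_read(unsigned reg)
    {
        if (reg == REG_FLAGS) {       /* bit0 enabled, bit1 insert, bit2 remove */
            return (cpus[selector].enabled ? 1 : 0)
                 | (cpus[selector].is_inserting ? 2 : 0)
                 | (cpus[selector].is_removing ? 4 : 0);
        }
        if (reg == REG_CMD_DATA) {    /* for this command: the selected CPU */
            return selector;
        }
        return 0;
    }

    /* A single insert-event pass of what the generated CSCN method does. */
    int main(void)
    {
        io_write(REG_SELECTOR, 0);
        io_write(REG_CMD, CMD_GET_NEXT_CPU_WITH_EVENT);
        if (io_read(REG_FLAGS) & 2) {
            printf("Notify(C%03X, 1 /* device check */)\n",
                   (unsigned)io_read(REG_CMD_DATA));
            io_write(REG_FLAGS, 2);   /* ack: clear the insert event */
        }
        return 0;
    }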
diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c
index fe75bd9ac9..e19d902063 100644
--- a/hw/acpi/cpu_hotplug.c
+++ b/hw/acpi/cpu_hotplug.c
@@ -34,7 +34,15 @@ static uint64_t cpu_status_read(void *opaque, hwaddr addr, unsigned int size)
static void cpu_status_write(void *opaque, hwaddr addr, uint64_t data,
unsigned int size)
{
- /* TODO: implement VCPU removal on guest signal that CPU can be removed */
+ /* Firmware never used to write into the CPU present bitmap, so use
+ * this fact as a means to switch QEMU into modern CPU hotplug mode
+ * by writing 0 at the beginning of the legacy CPU bitmap.
+ */
+ if (addr == 0 && data == 0) {
+ AcpiCpuHotplug *cpus = opaque;
+ object_property_set_bool(cpus->device, false, "cpu-hotplug-legacy",
+ &error_abort);
+ }
}
static const MemoryRegionOps AcpiCpuHotplug_ops = {
@@ -83,6 +91,17 @@ void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner,
memory_region_init_io(&gpe_cpu->io, owner, &AcpiCpuHotplug_ops,
gpe_cpu, "acpi-cpu-hotplug", ACPI_GPE_PROC_LEN);
memory_region_add_subregion(parent, base, &gpe_cpu->io);
+ gpe_cpu->device = owner;
+}
+
+void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu,
+ CPUHotplugState *cpuhp_state,
+ uint16_t io_port)
+{
+ MemoryRegion *parent = pci_address_space_io(PCI_DEVICE(gpe_cpu->device));
+
+ memory_region_del_subregion(parent, &gpe_cpu->io);
+ cpu_hotplug_hw_init(parent, gpe_cpu->device, cpuhp_state, io_port);
}
void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,
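The cpu_status_write() change above repurposes the legacy CPU-present bitmap as an opt-in switch: firmware never wrote to it, so a write of 0 at its first byte is taken as the signal to flip the cpu-hotplug-legacy property and replace the legacy I/O region with the modern control block. A minimal standalone sketch of that trigger, with a plain bool standing in for the QOM property and the region swap reduced to a comment:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the PIIX4/ICH9 PM state that owns the hotplug regions. */
    typedef struct {
        bool cpu_hotplug_legacy;
    } FakePmState;

    /* Shape of cpu_status_write(): only a 0 written at offset 0 matters. */
    static void cpu_status_write(FakePmState *s, uint64_t addr, uint64_t data)
    {
        if (addr == 0 && data == 0 && s->cpu_hotplug_legacy) {
            /* In QEMU this is object_property_set_bool(..., "cpu-hotplug-legacy"),
             * whose setter unmaps the legacy bitmap region and maps the new
             * 12-byte control block in its place. */
            s->cpu_hotplug_legacy = false;
            printf("switched to modern CPU hotplug interface\n");
        }
        /* every other write is ignored, as before */
    }

    int main(void)
    {
        FakePmState s = { .cpu_hotplug_legacy = true };
        cpu_status_write(&s, 4, 0xff);  /* ignored */
        cpu_status_write(&s, 0, 0);     /* written by new firmware: switch */
        return 0;
    }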
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index 853c9c4eb7..e5a3c18e52 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -189,6 +189,33 @@ static const VMStateDescription vmstate_tco_io_state = {
}
};
+static bool vmstate_test_use_cpuhp(void *opaque)
+{
+ ICH9LPCPMRegs *s = opaque;
+ return !s->cpu_hotplug_legacy;
+}
+
+static int vmstate_cpuhp_pre_load(void *opaque)
+{
+ ICH9LPCPMRegs *s = opaque;
+ Object *obj = OBJECT(s->gpe_cpu.device);
+ object_property_set_bool(obj, false, "cpu-hotplug-legacy", &error_abort);
+ return 0;
+}
+
+static const VMStateDescription vmstate_cpuhp_state = {
+ .name = "ich9_pm/cpuhp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .needed = vmstate_test_use_cpuhp,
+ .pre_load = vmstate_cpuhp_pre_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_CPU_HOTPLUG(cpuhp_state, ICH9LPCPMRegs),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_ich9_pm = {
.name = "ich9_pm",
.version_id = 1,
@@ -209,6 +236,7 @@ const VMStateDescription vmstate_ich9_pm = {
.subsections = (const VMStateDescription*[]) {
&vmstate_memhp_state,
&vmstate_tco_io_state,
+ &vmstate_cpuhp_state,
NULL
}
};
@@ -306,6 +334,26 @@ static void ich9_pm_set_memory_hotplug_support(Object *obj, bool value,
s->pm.acpi_memory_hotplug.is_enabled = value;
}
+static bool ich9_pm_get_cpu_hotplug_legacy(Object *obj, Error **errp)
+{
+ ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
+
+ return s->pm.cpu_hotplug_legacy;
+}
+
+static void ich9_pm_set_cpu_hotplug_legacy(Object *obj, bool value,
+ Error **errp)
+{
+ ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
+
+ assert(!value);
+ if (s->pm.cpu_hotplug_legacy && value == false) {
+ acpi_switch_to_modern_cphp(&s->pm.gpe_cpu, &s->pm.cpuhp_state,
+ ICH9_CPU_HOTPLUG_IO_BASE);
+ }
+ s->pm.cpu_hotplug_legacy = value;
+}
+
static void ich9_pm_get_disable_s3(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
@@ -397,6 +445,7 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm, Error **errp)
{
static const uint32_t gpe0_len = ICH9_PMIO_GPE0_LEN;
pm->acpi_memory_hotplug.is_enabled = true;
+ pm->cpu_hotplug_legacy = true;
pm->disable_s3 = 0;
pm->disable_s4 = 0;
pm->s4_val = 2;
@@ -412,6 +461,10 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm, Error **errp)
ich9_pm_get_memory_hotplug_support,
ich9_pm_set_memory_hotplug_support,
NULL);
+ object_property_add_bool(obj, "cpu-hotplug-legacy",
+ ich9_pm_get_cpu_hotplug_legacy,
+ ich9_pm_set_cpu_hotplug_legacy,
+ NULL);
object_property_add(obj, ACPI_PM_PROP_S3_DISABLED, "uint8",
ich9_pm_get_disable_s3,
ich9_pm_set_disable_s3,
@@ -440,7 +493,11 @@ void ich9_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
acpi_memory_plug_cb(hotplug_dev, &lpc->pm.acpi_memory_hotplug,
dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
- legacy_acpi_cpu_plug_cb(hotplug_dev, &lpc->pm.gpe_cpu, dev, errp);
+ if (lpc->pm.cpu_hotplug_legacy) {
+ legacy_acpi_cpu_plug_cb(hotplug_dev, &lpc->pm.gpe_cpu, dev, errp);
+ } else {
+ acpi_cpu_plug_cb(hotplug_dev, &lpc->pm.cpuhp_state, dev, errp);
+ }
} else {
error_setg(errp, "acpi: device plug request for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -457,6 +514,10 @@ void ich9_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev,
acpi_memory_unplug_request_cb(hotplug_dev,
&lpc->pm.acpi_memory_hotplug, dev,
errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) &&
+ !lpc->pm.cpu_hotplug_legacy) {
+ acpi_cpu_unplug_request_cb(hotplug_dev, &lpc->pm.cpuhp_state,
+ dev, errp);
} else {
error_setg(errp, "acpi: device unplug request for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -471,6 +532,9 @@ void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
if (lpc->pm.acpi_memory_hotplug.is_enabled &&
object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
acpi_memory_unplug_cb(&lpc->pm.acpi_memory_hotplug, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) &&
+ !lpc->pm.cpu_hotplug_legacy) {
+ acpi_cpu_unplug_cb(&lpc->pm.cpuhp_state, dev, errp);
} else {
error_setg(errp, "acpi: device unplug for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -482,4 +546,7 @@ void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
ICH9LPCState *s = ICH9_LPC_DEVICE(adev);
acpi_memory_ospm_status(&s->pm.acpi_memory_hotplug, list);
+ if (!s->pm.cpu_hotplug_legacy) {
+ acpi_cpu_ospm_status(&s->pm.cpuhp_state, list);
+ }
}
diff --git a/hw/acpi/ipmi.c b/hw/acpi/ipmi.c
new file mode 100644
index 0000000000..7e74ce4460
--- /dev/null
+++ b/hw/acpi/ipmi.c
@@ -0,0 +1,105 @@
+/*
+ * IPMI ACPI firmware handling
+ *
+ * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/ipmi/ipmi.h"
+#include "hw/acpi/aml-build.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/ipmi.h"
+
+static Aml *aml_ipmi_crs(IPMIFwInfo *info)
+{
+ Aml *crs = aml_resource_template();
+
+ /*
+ * The base address is fixed and cannot change. That may be different
+ * if someone does PCI, but we aren't there yet.
+ */
+ switch (info->memspace) {
+ case IPMI_MEMSPACE_IO:
+ aml_append(crs, aml_io(AML_DECODE16, info->base_address,
+ info->base_address + info->register_length - 1,
+ info->register_spacing, info->register_length));
+ break;
+ case IPMI_MEMSPACE_MEM32:
+ aml_append(crs,
+ aml_dword_memory(AML_POS_DECODE,
+ AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_NON_CACHEABLE, AML_READ_WRITE,
+ 0xffffffff,
+ info->base_address,
+ info->base_address + info->register_length - 1,
+ info->register_spacing, info->register_length));
+ break;
+ case IPMI_MEMSPACE_MEM64:
+ aml_append(crs,
+ aml_qword_memory(AML_POS_DECODE,
+ AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_NON_CACHEABLE, AML_READ_WRITE,
+ 0xffffffffffffffffULL,
+ info->base_address,
+ info->base_address + info->register_length - 1,
+ info->register_spacing, info->register_length));
+ break;
+ case IPMI_MEMSPACE_SMBUS:
+ aml_append(crs, aml_return(aml_int(info->base_address)));
+ break;
+ default:
+ abort();
+ }
+
+ if (info->interrupt_number) {
+ aml_append(crs, aml_irq_no_flags(info->interrupt_number));
+ }
+
+ return crs;
+}
+
+static Aml *aml_ipmi_device(IPMIFwInfo *info)
+{
+ Aml *dev;
+ uint16_t version = ((info->ipmi_spec_major_revision << 8)
+ | (info->ipmi_spec_minor_revision << 4));
+
+ assert(info->ipmi_spec_minor_revision <= 15);
+
+ dev = aml_device("MI%d", info->uuid);
+ aml_append(dev, aml_name_decl("_HID", aml_eisaid("IPI0001")));
+ aml_append(dev, aml_name_decl("_STR", aml_string("ipmi_%s",
+ info->interface_name)));
+ aml_append(dev, aml_name_decl("_UID", aml_int(info->uuid)));
+ aml_append(dev, aml_name_decl("_CRS", aml_ipmi_crs(info)));
+ aml_append(dev, aml_name_decl("_IFT", aml_int(info->interface_type)));
+ aml_append(dev, aml_name_decl("_SRV", aml_int(version)));
+
+ return dev;
+}
+
+void build_acpi_ipmi_devices(Aml *scope, BusState *bus)
+{
+
+ BusChild *kid;
+
+ QTAILQ_FOREACH(kid, &bus->children, sibling) {
+ IPMIInterface *ii;
+ IPMIInterfaceClass *iic;
+ IPMIFwInfo info;
+ Object *obj = object_dynamic_cast(OBJECT(kid->child),
+ TYPE_IPMI_INTERFACE);
+
+ if (!obj) {
+ continue;
+ }
+
+ ii = IPMI_INTERFACE(obj);
+ iic = IPMI_INTERFACE_GET_CLASS(obj);
+ iic->get_fwinfo(ii, &info);
+ aml_append(scope, aml_ipmi_device(&info));
+ }
+}
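
Side note (not part of the patch): the _SRV value above packs the IPMI specification revision into one word, major revision in the high byte and minor revision in the high nibble of the low byte. A minimal standalone sketch, assuming an interface that reports IPMI 2.0:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t major = 2, minor = 0;              /* assumed: IPMI spec 2.0 */

    assert(minor <= 15);                       /* minor must fit in a nibble */
    uint16_t version = (major << 8) | (minor << 4);
    printf("_SRV = 0x%04x\n", version);        /* prints _SRV = 0x0200 */
    return 0;
}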
diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
index b4c22627df..e486128aa1 100644
--- a/hw/acpi/nvdimm.c
+++ b/hw/acpi/nvdimm.c
@@ -216,6 +216,26 @@ static uint32_t nvdimm_slot_to_dcr_index(int slot)
return nvdimm_slot_to_spa_index(slot) + 1;
}
+static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle)
+{
+ NVDIMMDevice *nvdimm = NULL;
+ GSList *list, *device_list = nvdimm_get_plugged_device_list();
+
+ for (list = device_list; list; list = list->next) {
+ NVDIMMDevice *nvd = list->data;
+ int slot = object_property_get_int(OBJECT(nvd), PC_DIMM_SLOT_PROP,
+ NULL);
+
+ if (nvdimm_slot_to_handle(slot) == handle) {
+ nvdimm = nvd;
+ break;
+ }
+ }
+
+ g_slist_free(device_list);
+ return nvdimm;
+}
+
/* ACPI 6.0: 5.2.25.1 System Physical Address Range Structure */
static void
nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
@@ -406,6 +426,282 @@ struct NvdimmDsmFuncNoPayloadOut {
} QEMU_PACKED;
typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut;
+struct NvdimmFuncGetLabelSizeOut {
+ /* the size of buffer filled by QEMU. */
+ uint32_t len;
+ uint32_t func_ret_status; /* return status code. */
+ uint32_t label_size; /* the size of label data area. */
+ /*
+ * Maximum length of namespace label data that the platform can
+ * transfer in one Get/Set Namespace Label Data call.
+ */
+ uint32_t max_xfer;
+} QEMU_PACKED;
+typedef struct NvdimmFuncGetLabelSizeOut NvdimmFuncGetLabelSizeOut;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelSizeOut) > 4096);
+
+struct NvdimmFuncGetLabelDataIn {
+ uint32_t offset; /* the offset in the namespace label data area. */
+ uint32_t length; /* the size of data to be read via the function. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncGetLabelDataIn NvdimmFuncGetLabelDataIn;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataIn) +
+ offsetof(NvdimmDsmIn, arg3) > 4096);
+
+struct NvdimmFuncGetLabelDataOut {
+ /* the size of buffer filled by QEMU. */
+ uint32_t len;
+ uint32_t func_ret_status; /* return status code. */
+ uint8_t out_buf[0]; /* the data read via the Get Namespace Label Data function. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > 4096);
+
+struct NvdimmFuncSetLabelDataIn {
+ uint32_t offset; /* the offset in the namespace label data area. */
+ uint32_t length; /* the size of data to be written via the function. */
+ uint8_t in_buf[0]; /* the data written to label data area. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
+ offsetof(NvdimmDsmIn, arg3) > 4096);
+
+static void
+nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
+{
+ NvdimmDsmFunc0Out func0 = {
+ .len = cpu_to_le32(sizeof(func0)),
+ .supported_func = cpu_to_le32(supported_func),
+ };
+ cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof(func0));
+}
+
+static void
+nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
+{
+ NvdimmDsmFuncNoPayloadOut out = {
+ .len = cpu_to_le32(sizeof(out)),
+ .func_ret_status = cpu_to_le32(func_ret_status),
+ };
+ cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
+}
+
+static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
+{
+ /*
+ * function 0 is called to inquire which functions are supported by
+ * OSPM
+ */
+ if (!in->function) {
+ nvdimm_dsm_function0(0 /* No function supported other than
+ function 0 */, dsm_mem_addr);
+ return;
+ }
+
+ /* No function except function 0 is supported yet. */
+ nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+}
+
+/*
+ * the max transfer size is the largest size that can be transferred by
+ * either the 'Get Namespace Label Data' or the 'Set Namespace Label Data'
+ * function.
+ */
+static uint32_t nvdimm_get_max_xfer_label_size(void)
+{
+ uint32_t max_get_size, max_set_size, dsm_memory_size = 4096;
+
+ /*
+ * the max data ACPI can read at one time, returned in the response
+ * of the 'Get Namespace Label Data' function.
+ */
+ max_get_size = dsm_memory_size - sizeof(NvdimmFuncGetLabelDataOut);
+
+ /*
+ * the max data ACPI can write at one time, carried in the input of
+ * the 'Set Namespace Label Data' function.
+ */
+ max_set_size = dsm_memory_size - offsetof(NvdimmDsmIn, arg3) -
+ sizeof(NvdimmFuncSetLabelDataIn);
+
+ return MIN(max_get_size, max_set_size);
+}
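
Worked example (illustrative only, not part of the patch): assuming the 4096-byte DSM buffer used above, a 12-byte NvdimmDsmIn header before arg3, and the 8-byte fixed parts of the get/set label structures, the two limits and their minimum come out as follows.

#include <stdint.h>
#include <stdio.h>

#define DSM_MEM_SIZE   4096u
#define DSM_IN_HDR     12u   /* assumed offsetof(NvdimmDsmIn, arg3) */
#define GET_OUT_HDR    8u    /* len + func_ret_status */
#define SET_IN_HDR     8u    /* offset + length */

int main(void)
{
    uint32_t max_get = DSM_MEM_SIZE - GET_OUT_HDR;              /* 4088 */
    uint32_t max_set = DSM_MEM_SIZE - DSM_IN_HDR - SET_IN_HDR;  /* 4076 */
    uint32_t max_xfer = max_get < max_set ? max_get : max_set;

    printf("max_xfer = %u\n", max_xfer);                        /* 4076 */
    return 0;
}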
+
+/*
+ * DSM Spec Rev1 4.4 Get Namespace Label Size (Function Index 4).
+ *
+ * It gets the size of Namespace Label data area and the max data size
+ * that Get/Set Namespace Label Data functions can transfer.
+ */
+static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr)
+{
+ NvdimmFuncGetLabelSizeOut label_size_out = {
+ .len = cpu_to_le32(sizeof(label_size_out)),
+ };
+ uint32_t label_size, mxfer;
+
+ label_size = nvdimm->label_size;
+ mxfer = nvdimm_get_max_xfer_label_size();
+
+ nvdimm_debug("label_size %#x, max_xfer %#x.\n", label_size, mxfer);
+
+ label_size_out.func_ret_status = cpu_to_le32(0 /* Success */);
+ label_size_out.label_size = cpu_to_le32(label_size);
+ label_size_out.max_xfer = cpu_to_le32(mxfer);
+
+ cpu_physical_memory_write(dsm_mem_addr, &label_size_out,
+ sizeof(label_size_out));
+}
+
+static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm,
+ uint32_t offset, uint32_t length)
+{
+ uint32_t ret = 3 /* Invalid Input Parameters */;
+
+ if (offset + length < offset) {
+ nvdimm_debug("offset %#x + length %#x is overflow.\n", offset,
+ length);
+ return ret;
+ }
+
+ if (nvdimm->label_size < offset + length) {
+ nvdimm_debug("position %#x is beyond label data (len = %" PRIx64 ").\n",
+ offset + length, nvdimm->label_size);
+ return ret;
+ }
+
+ if (length > nvdimm_get_max_xfer_label_size()) {
+ nvdimm_debug("length (%#x) is larger than max_xfer (%#x).\n",
+ length, nvdimm_get_max_xfer_label_size());
+ return ret;
+ }
+
+ return 0 /* Success */;
+}
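
For a quick sanity check of the validation above (a sketch, with an assumed 128 KiB label area): offset 0x1ff80 plus length 0x100 reaches 0x20080, which is past 0x20000, so the request is rejected with status 3.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t label_size = 128 << 10;              /* 0x20000, assumed */
    uint32_t offset = 0x1ff80, length = 0x100;

    if (offset + length < offset) {
        puts("status 3: offset + length wraps around");
    } else if (label_size < offset + length) {
        puts("status 3: beyond the label data area");   /* taken here */
    } else {
        puts("status 0: success");
    }
    return 0;
}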
+
+/*
+ * DSM Spec Rev1 4.5 Get Namespace Label Data (Function Index 5).
+ */
+static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
+ hwaddr dsm_mem_addr)
+{
+ NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
+ NvdimmFuncGetLabelDataIn *get_label_data;
+ NvdimmFuncGetLabelDataOut *get_label_data_out;
+ uint32_t status;
+ int size;
+
+ get_label_data = (NvdimmFuncGetLabelDataIn *)in->arg3;
+ le32_to_cpus(&get_label_data->offset);
+ le32_to_cpus(&get_label_data->length);
+
+ nvdimm_debug("Read Label Data: offset %#x length %#x.\n",
+ get_label_data->offset, get_label_data->length);
+
+ status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset,
+ get_label_data->length);
+ if (status != 0 /* Success */) {
+ nvdimm_dsm_no_payload(status, dsm_mem_addr);
+ return;
+ }
+
+ size = sizeof(*get_label_data_out) + get_label_data->length;
+ assert(size <= 4096);
+ get_label_data_out = g_malloc(size);
+
+ get_label_data_out->len = cpu_to_le32(size);
+ get_label_data_out->func_ret_status = cpu_to_le32(0 /* Success */);
+ nvc->read_label_data(nvdimm, get_label_data_out->out_buf,
+ get_label_data->length, get_label_data->offset);
+
+ cpu_physical_memory_write(dsm_mem_addr, get_label_data_out, size);
+ g_free(get_label_data_out);
+}
+
+/*
+ * DSM Spec Rev1 4.6 Set Namespace Label Data (Function Index 6).
+ */
+static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
+ hwaddr dsm_mem_addr)
+{
+ NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
+ NvdimmFuncSetLabelDataIn *set_label_data;
+ uint32_t status;
+
+ set_label_data = (NvdimmFuncSetLabelDataIn *)in->arg3;
+
+ le32_to_cpus(&set_label_data->offset);
+ le32_to_cpus(&set_label_data->length);
+
+ nvdimm_debug("Write Label Data: offset %#x length %#x.\n",
+ set_label_data->offset, set_label_data->length);
+
+ status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset,
+ set_label_data->length);
+ if (status != 0 /* Success */) {
+ nvdimm_dsm_no_payload(status, dsm_mem_addr);
+ return;
+ }
+
+ assert(sizeof(*in) + sizeof(*set_label_data) + set_label_data->length <=
+ 4096);
+
+ nvc->write_label_data(nvdimm, set_label_data->in_buf,
+ set_label_data->length, set_label_data->offset);
+ nvdimm_dsm_no_payload(0 /* Success */, dsm_mem_addr);
+}
+
+static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
+{
+ NVDIMMDevice *nvdimm = nvdimm_get_device_by_handle(in->handle);
+
+ /* See the comments in nvdimm_dsm_root(). */
+ if (!in->function) {
+ uint32_t supported_func = 0;
+
+ if (nvdimm && nvdimm->label_size) {
+ supported_func |= 0x1 /* Bit 0 indicates whether there is
+ support for any functions other
+ than function 0. */ |
+ 1 << 4 /* Get Namespace Label Size */ |
+ 1 << 5 /* Get Namespace Label Data */ |
+ 1 << 6 /* Set Namespace Label Data */;
+ }
+ nvdimm_dsm_function0(supported_func, dsm_mem_addr);
+ return;
+ }
+
+ if (!nvdimm) {
+ nvdimm_dsm_no_payload(2 /* Non-Existing Memory Device */,
+ dsm_mem_addr);
+ return;
+ }
+
+ /* Encode DSM function according to DSM Spec Rev1. */
+ switch (in->function) {
+ case 4 /* Get Namespace Label Size */:
+ if (nvdimm->label_size) {
+ nvdimm_dsm_label_size(nvdimm, dsm_mem_addr);
+ return;
+ }
+ break;
+ case 5 /* Get Namespace Label Data */:
+ if (nvdimm->label_size) {
+ nvdimm_dsm_get_label_data(nvdimm, in, dsm_mem_addr);
+ return;
+ }
+ break;
+ case 0x6 /* Set Namespace Label Data */:
+ if (nvdimm->label_size) {
+ nvdimm_dsm_set_label_data(nvdimm, in, dsm_mem_addr);
+ return;
+ }
+ break;
+ }
+
+ nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+}
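
For reference, with labels present the function-0 bitmap built above evaluates to 0x71 (bit 0 plus bits 4, 5 and 6); a trivial check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t supported_func = 0x1 | 1 << 4 | 1 << 5 | 1 << 6;

    printf("supported_func = 0x%x\n", supported_func);  /* 0x71 */
    return 0;
}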
+
static uint64_t
nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
{
@@ -436,26 +732,22 @@ nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
nvdimm_debug("Revision %#x Handler %#x Function %#x.\n", in->revision,
in->handle, in->function);
- /*
- * function 0 is called to inquire which functions are supported by
- * OSPM
- */
- if (in->function == 0) {
- NvdimmDsmFunc0Out func0 = {
- .len = cpu_to_le32(sizeof(func0)),
- /* No function supported other than function 0 */
- .supported_func = cpu_to_le32(0),
- };
- cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof func0);
- } else {
- /* No function except function 0 is supported yet. */
- NvdimmDsmFuncNoPayloadOut out = {
- .len = cpu_to_le32(sizeof(out)),
- .func_ret_status = cpu_to_le32(1) /* Not Supported */,
- };
- cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
+ if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. */) {
+ nvdimm_debug("Revision %#x is not supported, expect %#x.\n",
+ in->revision, 0x1);
+ nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+ goto exit;
+ }
+
+ /* Handle 0 is reserved for NVDIMM Root Device. */
+ if (!in->handle) {
+ nvdimm_dsm_root(in, dsm_mem_addr);
+ goto exit;
}
+ nvdimm_dsm_device(in, dsm_mem_addr);
+
+exit:
g_free(in);
}
@@ -487,18 +779,39 @@ void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
static void nvdimm_build_common_dsm(Aml *dev)
{
- Aml *method, *ifctx, *function, *dsm_mem, *unpatched, *result_size;
+ Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *result_size;
+ Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid;
+ Aml *pckg, *pckg_index, *pckg_buf;
uint8_t byte_list[1];
- method = aml_method(NVDIMM_COMMON_DSM, 4, AML_SERIALIZED);
+ method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED);
+ uuid = aml_arg(0);
function = aml_arg(2);
+ handle = aml_arg(4);
dsm_mem = aml_name(NVDIMM_ACPI_MEM_ADDR);
/*
* do not support any method if DSM memory address has not been
* patched.
*/
- unpatched = aml_if(aml_equal(dsm_mem, aml_int(0x0)));
+ unpatched = aml_equal(dsm_mem, aml_int(0x0));
+
+ expected_uuid = aml_local(0);
+
+ ifctx = aml_if(aml_equal(handle, aml_int(0x0)));
+ aml_append(ifctx, aml_store(
+ aml_touuid("2F10E7A4-9E91-11E4-89D3-123B93F75CBA")
+ /* UUID for NVDIMM Root Device */, expected_uuid));
+ aml_append(method, ifctx);
+ elsectx = aml_else();
+ aml_append(elsectx, aml_store(
+ aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66")
+ /* UUID for NVDIMM Devices */, expected_uuid));
+ aml_append(method, elsectx);
+
+ uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));
+
+ unsupport = aml_if(aml_or(unpatched, uuid_invalid, NULL));
/*
* function 0 is called to inquire what functions are supported by
@@ -507,24 +820,42 @@ static void nvdimm_build_common_dsm(Aml *dev)
ifctx = aml_if(aml_equal(function, aml_int(0)));
byte_list[0] = 0 /* No function Supported */;
aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
- aml_append(unpatched, ifctx);
+ aml_append(unsupport, ifctx);
/* No function is supported yet. */
byte_list[0] = 1 /* Not Supported */;
- aml_append(unpatched, aml_return(aml_buffer(1, byte_list)));
- aml_append(method, unpatched);
+ aml_append(unsupport, aml_return(aml_buffer(1, byte_list)));
+ aml_append(method, unsupport);
/*
* The HDLE indicates the DSM function is issued from which device,
- * it is not used at this time as no function is supported yet.
- * Currently we make it always be 0 for all the devices and will set
- * the appropriate value once real function is implemented.
+ * it reserves 0 for root device and is the handle for NVDIMM devices.
+ * See the comments in nvdimm_slot_to_handle().
*/
- aml_append(method, aml_store(aml_int(0x0), aml_name("HDLE")));
+ aml_append(method, aml_store(handle, aml_name("HDLE")));
aml_append(method, aml_store(aml_arg(1), aml_name("REVS")));
aml_append(method, aml_store(aml_arg(2), aml_name("FUNC")));
/*
+ * The fourth parameter (Arg3) of _DSM is a package which contains
+ * a buffer, the layout of the buffer is specified by UUID (Arg0),
+ * Revision ID (Arg1) and Function Index (Arg2) which are documented
+ * in the DSM Spec.
+ */
+ pckg = aml_arg(3);
+ ifctx = aml_if(aml_and(aml_equal(aml_object_type(pckg),
+ aml_int(4 /* Package */)) /* It is a Package? */,
+ aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */,
+ NULL));
+
+ pckg_index = aml_local(2);
+ pckg_buf = aml_local(3);
+ aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index));
+ aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf));
+ aml_append(ifctx, aml_store(pckg_buf, aml_name("ARG3")));
+ aml_append(method, ifctx);
+
+ /*
* tell QEMU about the real address of DSM memory, then QEMU
* gets the control and fills the result in DSM memory.
*/
@@ -542,13 +873,14 @@ static void nvdimm_build_common_dsm(Aml *dev)
aml_append(dev, method);
}
-static void nvdimm_build_device_dsm(Aml *dev)
+static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
{
Aml *method;
method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
- aml_append(method, aml_return(aml_call4(NVDIMM_COMMON_DSM, aml_arg(0),
- aml_arg(1), aml_arg(2), aml_arg(3))));
+ aml_append(method, aml_return(aml_call5(NVDIMM_COMMON_DSM, aml_arg(0),
+ aml_arg(1), aml_arg(2), aml_arg(3),
+ aml_int(handle))));
aml_append(dev, method);
}
@@ -573,7 +905,7 @@ static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
*/
aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));
- nvdimm_build_device_dsm(nvdimm_dev);
+ nvdimm_build_device_dsm(nvdimm_dev, handle);
aml_append(root_dev, nvdimm_dev);
}
}
@@ -665,7 +997,9 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
aml_append(dev, field);
nvdimm_build_common_dsm(dev);
- nvdimm_build_device_dsm(dev);
+
+ /* 0 is reserved for root device. */
+ nvdimm_build_device_dsm(dev, 0);
nvdimm_build_nvdimm_devices(device_list, dev);
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index c48cb1b91a..2adc246b00 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -34,6 +34,7 @@
#include "hw/acpi/piix4.h"
#include "hw/acpi/pcihp.h"
#include "hw/acpi/cpu_hotplug.h"
+#include "hw/acpi/cpu.h"
#include "hw/hotplug.h"
#include "hw/mem/pc-dimm.h"
#include "hw/acpi/memory_hotplug.h"
@@ -86,7 +87,9 @@ typedef struct PIIX4PMState {
uint8_t disable_s4;
uint8_t s4_val;
+ bool cpu_hotplug_legacy;
AcpiCpuHotplug gpe_cpu;
+ CPUHotplugState cpuhp_state;
MemHotplugState acpi_memory_hotplug;
} PIIX4PMState;
@@ -273,6 +276,32 @@ static const VMStateDescription vmstate_memhp_state = {
}
};
+static bool vmstate_test_use_cpuhp(void *opaque)
+{
+ PIIX4PMState *s = opaque;
+ return !s->cpu_hotplug_legacy;
+}
+
+static int vmstate_cpuhp_pre_load(void *opaque)
+{
+ Object *obj = OBJECT(opaque);
+ object_property_set_bool(obj, false, "cpu-hotplug-legacy", &error_abort);
+ return 0;
+}
+
+static const VMStateDescription vmstate_cpuhp_state = {
+ .name = "piix4_pm/cpuhp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .needed = vmstate_test_use_cpuhp,
+ .pre_load = vmstate_cpuhp_pre_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_CPU_HOTPLUG(cpuhp_state, PIIX4PMState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
/* qemu-kvm 1.2 uses version 3 but advertised as 2
* To support incoming qemu-kvm 1.2 migration, change version_id
* and minimum_version_id to 2 below (which breaks migration from
@@ -307,6 +336,7 @@ static const VMStateDescription vmstate_acpi = {
},
.subsections = (const VMStateDescription*[]) {
&vmstate_memhp_state,
+ &vmstate_cpuhp_state,
NULL
}
};
@@ -352,7 +382,11 @@ static void piix4_device_plug_cb(HotplugHandler *hotplug_dev,
} else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_plug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
- legacy_acpi_cpu_plug_cb(hotplug_dev, &s->gpe_cpu, dev, errp);
+ if (s->cpu_hotplug_legacy) {
+ legacy_acpi_cpu_plug_cb(hotplug_dev, &s->gpe_cpu, dev, errp);
+ } else {
+ acpi_cpu_plug_cb(hotplug_dev, &s->cpuhp_state, dev, errp);
+ }
} else {
error_setg(errp, "acpi: device plug request for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -371,6 +405,9 @@ static void piix4_device_unplug_request_cb(HotplugHandler *hotplug_dev,
} else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_unplug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev,
errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) &&
+ !s->cpu_hotplug_legacy) {
+ acpi_cpu_unplug_request_cb(hotplug_dev, &s->cpuhp_state, dev, errp);
} else {
error_setg(errp, "acpi: device unplug request for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -385,6 +422,9 @@ static void piix4_device_unplug_cb(HotplugHandler *hotplug_dev,
if (s->acpi_memory_hotplug.is_enabled &&
object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
acpi_memory_unplug_cb(&s->acpi_memory_hotplug, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) &&
+ !s->cpu_hotplug_legacy) {
+ acpi_cpu_unplug_cb(&s->cpuhp_state, dev, errp);
} else {
error_setg(errp, "acpi: device unplug for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -560,6 +600,26 @@ static const MemoryRegionOps piix4_gpe_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
+
+static bool piix4_get_cpu_hotplug_legacy(Object *obj, Error **errp)
+{
+ PIIX4PMState *s = PIIX4_PM(obj);
+
+ return s->cpu_hotplug_legacy;
+}
+
+static void piix4_set_cpu_hotplug_legacy(Object *obj, bool value, Error **errp)
+{
+ PIIX4PMState *s = PIIX4_PM(obj);
+
+ assert(!value);
+ if (s->cpu_hotplug_legacy && value == false) {
+ acpi_switch_to_modern_cphp(&s->gpe_cpu, &s->cpuhp_state,
+ PIIX4_CPU_HOTPLUG_IO_BASE);
+ }
+ s->cpu_hotplug_legacy = value;
+}
+
static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
PCIBus *bus, PIIX4PMState *s)
{
@@ -570,6 +630,11 @@ static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
acpi_pcihp_init(OBJECT(s), &s->acpi_pci_hotplug, bus, parent,
s->use_acpi_pci_hotplug);
+ s->cpu_hotplug_legacy = true;
+ object_property_add_bool(OBJECT(s), "cpu-hotplug-legacy",
+ piix4_get_cpu_hotplug_legacy,
+ piix4_set_cpu_hotplug_legacy,
+ NULL);
legacy_acpi_cpu_hotplug_init(parent, OBJECT(s), &s->gpe_cpu,
PIIX4_CPU_HOTPLUG_IO_BASE);
@@ -583,6 +648,9 @@ static void piix4_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
PIIX4PMState *s = PIIX4_PM(adev);
acpi_memory_ospm_status(&s->acpi_memory_hotplug, list);
+ if (!s->cpu_hotplug_legacy) {
+ acpi_cpu_ospm_status(&s->cpuhp_state, list);
+ }
}
static void piix4_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
@@ -631,6 +699,7 @@ static void piix4_pm_class_init(ObjectClass *klass, void *data)
hc->unplug = piix4_device_unplug_cb;
adevc->ospm_status = piix4_ospm_status;
adevc->send_event = piix4_send_gpe;
+ adevc->madt_cpu = pc_madt_cpu_entry;
}
static const TypeInfo piix4_pm_info = {
diff --git a/hw/acpi/trace-events b/hw/acpi/trace-events
index e95b2183ac..5aa3ba67c8 100644
--- a/hw/acpi/trace-events
+++ b/hw/acpi/trace-events
@@ -16,3 +16,17 @@ mhp_acpi_clear_insert_evt(uint32_t slot) "slot[0x%"PRIx32"] clear insert event"
mhp_acpi_clear_remove_evt(uint32_t slot) "slot[0x%"PRIx32"] clear remove event"
mhp_acpi_pc_dimm_deleted(uint32_t slot) "slot[0x%"PRIx32"] pc-dimm deleted"
mhp_acpi_pc_dimm_delete_failed(uint32_t slot) "slot[0x%"PRIx32"] pc-dimm delete failed"
+
+# hw/acpi/cpu.c
+cpuhp_acpi_invalid_idx_selected(uint32_t idx) "0x%"PRIx32
+cpuhp_acpi_read_flags(uint32_t idx, uint8_t flags) "idx[0x%"PRIx32"] flags: 0x%"PRIx8
+cpuhp_acpi_write_idx(uint32_t idx) "set active cpu idx: 0x%"PRIx32
+cpuhp_acpi_write_cmd(uint32_t idx, uint8_t cmd) "idx[0x%"PRIx32"] cmd: 0x%"PRIx8
+cpuhp_acpi_read_cmd_data(uint32_t idx, uint32_t data) "idx[0x%"PRIx32"] data: 0x%"PRIx32
+cpuhp_acpi_cpu_has_events(uint32_t idx, bool ins, bool rm) "idx[0x%"PRIx32"] inserting: %d, removing: %d"
+cpuhp_acpi_clear_inserting_evt(uint32_t idx) "idx[0x%"PRIx32"]"
+cpuhp_acpi_clear_remove_evt(uint32_t idx) "idx[0x%"PRIx32"]"
+cpuhp_acpi_ejecting_invalid_cpu(uint32_t idx) "0x%"PRIx32
+cpuhp_acpi_ejecting_cpu(uint32_t idx) "0x%"PRIx32
+cpuhp_acpi_write_ost_ev(uint32_t slot, uint32_t ev) "idx[0x%"PRIx32"] OST EVENT: 0x%"PRIx32
+cpuhp_acpi_write_ost_status(uint32_t slot, uint32_t st) "idx[0x%"PRIx32"] OST STATUS: 0x%"PRIx32
diff --git a/hw/arm/ast2400.c b/hw/arm/ast2400.c
index 4a9de0e10c..0555843620 100644
--- a/hw/arm/ast2400.c
+++ b/hw/arm/ast2400.c
@@ -23,10 +23,19 @@
#define AST2400_UART_5_BASE 0x00184000
#define AST2400_IOMEM_SIZE 0x00200000
#define AST2400_IOMEM_BASE 0x1E600000
+#define AST2400_SMC_BASE AST2400_IOMEM_BASE /* Legacy SMC */
+#define AST2400_FMC_BASE 0x1E620000
+#define AST2400_SPI_BASE 0x1E630000
#define AST2400_VIC_BASE 0x1E6C0000
+#define AST2400_SCU_BASE 0x1E6E2000
#define AST2400_TIMER_BASE 0x1E782000
#define AST2400_I2C_BASE 0x1E78A000
+#define AST2400_FMC_FLASH_BASE 0x20000000
+#define AST2400_SPI_FLASH_BASE 0x30000000
+
+#define AST2400_A0_SILICON_REV 0x02000303
+
static const int uart_irqs[] = { 9, 32, 33, 34, 10 };
static const int timer_irqs[] = { 16, 17, 18, 35, 36, 37, 38, 39, };
@@ -72,13 +81,31 @@ static void ast2400_init(Object *obj)
object_initialize(&s->i2c, sizeof(s->i2c), TYPE_ASPEED_I2C);
object_property_add_child(obj, "i2c", OBJECT(&s->i2c), NULL);
qdev_set_parent_bus(DEVICE(&s->i2c), sysbus_get_default());
+
+ object_initialize(&s->scu, sizeof(s->scu), TYPE_ASPEED_SCU);
+ object_property_add_child(obj, "scu", OBJECT(&s->scu), NULL);
+ qdev_set_parent_bus(DEVICE(&s->scu), sysbus_get_default());
+ qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev",
+ AST2400_A0_SILICON_REV);
+ object_property_add_alias(obj, "hw-strap1", OBJECT(&s->scu),
+ "hw-strap1", &error_abort);
+ object_property_add_alias(obj, "hw-strap2", OBJECT(&s->scu),
+ "hw-strap2", &error_abort);
+
+ object_initialize(&s->smc, sizeof(s->smc), "aspeed.smc.fmc");
+ object_property_add_child(obj, "smc", OBJECT(&s->smc), NULL);
+ qdev_set_parent_bus(DEVICE(&s->smc), sysbus_get_default());
+
+ object_initialize(&s->spi, sizeof(s->spi), "aspeed.smc.spi");
+ object_property_add_child(obj, "spi", OBJECT(&s->spi), NULL);
+ qdev_set_parent_bus(DEVICE(&s->spi), sysbus_get_default());
}
static void ast2400_realize(DeviceState *dev, Error **errp)
{
int i;
AST2400State *s = AST2400(dev);
- Error *err = NULL;
+ Error *err = NULL, *local_err = NULL;
/* IO space */
memory_region_init_io(&s->iomem, NULL, &ast2400_io_ops, NULL,
@@ -110,6 +137,14 @@ static void ast2400_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq);
}
+ /* SCU */
+ object_property_set_bool(OBJECT(&s->scu), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->scu), 0, AST2400_SCU_BASE);
+
/* UART - attach an 8250 to the IO space as our UART5 */
if (serial_hds[0]) {
qemu_irq uart5 = qdev_get_gpio_in(DEVICE(&s->vic), uart_irqs[4]);
@@ -126,6 +161,30 @@ static void ast2400_realize(DeviceState *dev, Error **errp)
sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c), 0, AST2400_I2C_BASE);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0,
qdev_get_gpio_in(DEVICE(&s->vic), 12));
+
+ /* SMC */
+ object_property_set_int(OBJECT(&s->smc), 1, "num-cs", &err);
+ object_property_set_bool(OBJECT(&s->smc), true, "realized", &local_err);
+ error_propagate(&err, local_err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->smc), 0, AST2400_FMC_BASE);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->smc), 1, AST2400_FMC_FLASH_BASE);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->smc), 0,
+ qdev_get_gpio_in(DEVICE(&s->vic), 19));
+
+ /* SPI */
+ object_property_set_int(OBJECT(&s->spi), 1, "num-cs", &err);
+ object_property_set_bool(OBJECT(&s->spi), true, "realized", &local_err);
+ error_propagate(&err, local_err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi), 0, AST2400_SPI_BASE);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi), 1, AST2400_SPI_FLASH_BASE);
}
static void ast2400_class_init(ObjectClass *oc, void *data)
diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c
index 1cd749aa4b..1a53e51cf1 100644
--- a/hw/arm/fsl-imx25.c
+++ b/hw/arm/fsl-imx25.c
@@ -249,16 +249,16 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp)
}
/* initialize 2 x 16 KB ROM */
- memory_region_init_rom_device(&s->rom[0], NULL, NULL, NULL,
- "imx25.rom0", FSL_IMX25_ROM0_SIZE, &err);
+ memory_region_init_rom(&s->rom[0], NULL,
+ "imx25.rom0", FSL_IMX25_ROM0_SIZE, &err);
if (err) {
error_propagate(errp, err);
return;
}
memory_region_add_subregion(get_system_memory(), FSL_IMX25_ROM0_ADDR,
&s->rom[0]);
- memory_region_init_rom_device(&s->rom[1], NULL, NULL, NULL,
- "imx25.rom1", FSL_IMX25_ROM1_SIZE, &err);
+ memory_region_init_rom(&s->rom[1], NULL,
+ "imx25.rom1", FSL_IMX25_ROM1_SIZE, &err);
if (err) {
error_propagate(errp, err);
return;
diff --git a/hw/arm/fsl-imx31.c b/hw/arm/fsl-imx31.c
index 31a3a87911..b283b71eb4 100644
--- a/hw/arm/fsl-imx31.c
+++ b/hw/arm/fsl-imx31.c
@@ -219,9 +219,8 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp)
}
/* On a real system, the first 16k is a `secure boot rom' */
- memory_region_init_rom_device(&s->secure_rom, NULL, NULL, NULL,
- "imx31.secure_rom",
- FSL_IMX31_SECURE_ROM_SIZE, &err);
+ memory_region_init_rom(&s->secure_rom, NULL, "imx31.secure_rom",
+ FSL_IMX31_SECURE_ROM_SIZE, &err);
if (err) {
error_propagate(errp, err);
return;
@@ -230,8 +229,8 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp)
&s->secure_rom);
/* There is also a 16k ROM */
- memory_region_init_rom_device(&s->rom, NULL, NULL, NULL, "imx31.rom",
- FSL_IMX31_ROM_SIZE, &err);
+ memory_region_init_rom(&s->rom, NULL, "imx31.rom",
+ FSL_IMX31_ROM_SIZE, &err);
if (err) {
error_propagate(errp, err);
return;
diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c
index 0c00e7a560..ed392a9e7f 100644
--- a/hw/arm/fsl-imx6.c
+++ b/hw/arm/fsl-imx6.c
@@ -399,8 +399,8 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
FSL_IMX6_ENET_MAC_1588_IRQ));
/* ROM memory */
- memory_region_init_rom_device(&s->rom, NULL, NULL, NULL, "imx6.rom",
- FSL_IMX6_ROM_SIZE, &err);
+ memory_region_init_rom(&s->rom, NULL, "imx6.rom",
+ FSL_IMX6_ROM_SIZE, &err);
if (err) {
error_propagate(errp, err);
return;
@@ -409,8 +409,8 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp)
&s->rom);
/* CAAM memory */
- memory_region_init_rom_device(&s->caam, NULL, NULL, NULL, "imx6.caam",
- FSL_IMX6_CAAM_MEM_SIZE, &err);
+ memory_region_init_rom(&s->caam, NULL, "imx6.caam",
+ FSL_IMX6_CAAM_MEM_SIZE, &err);
if (err) {
error_propagate(errp, err);
return;
diff --git a/hw/arm/palmetto-bmc.c b/hw/arm/palmetto-bmc.c
index a51d960510..54e29a865d 100644
--- a/hw/arm/palmetto-bmc.c
+++ b/hw/arm/palmetto-bmc.c
@@ -18,6 +18,8 @@
#include "hw/arm/ast2400.h"
#include "hw/boards.h"
#include "qemu/log.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/blockdev.h"
static struct arm_boot_info palmetto_bmc_binfo = {
.loader_start = AST2400_SDRAM_BASE,
@@ -30,6 +32,32 @@ typedef struct PalmettoBMCState {
MemoryRegion ram;
} PalmettoBMCState;
+static void palmetto_bmc_init_flashes(AspeedSMCState *s, const char *flashtype,
+ Error **errp)
+{
+ int i;
+
+ for (i = 0; i < s->num_cs; ++i) {
+ AspeedSMCFlash *fl = &s->flashes[i];
+ DriveInfo *dinfo = drive_get_next(IF_MTD);
+ qemu_irq cs_line;
+
+ /*
+ * FIXME: check that we are not using a flash module exceeding
+ * the controller segment size
+ */
+ fl->flash = ssi_create_slave_no_init(s->spi, flashtype);
+ if (dinfo) {
+ qdev_prop_set_drive(fl->flash, "drive", blk_by_legacy_dinfo(dinfo),
+ errp);
+ }
+ qdev_init_nofail(fl->flash);
+
+ cs_line = qdev_get_gpio_in_named(fl->flash, SSI_GPIO_CS, 0);
+ sysbus_connect_irq(SYS_BUS_DEVICE(s), i + 1, cs_line);
+ }
+}
+
static void palmetto_bmc_init(MachineState *machine)
{
PalmettoBMCState *bmc;
@@ -44,9 +72,14 @@ static void palmetto_bmc_init(MachineState *machine)
&bmc->ram);
object_property_add_const_link(OBJECT(&bmc->soc), "ram", OBJECT(&bmc->ram),
&error_abort);
+ object_property_set_int(OBJECT(&bmc->soc), 0x120CE416, "hw-strap1",
+ &error_abort);
object_property_set_bool(OBJECT(&bmc->soc), true, "realized",
&error_abort);
+ palmetto_bmc_init_flashes(&bmc->soc.smc, "n25q256a", &error_abort);
+ palmetto_bmc_init_flashes(&bmc->soc.spi, "mx25l25635e", &error_abort);
+
palmetto_bmc_binfo.kernel_filename = machine->kernel_filename;
palmetto_bmc_binfo.initrd_filename = machine->initrd_filename;
palmetto_bmc_binfo.kernel_cmdline = machine->kernel_cmdline;
diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c
index 776c51e398..4e7ac8cc4f 100644
--- a/hw/arm/sabrelite.c
+++ b/hw/arm/sabrelite.c
@@ -86,13 +86,19 @@ static void sabrelite_init(MachineState *machine)
spi_bus = (SSIBus *)qdev_get_child_bus(DEVICE(spi_dev), "spi");
if (spi_bus) {
DeviceState *flash_dev;
-
- flash_dev = ssi_create_slave(spi_bus, "sst25vf016b");
- if (flash_dev) {
- qemu_irq cs_line = qdev_get_gpio_in_named(flash_dev,
- SSI_GPIO_CS, 0);
- sysbus_connect_irq(SYS_BUS_DEVICE(spi_dev), 1, cs_line);
+ qemu_irq cs_line;
+ DriveInfo *dinfo = drive_get_next(IF_MTD);
+
+ flash_dev = ssi_create_slave_no_init(spi_bus, "sst25vf016b");
+ if (dinfo) {
+ qdev_prop_set_drive(flash_dev, "drive",
+ blk_by_legacy_dinfo(dinfo),
+ &error_fatal);
}
+ qdev_init_nofail(flash_dev);
+
+ cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0);
+ sysbus_connect_irq(SYS_BUS_DEVICE(spi_dev), 1, cs_line);
}
}
}
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
index ba40f8302b..41cc2eeeb1 100644
--- a/hw/arm/spitz.c
+++ b/hw/arm/spitz.c
@@ -598,15 +598,13 @@ static uint32_t spitz_lcdtg_transfer(SSISlave *dev, uint32_t value)
return 0;
}
-static int spitz_lcdtg_init(SSISlave *dev)
+static void spitz_lcdtg_realize(SSISlave *dev, Error **errp)
{
SpitzLCDTG *s = FROM_SSI_SLAVE(SpitzLCDTG, dev);
spitz_lcdtg = s;
s->bl_power = 0;
s->bl_intensity = 0x20;
-
- return 0;
}
/* SSP devices */
@@ -666,7 +664,7 @@ static void spitz_adc_temp_on(void *opaque, int line, int level)
max111x_set_input(max1111, MAX1111_BATT_TEMP, 0);
}
-static int corgi_ssp_init(SSISlave *d)
+static void corgi_ssp_realize(SSISlave *d, Error **errp)
{
DeviceState *dev = DEVICE(d);
CorgiSSPState *s = FROM_SSI_SLAVE(CorgiSSPState, d);
@@ -675,8 +673,6 @@ static int corgi_ssp_init(SSISlave *d)
s->bus[0] = ssi_create_bus(dev, "ssi0");
s->bus[1] = ssi_create_bus(dev, "ssi1");
s->bus[2] = ssi_create_bus(dev, "ssi2");
-
- return 0;
}
static void spitz_ssp_attach(PXA2xxState *cpu)
@@ -1121,7 +1117,7 @@ static void corgi_ssp_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
- k->init = corgi_ssp_init;
+ k->realize = corgi_ssp_realize;
k->transfer = corgi_ssp_transfer;
dc->vmsd = &vmstate_corgi_ssp_regs;
}
@@ -1150,7 +1146,7 @@ static void spitz_lcdtg_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
- k->init = spitz_lcdtg_init;
+ k->realize = spitz_lcdtg_realize;
k->transfer = spitz_lcdtg_transfer;
dc->vmsd = &vmstate_spitz_lcdtg_regs;
}
diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c
index 4e9494f94c..2db66508b5 100644
--- a/hw/arm/tosa.c
+++ b/hw/arm/tosa.c
@@ -127,10 +127,9 @@ static uint32_t tosa_ssp_tansfer(SSISlave *dev, uint32_t value)
return 0;
}
-static int tosa_ssp_init(SSISlave *dev)
+static void tosa_ssp_realize(SSISlave *dev, Error **errp)
{
/* Nothing to do. */
- return 0;
}
#define TYPE_TOSA_DAC "tosa_dac"
@@ -283,7 +282,7 @@ static void tosa_ssp_class_init(ObjectClass *klass, void *data)
{
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
- k->init = tosa_ssp_init;
+ k->realize = tosa_ssp_realize;
k->transfer = tosa_ssp_tansfer;
}
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index c5c125e920..6e098afd1f 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -1021,6 +1021,7 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0,
nr_pcie_buses - 1);
+ qemu_fdt_setprop(vbi->fdt, nodename, "dma-coherent", NULL, 0);
if (vbi->v2m_phandle) {
qemu_fdt_setprop_cells(vbi->fdt, nodename, "msi-parent",
diff --git a/hw/arm/xilinx_zynq.c b/hw/arm/xilinx_zynq.c
index aefebcfa6d..7dac20d67d 100644
--- a/hw/arm/xilinx_zynq.c
+++ b/hw/arm/xilinx_zynq.c
@@ -138,7 +138,13 @@ static inline void zynq_init_spi_flashes(uint32_t base_addr, qemu_irq irq,
spi = (SSIBus *)qdev_get_child_bus(dev, bus_name);
for (j = 0; j < num_ss; ++j) {
- flash_dev = ssi_create_slave(spi, "n25q128");
+ DriveInfo *dinfo = drive_get_next(IF_MTD);
+ flash_dev = ssi_create_slave_no_init(spi, "n25q128");
+ if (dinfo) {
+ qdev_prop_set_drive(flash_dev, "drive",
+ blk_by_legacy_dinfo(dinfo), &error_fatal);
+ }
+ qdev_init_nofail(flash_dev);
cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0);
sysbus_connect_irq(busdev, i * num_ss + j + 1, cs_line);
@@ -294,6 +300,12 @@ static void zynq_init(MachineState *machine)
sysbus_connect_irq(busdev, n + 1, pic[dma_irqs[n] - IRQ_OFFSET]);
}
+ dev = qdev_create(NULL, "xlnx.ps7-dev-cfg");
+ qdev_init_nofail(dev);
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_connect_irq(busdev, 0, pic[40 - IRQ_OFFSET]);
+ sysbus_mmio_map(busdev, 0, 0xF8007000);
+
zynq_binfo.ram_size = ram_size;
zynq_binfo.kernel_filename = kernel_filename;
zynq_binfo.kernel_cmdline = kernel_cmdline;
diff --git a/hw/arm/xlnx-ep108.c b/hw/arm/xlnx-ep108.c
index 34b4641712..4ec590a25d 100644
--- a/hw/arm/xlnx-ep108.c
+++ b/hw/arm/xlnx-ep108.c
@@ -88,12 +88,19 @@ static void xlnx_ep108_init(MachineState *machine)
SSIBus *spi_bus;
DeviceState *flash_dev;
qemu_irq cs_line;
+ DriveInfo *dinfo = drive_get_next(IF_MTD);
gchar *bus_name = g_strdup_printf("spi%d", i);
spi_bus = (SSIBus *)qdev_get_child_bus(DEVICE(&s->soc), bus_name);
g_free(bus_name);
- flash_dev = ssi_create_slave(spi_bus, "sst25wf080");
+ flash_dev = ssi_create_slave_no_init(spi_bus, "sst25wf080");
+ if (dinfo) {
+ qdev_prop_set_drive(flash_dev, "drive", blk_by_legacy_dinfo(dinfo),
+ &error_fatal);
+ }
+ qdev_init_nofail(flash_dev);
+
cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.spi[i]), 1, cs_line);
diff --git a/hw/arm/z2.c b/hw/arm/z2.c
index aea895a500..68a92f3184 100644
--- a/hw/arm/z2.c
+++ b/hw/arm/z2.c
@@ -151,14 +151,12 @@ static void z2_lcd_cs(void *opaque, int line, int level)
z2_lcd->selected = !level;
}
-static int zipit_lcd_init(SSISlave *dev)
+static void zipit_lcd_realize(SSISlave *dev, Error **errp)
{
ZipitLCD *z = FROM_SSI_SLAVE(ZipitLCD, dev);
z->selected = 0;
z->enabled = 0;
z->pos = 0;
-
- return 0;
}
static VMStateDescription vmstate_zipit_lcd_state = {
@@ -181,7 +179,7 @@ static void zipit_lcd_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
- k->init = zipit_lcd_init;
+ k->realize = zipit_lcd_realize;
k->transfer = zipit_lcd_transfer;
dc->vmsd = &vmstate_zipit_lcd_state;
}
diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c
index d2599604d1..42a6f4885a 100644
--- a/hw/audio/pcspk.c
+++ b/hw/audio/pcspk.c
@@ -31,6 +31,7 @@
#include "qemu/timer.h"
#include "hw/timer/i8254.h"
#include "hw/audio/pcspk.h"
+#include "qapi/error.h"
#define PCSPK_BUF_LEN 1792
#define PCSPK_SAMPLE_RATE 32000
@@ -169,6 +170,11 @@ static void pcspk_initfn(Object *obj)
PCSpkState *s = PC_SPEAKER(obj);
memory_region_init_io(&s->ioport, OBJECT(s), &pcspk_io_ops, s, "pcspk", 1);
+
+ object_property_add_link(obj, "pit", TYPE_PIT_COMMON,
+ (Object **)&s->pit,
+ qdev_prop_allow_set_link_before_realize,
+ 0, &error_abort);
}
static void pcspk_realizefn(DeviceState *dev, Error **errp)
@@ -183,7 +189,6 @@ static void pcspk_realizefn(DeviceState *dev, Error **errp)
static Property pcspk_properties[] = {
DEFINE_PROP_UINT32("iobase", PCSpkState, iobase, -1),
- DEFINE_PROP_PTR("pit", PCSpkState, pit),
DEFINE_PROP_END_OF_LIST(),
};
@@ -194,7 +199,7 @@ static void pcspk_class_initfn(ObjectClass *klass, void *data)
dc->realize = pcspk_realizefn;
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
dc->props = pcspk_properties;
- /* Reason: pointer property "pit", realize sets global pcspk_state */
+ /* Reason: realize sets global pcspk_state */
dc->cannot_instantiate_with_device_add_yet = true;
}
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 2073f9a270..54b9ac1da6 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -31,11 +31,9 @@ struct VirtIOBlockDataPlane {
bool stopping;
VirtIOBlkConf *conf;
-
VirtIODevice *vdev;
- VirtQueue *vq; /* virtqueue vring */
- EventNotifier *guest_notifier; /* irq */
QEMUBH *bh; /* bh for guest notification */
+ unsigned long *batch_notify_vqs;
/* Note that these EventNotifiers are assigned by value. This is
* fine as long as you do not call event_notifier_cleanup on them
@@ -47,20 +45,36 @@ struct VirtIOBlockDataPlane {
};
/* Raise an interrupt to signal guest, if necessary */
-void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s)
+void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq)
{
+ set_bit(virtio_get_queue_index(vq), s->batch_notify_vqs);
qemu_bh_schedule(s->bh);
}
static void notify_guest_bh(void *opaque)
{
VirtIOBlockDataPlane *s = opaque;
+ unsigned nvqs = s->conf->num_queues;
+ unsigned long bitmap[BITS_TO_LONGS(nvqs)];
+ unsigned j;
- if (!virtio_should_notify(s->vdev, s->vq)) {
- return;
- }
+ memcpy(bitmap, s->batch_notify_vqs, sizeof(bitmap));
+ memset(s->batch_notify_vqs, 0, sizeof(bitmap));
+
+ for (j = 0; j < nvqs; j += BITS_PER_LONG) {
+ unsigned long bits = bitmap[j];
- event_notifier_set(s->guest_notifier);
+ while (bits != 0) {
+ unsigned i = j + ctzl(bits);
+ VirtQueue *vq = virtio_get_queue(s->vdev, i);
+
+ if (virtio_should_notify(s->vdev, vq)) {
+ event_notifier_set(virtio_queue_get_guest_notifier(vq));
+ }
+
+ bits &= bits - 1; /* clear right-most bit */
+ }
+ }
}
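
The loop above visits each pending virtqueue by scanning the bitmap word with ctzl() and clearing the lowest set bit with bits &= bits - 1. A standalone sketch of the same walk, substituting the GCC/Clang builtin for QEMU's ctzl():

#include <stdio.h>

int main(void)
{
    unsigned long bits = 0x29;      /* e.g. virtqueues 0, 3 and 5 pending */

    while (bits != 0) {
        unsigned i = __builtin_ctzl(bits);  /* index of the lowest set bit */

        printf("notify vq %u\n", i);
        bits &= bits - 1;                   /* clear that bit */
    }
    return 0;
}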
/* Context: QEMU global mutex held */
@@ -79,7 +93,7 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
}
/* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers || !k->set_host_notifier) {
+ if (!k->set_guest_notifiers || !k->ioeventfd_started) {
error_setg(errp,
"device is incompatible with dataplane "
"(transport does not support notifiers)");
@@ -104,6 +118,7 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
}
s->ctx = iothread_get_aio_context(s->iothread);
s->bh = aio_bh_new(s->ctx, notify_guest_bh, s);
+ s->batch_notify_vqs = bitmap_new(conf->num_queues);
*dataplane = s;
}
@@ -116,6 +131,7 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s)
}
virtio_blk_data_plane_stop(s);
+ g_free(s->batch_notify_vqs);
qemu_bh_delete(s->bh);
object_unref(OBJECT(s->iothread));
g_free(s);
@@ -138,6 +154,8 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
+ unsigned i;
+ unsigned nvqs = s->conf->num_queues;
int r;
if (vblk->dataplane_started || s->starting) {
@@ -145,22 +163,25 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
}
s->starting = true;
- s->vq = virtio_get_queue(s->vdev, 0);
/* Set up guest notifier (irq) */
- r = k->set_guest_notifiers(qbus->parent, 1, true);
+ r = k->set_guest_notifiers(qbus->parent, nvqs, true);
if (r != 0) {
fprintf(stderr, "virtio-blk failed to set guest notifier (%d), "
"ensure -enable-kvm is set\n", r);
goto fail_guest_notifiers;
}
- s->guest_notifier = virtio_queue_get_guest_notifier(s->vq);
/* Set up virtqueue notify */
- r = k->set_host_notifier(qbus->parent, 0, true);
- if (r != 0) {
- fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
- goto fail_host_notifier;
+ for (i = 0; i < nvqs; i++) {
+ r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, true);
+ if (r != 0) {
+ fprintf(stderr, "virtio-blk failed to set host notifier (%d)\n", r);
+ while (i--) {
+ virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
+ }
+ goto fail_guest_notifiers;
+ }
}
s->starting = false;
@@ -170,17 +191,23 @@ void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s)
blk_set_aio_context(s->conf->conf.blk, s->ctx);
/* Kick right away to begin processing requests already in vring */
- event_notifier_set(virtio_queue_get_host_notifier(s->vq));
+ for (i = 0; i < nvqs; i++) {
+ VirtQueue *vq = virtio_get_queue(s->vdev, i);
+
+ event_notifier_set(virtio_queue_get_host_notifier(vq));
+ }
/* Get this show started by hooking up our callbacks */
aio_context_acquire(s->ctx);
- virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx,
- virtio_blk_data_plane_handle_output);
+ for (i = 0; i < nvqs; i++) {
+ VirtQueue *vq = virtio_get_queue(s->vdev, i);
+
+ virtio_queue_aio_set_host_notifier_handler(vq, s->ctx,
+ virtio_blk_data_plane_handle_output);
+ }
aio_context_release(s->ctx);
return;
- fail_host_notifier:
- k->set_guest_notifiers(qbus->parent, 1, false);
fail_guest_notifiers:
vblk->dataplane_disabled = true;
s->starting = false;
@@ -193,6 +220,8 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s->vdev)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
VirtIOBlock *vblk = VIRTIO_BLK(s->vdev);
+ unsigned i;
+ unsigned nvqs = s->conf->num_queues;
if (!vblk->dataplane_started || s->stopping) {
return;
@@ -210,17 +239,23 @@ void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s)
aio_context_acquire(s->ctx);
/* Stop notifications for new requests from guest */
- virtio_queue_aio_set_host_notifier_handler(s->vq, s->ctx, NULL);
+ for (i = 0; i < nvqs; i++) {
+ VirtQueue *vq = virtio_get_queue(s->vdev, i);
+
+ virtio_queue_aio_set_host_notifier_handler(vq, s->ctx, NULL);
+ }
/* Drain and switch bs back to the QEMU main loop */
blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
aio_context_release(s->ctx);
- k->set_host_notifier(qbus->parent, 0, false);
+ for (i = 0; i < nvqs; i++) {
+ virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
+ }
/* Clean up guest notifier (irq) */
- k->set_guest_notifiers(qbus->parent, 1, false);
+ k->set_guest_notifiers(qbus->parent, nvqs, false);
vblk->dataplane_started = false;
s->stopping = false;
diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h
index 0714c11a2b..b1f0b95b32 100644
--- a/hw/block/dataplane/virtio-blk.h
+++ b/hw/block/dataplane/virtio-blk.h
@@ -26,6 +26,6 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_start(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_stop(VirtIOBlockDataPlane *s);
void virtio_blk_data_plane_drain(VirtIOBlockDataPlane *s);
-void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s);
+void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq);
#endif /* HW_DATAPLANE_VIRTIO_BLK_H */
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index 51d8596056..d9b27939dd 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -28,6 +28,7 @@
#include "hw/ssi/ssi.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
+#include "qapi/error.h"
#ifndef M25P80_ERR_DEBUG
#define M25P80_ERR_DEBUG 0
@@ -53,12 +54,17 @@
/* 16 MiB max in 3 byte address mode */
#define MAX_3BYTES_SIZE 0x1000000
+#define SPI_NOR_MAX_ID_LEN 6
+
typedef struct FlashPartInfo {
const char *part_name;
- /* jedec code. (jedec >> 16) & 0xff is the 1st byte, >> 8 the 2nd etc */
- uint32_t jedec;
- /* extended jedec code */
- uint16_t ext_jedec;
+ /*
+ * This array stores the ID bytes.
+ * The first three bytes are the JEDEC ID.
+ * JEDEC ID zero means "no ID" (mostly older chips).
+ */
+ uint8_t id[SPI_NOR_MAX_ID_LEN];
+ uint8_t id_len;
/* there is confusion between manufacturers as to what a sector is. In this
* device model, a "sector" is the size that is erased by the ERASE_SECTOR
* command (opcode 0xd8).
@@ -70,11 +76,33 @@ typedef struct FlashPartInfo {
} FlashPartInfo;
/* adapted from linux */
-
-#define INFO(_part_name, _jedec, _ext_jedec, _sector_size, _n_sectors, _flags)\
- .part_name = (_part_name),\
- .jedec = (_jedec),\
- .ext_jedec = (_ext_jedec),\
+/* Used when the "_ext_id" is two bytes at most */
+#define INFO(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags)\
+ .part_name = _part_name,\
+ .id = {\
+ ((_jedec_id) >> 16) & 0xff,\
+ ((_jedec_id) >> 8) & 0xff,\
+ (_jedec_id) & 0xff,\
+ ((_ext_id) >> 8) & 0xff,\
+ (_ext_id) & 0xff,\
+ },\
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),\
+ .sector_size = (_sector_size),\
+ .n_sectors = (_n_sectors),\
+ .page_size = 256,\
+ .flags = (_flags),
+
+#define INFO6(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags)\
+ .part_name = _part_name,\
+ .id = {\
+ ((_jedec_id) >> 16) & 0xff,\
+ ((_jedec_id) >> 8) & 0xff,\
+ (_jedec_id) & 0xff,\
+ ((_ext_id) >> 16) & 0xff,\
+ ((_ext_id) >> 8) & 0xff,\
+ (_ext_id) & 0xff,\
+ },\
+ .id_len = 6,\
.sector_size = (_sector_size),\
.n_sectors = (_n_sectors),\
.page_size = 256,\
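
To see what the INFO() macro above produces, take an entry with JEDEC ID 0x20ba19 and no extended ID (as used for the n25q256a13 part): id becomes {0x20, 0xba, 0x19} and id_len is 3. A small sketch of that expansion:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t jedec_id = 0x20ba19, ext_id = 0;    /* assumed example part */
    uint8_t id[3] = {
        (jedec_id >> 16) & 0xff,                 /* 0x20 */
        (jedec_id >> 8) & 0xff,                  /* 0xba */
        jedec_id & 0xff,                         /* 0x19 */
    };
    unsigned id_len = !jedec_id ? 0 : (3 + (ext_id ? 2 : 0));

    assert(id_len == 3);
    printf("id = %02x %02x %02x, id_len = %u\n", id[0], id[1], id[2], id_len);
    return 0;
}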
@@ -102,12 +130,26 @@ typedef struct FlashPartInfo {
#define EVCFG_QUAD_IO_ENABLED (1 << 7)
#define NVCFG_4BYTE_ADDR_MASK (1 << 0)
#define NVCFG_LOWER_SEGMENT_MASK (1 << 1)
-#define CFG_UPPER_128MB_SEG_ENABLED 0x3
/* Numonyx (Micron) Flag Status Register macros */
#define FSR_4BYTE_ADDR_MODE_ENABLED 0x1
#define FSR_FLASH_READY (1 << 7)
+/* Spansion configuration registers macros. */
+#define SPANSION_QUAD_CFG_POS 0
+#define SPANSION_QUAD_CFG_LEN 1
+#define SPANSION_DUMMY_CLK_POS 0
+#define SPANSION_DUMMY_CLK_LEN 4
+#define SPANSION_ADDR_LEN_POS 7
+#define SPANSION_ADDR_LEN_LEN 1
+
+/*
+ * Spansion read mode command length in bytes;
+ * the mode is currently not supported.
+ */
+
+#define SPANSION_CONTINUOUS_READ_MODE_CMD_LEN 1
+
static const FlashPartInfo known_devices[] = {
/* Atmel -- some are (confusingly) marketed as "DataFlash" */
{ INFO("at25fs010", 0x1f6601, 0, 32 << 10, 4, ER_4K) },
@@ -158,6 +200,8 @@ static const FlashPartInfo known_devices[] = {
{ INFO("mx25l12855e", 0xc22618, 0, 64 << 10, 256, 0) },
{ INFO("mx25l25635e", 0xc22019, 0, 64 << 10, 512, 0) },
{ INFO("mx25l25655e", 0xc22619, 0, 64 << 10, 512, 0) },
+ { INFO("mx66u51235f", 0xc2253a, 0, 64 << 10, 1024, ER_4K | ER_32K) },
+ { INFO("mx66u1g45g", 0xc2253b, 0, 64 << 10, 2048, ER_4K | ER_32K) },
/* Micron */
{ INFO("n25q032a11", 0x20bb16, 0, 64 << 10, 64, ER_4K) },
@@ -168,6 +212,11 @@ static const FlashPartInfo known_devices[] = {
{ INFO("n25q128a13", 0x20ba18, 0, 64 << 10, 256, ER_4K) },
{ INFO("n25q256a11", 0x20bb19, 0, 64 << 10, 512, ER_4K) },
{ INFO("n25q256a13", 0x20ba19, 0, 64 << 10, 512, ER_4K) },
+ { INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) },
+ { INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, ER_4K) },
+ { INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) },
+ { INFO("mt25ql01g", 0x20ba21, 0, 64 << 10, 2048, ER_4K) },
+ { INFO("mt25qu01g", 0x20bb21, 0, 64 << 10, 2048, ER_4K) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
@@ -176,8 +225,8 @@ static const FlashPartInfo known_devices[] = {
{ INFO("s25sl064p", 0x010216, 0x4d00, 64 << 10, 128, ER_4K) },
{ INFO("s25fl256s0", 0x010219, 0x4d00, 256 << 10, 128, 0) },
{ INFO("s25fl256s1", 0x010219, 0x4d01, 64 << 10, 512, 0) },
- { INFO("s25fl512s", 0x010220, 0x4d00, 256 << 10, 256, 0) },
- { INFO("s70fl01gs", 0x010221, 0x4d00, 256 << 10, 256, 0) },
+ { INFO6("s25fl512s", 0x010220, 0x4d0080, 256 << 10, 256, 0) },
+ { INFO6("s70fl01gs", 0x010221, 0x4d0080, 256 << 10, 512, 0) },
{ INFO("s25sl12800", 0x012018, 0x0300, 256 << 10, 64, 0) },
{ INFO("s25sl12801", 0x012018, 0x0301, 64 << 10, 256, 0) },
{ INFO("s25fl129p0", 0x012018, 0x4d00, 256 << 10, 64, 0) },
@@ -190,6 +239,10 @@ static const FlashPartInfo known_devices[] = {
{ INFO("s25fl016k", 0xef4015, 0, 64 << 10, 32, ER_4K | ER_32K) },
{ INFO("s25fl064k", 0xef4017, 0, 64 << 10, 128, ER_4K | ER_32K) },
+ /* Spansion -- boot sectors support */
+ { INFO6("s25fs512s", 0x010220, 0x4d0081, 256 << 10, 256, 0) },
+ { INFO6("s70fs01gs", 0x010221, 0x4d0081, 256 << 10, 512, 0) },
+
/* SST -- large erase sizes are "overlays", "sectors" are 4<< 10 */
{ INFO("sst25vf040b", 0xbf258d, 0, 64 << 10, 8, ER_4K) },
{ INFO("sst25vf080b", 0xbf258e, 0, 64 << 10, 16, ER_4K) },
@@ -240,10 +293,6 @@ static const FlashPartInfo known_devices[] = {
{ INFO("w25q80", 0xef5014, 0, 64 << 10, 16, ER_4K) },
{ INFO("w25q80bl", 0xef4014, 0, 64 << 10, 16, ER_4K) },
{ INFO("w25q256", 0xef4019, 0, 64 << 10, 512, ER_4K) },
-
- { INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) },
- { INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, ER_4K) },
- { INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) },
};
typedef enum {
@@ -255,6 +304,7 @@ typedef enum {
JEDEC_READ = 0x9f,
BULK_ERASE = 0xc7,
READ_FSR = 0x70,
+ RDCR = 0x15,
READ = 0x03,
READ4 = 0x13,
@@ -271,12 +321,14 @@ typedef enum {
PP = 0x02,
PP4 = 0x12,
+ PP4_4 = 0x3e,
DPP = 0xa2,
QPP = 0x32,
ERASE_4K = 0x20,
ERASE4_4K = 0x21,
ERASE_32K = 0x52,
+ ERASE4_32K = 0x5c,
ERASE_SECTOR = 0xd8,
ERASE4_SECTOR = 0xdc,
@@ -289,6 +341,13 @@ typedef enum {
RESET_ENABLE = 0x66,
RESET_MEMORY = 0x99,
+ /*
+ * Micron: 0x35 - enable QPI
+ * Spansion: 0x35 - read control register
+ */
+ RDCR_EQIO = 0x35,
+ RSTQIO = 0xf5,
+
RNVCR = 0xB5,
WNVCR = 0xB1,
@@ -304,9 +363,18 @@ typedef enum {
STATE_PAGE_PROGRAM,
STATE_READ,
STATE_COLLECTING_DATA,
+ STATE_COLLECTING_VAR_LEN_DATA,
STATE_READING_DATA,
} CMDState;
+typedef enum {
+ MAN_SPANSION,
+ MAN_MACRONIX,
+ MAN_NUMONYX,
+ MAN_WINBOND,
+ MAN_GENERIC,
+} Manufacturer;
+
typedef struct Flash {
SSISlave parent_obj;
@@ -322,13 +390,24 @@ typedef struct Flash {
uint32_t pos;
uint8_t needed_bytes;
uint8_t cmd_in_progress;
- uint64_t cur_addr;
+ uint32_t cur_addr;
uint32_t nonvolatile_cfg;
+ /* Configuration register for Macronix */
uint32_t volatile_cfg;
uint32_t enh_volatile_cfg;
+ /* Spansion cfg registers. */
+ uint8_t spansion_cr1nv;
+ uint8_t spansion_cr2nv;
+ uint8_t spansion_cr3nv;
+ uint8_t spansion_cr4nv;
+ uint8_t spansion_cr1v;
+ uint8_t spansion_cr2v;
+ uint8_t spansion_cr3v;
+ uint8_t spansion_cr4v;
bool write_enable;
bool four_bytes_address_mode;
bool reset_enable;
+ bool quad_enable;
uint8_t ear;
int64_t dirty_page;
@@ -350,8 +429,29 @@ typedef struct M25P80Class {
#define M25P80_GET_CLASS(obj) \
OBJECT_GET_CLASS(M25P80Class, (obj), TYPE_M25P80)
+static inline Manufacturer get_man(Flash *s)
+{
+ switch (s->pi->id[0]) {
+ case 0x20:
+ return MAN_NUMONYX;
+ case 0xEF:
+ return MAN_WINBOND;
+ case 0x01:
+ return MAN_SPANSION;
+ case 0xC2:
+ return MAN_MACRONIX;
+ default:
+ return MAN_GENERIC;
+ }
+}
+
static void blk_sync_complete(void *opaque, int ret)
{
+ QEMUIOVector *iov = opaque;
+
+ qemu_iovec_destroy(iov);
+ g_free(iov);
+
/* do nothing. Masters do not directly interact with the backing store,
* only the working copy so no mutexing required.
*/
@@ -359,31 +459,31 @@ static void blk_sync_complete(void *opaque, int ret)
static void flash_sync_page(Flash *s, int page)
{
- QEMUIOVector iov;
+ QEMUIOVector *iov = g_new(QEMUIOVector, 1);
if (!s->blk || blk_is_read_only(s->blk)) {
return;
}
- qemu_iovec_init(&iov, 1);
- qemu_iovec_add(&iov, s->storage + page * s->pi->page_size,
+ qemu_iovec_init(iov, 1);
+ qemu_iovec_add(iov, s->storage + page * s->pi->page_size,
s->pi->page_size);
- blk_aio_pwritev(s->blk, page * s->pi->page_size, &iov, 0,
- blk_sync_complete, NULL);
+ blk_aio_pwritev(s->blk, page * s->pi->page_size, iov, 0,
+ blk_sync_complete, iov);
}
static inline void flash_sync_area(Flash *s, int64_t off, int64_t len)
{
- QEMUIOVector iov;
+ QEMUIOVector *iov = g_new(QEMUIOVector, 1);
if (!s->blk || blk_is_read_only(s->blk)) {
return;
}
assert(!(len % BDRV_SECTOR_SIZE));
- qemu_iovec_init(&iov, 1);
- qemu_iovec_add(&iov, s->storage + off, len);
- blk_aio_pwritev(s->blk, off, &iov, 0, blk_sync_complete, NULL);
+ qemu_iovec_init(iov, 1);
+ qemu_iovec_add(iov, s->storage + off, len);
+ blk_aio_pwritev(s->blk, off, iov, 0, blk_sync_complete, iov);
}
static void flash_erase(Flash *s, int offset, FlashCMD cmd)
@@ -398,6 +498,7 @@ static void flash_erase(Flash *s, int offset, FlashCMD cmd)
capa_to_assert = ER_4K;
break;
case ERASE_32K:
+ case ERASE4_32K:
len = 32 << 10;
capa_to_assert = ER_32K;
break;
@@ -435,9 +536,9 @@ static inline void flash_sync_dirty(Flash *s, int64_t newpage)
}
static inline
-void flash_write8(Flash *s, uint64_t addr, uint8_t data)
+void flash_write8(Flash *s, uint32_t addr, uint8_t data)
{
- int64_t page = addr / s->pi->page_size;
+ uint32_t page = addr / s->pi->page_size;
uint8_t prev = s->storage[s->cur_addr];
if (!s->write_enable) {
@@ -445,7 +546,7 @@ void flash_write8(Flash *s, uint64_t addr, uint8_t data)
}
if ((prev ^ data) & data) {
- DB_PRINT_L(1, "programming zero to one! addr=%" PRIx64 " %" PRIx8
+ DB_PRINT_L(1, "programming zero to one! addr=%" PRIx32 " %" PRIx8
" -> %" PRIx8 "\n", addr, prev, data);
}
@@ -468,9 +569,11 @@ static inline int get_addr_length(Flash *s)
switch (s->cmd_in_progress) {
case PP4:
+ case PP4_4:
case READ4:
case QIOR4:
case ERASE4_4K:
+ case ERASE4_32K:
case ERASE4_SECTOR:
case FAST_READ4:
case DOR4:
@@ -484,18 +587,16 @@ static inline int get_addr_length(Flash *s)
static void complete_collecting_data(Flash *s)
{
- int i;
-
- s->cur_addr = 0;
+ int i, n;
- for (i = 0; i < get_addr_length(s); ++i) {
+ n = get_addr_length(s);
+ s->cur_addr = (n == 3 ? s->ear : 0);
+ for (i = 0; i < n; ++i) {
s->cur_addr <<= 8;
s->cur_addr |= s->data[i];
}
- if (get_addr_length(s) == 3) {
- s->cur_addr += (s->ear & 0x3) * MAX_3BYTES_SIZE;
- }
+ s->cur_addr &= s->size - 1;
s->state = STATE_IDLE;
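
A worked example of the address assembly above, assuming a 32 MiB part (so size - 1 masks to 25 bits), a 3-byte command with address bytes 0x12 0x34 0x56, and the extended address register holding 1:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t size = 32u << 20;                  /* 0x2000000, assumed part size */
    uint8_t ear = 1;                            /* selects the upper 16 MiB */
    uint8_t data[3] = { 0x12, 0x34, 0x56 };     /* address bytes from the guest */
    uint32_t cur_addr = ear;                    /* EAR seeds the top byte */
    int i;

    for (i = 0; i < 3; i++) {
        cur_addr = (cur_addr << 8) | data[i];
    }
    cur_addr &= size - 1;

    printf("cur_addr = 0x%x\n", cur_addr);      /* 0x1123456 */
    return 0;
}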
@@ -504,6 +605,7 @@ static void complete_collecting_data(Flash *s)
case QPP:
case PP:
case PP4:
+ case PP4_4:
s->state = STATE_PAGE_PROGRAM;
break;
case READ:
@@ -523,11 +625,25 @@ static void complete_collecting_data(Flash *s)
case ERASE_4K:
case ERASE4_4K:
case ERASE_32K:
+ case ERASE4_32K:
case ERASE_SECTOR:
case ERASE4_SECTOR:
flash_erase(s, s->cur_addr, s->cmd_in_progress);
break;
case WRSR:
+ switch (get_man(s)) {
+ case MAN_SPANSION:
+ s->quad_enable = !!(s->data[1] & 0x02);
+ break;
+ case MAN_MACRONIX:
+ s->quad_enable = extract32(s->data[0], 6, 1);
+ if (s->len > 1) {
+ s->four_bytes_address_mode = extract32(s->data[1], 5, 1);
+ }
+ break;
+ default:
+ break;
+ }
if (s->write_enable) {
s->write_enable = false;
}
@@ -561,8 +677,10 @@ static void reset_memory(Flash *s)
s->state = STATE_IDLE;
s->write_enable = false;
s->reset_enable = false;
+ s->quad_enable = false;
- if (((s->pi->jedec >> 16) & 0xFF) == JEDEC_NUMONYX) {
+ switch (get_man(s)) {
+ case MAN_NUMONYX:
s->volatile_cfg = 0;
s->volatile_cfg |= VCFG_DUMMY;
s->volatile_cfg |= VCFG_WRAP_SEQUENTIAL;
@@ -592,16 +710,147 @@ static void reset_memory(Flash *s)
s->four_bytes_address_mode = true;
}
if (!(s->nonvolatile_cfg & NVCFG_LOWER_SEGMENT_MASK)) {
- s->ear = CFG_UPPER_128MB_SEG_ENABLED;
+ s->ear = s->size / MAX_3BYTES_SIZE - 1;
}
+ break;
+ case MAN_MACRONIX:
+ s->volatile_cfg = 0x7;
+ break;
+ case MAN_SPANSION:
+ s->spansion_cr1v = s->spansion_cr1nv;
+ s->spansion_cr2v = s->spansion_cr2nv;
+ s->spansion_cr3v = s->spansion_cr3nv;
+ s->spansion_cr4v = s->spansion_cr4nv;
+ s->quad_enable = extract32(s->spansion_cr1v,
+ SPANSION_QUAD_CFG_POS,
+ SPANSION_QUAD_CFG_LEN
+ );
+ s->four_bytes_address_mode = extract32(s->spansion_cr2v,
+ SPANSION_ADDR_LEN_POS,
+ SPANSION_ADDR_LEN_LEN
+ );
+ break;
+ default:
+ break;
}
DB_PRINT_L(0, "Reset done.\n");
}
+static void decode_fast_read_cmd(Flash *s)
+{
+ s->needed_bytes = get_addr_length(s);
+ switch (get_man(s)) {
+    /* Dummy cycles - modeled with byte writes instead of bits */
+ case MAN_WINBOND:
+ s->needed_bytes += 8;
+ break;
+ case MAN_NUMONYX:
+ s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
+ break;
+ case MAN_MACRONIX:
+ if (extract32(s->volatile_cfg, 6, 2) == 1) {
+ s->needed_bytes += 6;
+ } else {
+ s->needed_bytes += 8;
+ }
+ break;
+ case MAN_SPANSION:
+ s->needed_bytes += extract32(s->spansion_cr2v,
+ SPANSION_DUMMY_CLK_POS,
+ SPANSION_DUMMY_CLK_LEN
+ );
+ break;
+ default:
+ break;
+ }
+ s->pos = 0;
+ s->len = 0;
+ s->state = STATE_COLLECTING_DATA;
+}
+
+static void decode_dio_read_cmd(Flash *s)
+{
+ s->needed_bytes = get_addr_length(s);
+    /* Dummy cycles modeled with byte writes instead of bits */
+ switch (get_man(s)) {
+ case MAN_WINBOND:
+ s->needed_bytes += 8;
+ break;
+ case MAN_SPANSION:
+ s->needed_bytes += SPANSION_CONTINUOUS_READ_MODE_CMD_LEN;
+ s->needed_bytes += extract32(s->spansion_cr2v,
+ SPANSION_DUMMY_CLK_POS,
+ SPANSION_DUMMY_CLK_LEN
+ );
+ break;
+ case MAN_NUMONYX:
+ s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
+ break;
+ case MAN_MACRONIX:
+ switch (extract32(s->volatile_cfg, 6, 2)) {
+ case 1:
+ s->needed_bytes += 6;
+ break;
+ case 2:
+ s->needed_bytes += 8;
+ break;
+ default:
+ s->needed_bytes += 4;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ s->pos = 0;
+ s->len = 0;
+ s->state = STATE_COLLECTING_DATA;
+}
+
+static void decode_qio_read_cmd(Flash *s)
+{
+ s->needed_bytes = get_addr_length(s);
+    /* Dummy cycles modeled with byte writes instead of bits */
+ switch (get_man(s)) {
+ case MAN_WINBOND:
+ s->needed_bytes += 8;
+ break;
+ case MAN_SPANSION:
+ s->needed_bytes += SPANSION_CONTINUOUS_READ_MODE_CMD_LEN;
+ s->needed_bytes += extract32(s->spansion_cr2v,
+ SPANSION_DUMMY_CLK_POS,
+ SPANSION_DUMMY_CLK_LEN
+ );
+ break;
+ case MAN_NUMONYX:
+ s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
+ break;
+ case MAN_MACRONIX:
+ switch (extract32(s->volatile_cfg, 6, 2)) {
+ case 1:
+ s->needed_bytes += 4;
+ break;
+ case 2:
+ s->needed_bytes += 8;
+ break;
+ default:
+ s->needed_bytes += 6;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ s->pos = 0;
+ s->len = 0;
+ s->state = STATE_COLLECTING_DATA;
+}
+
static void decode_new_cmd(Flash *s, uint32_t value)
{
s->cmd_in_progress = value;
+ int i;
DB_PRINT_L(0, "decoded new command:%x\n", value);
if (value != RESET_MEMORY) {
@@ -613,6 +862,7 @@ static void decode_new_cmd(Flash *s, uint32_t value)
case ERASE_4K:
case ERASE4_4K:
case ERASE_32K:
+ case ERASE4_32K:
case ERASE_SECTOR:
case ERASE4_SECTOR:
case READ:
@@ -621,6 +871,7 @@ static void decode_new_cmd(Flash *s, uint32_t value)
case QPP:
case PP:
case PP4:
+ case PP4_4:
s->needed_bytes = get_addr_length(s);
s->pos = 0;
s->len = 0;
@@ -633,56 +884,35 @@ static void decode_new_cmd(Flash *s, uint32_t value)
case DOR4:
case QOR:
case QOR4:
- s->needed_bytes = get_addr_length(s);
- if (((s->pi->jedec >> 16) & 0xFF) == JEDEC_NUMONYX) {
- /* Dummy cycles modeled with bytes writes instead of bits */
- s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
- }
- s->pos = 0;
- s->len = 0;
- s->state = STATE_COLLECTING_DATA;
+ decode_fast_read_cmd(s);
break;
case DIOR:
case DIOR4:
- switch ((s->pi->jedec >> 16) & 0xFF) {
- case JEDEC_WINBOND:
- case JEDEC_SPANSION:
- s->needed_bytes = 4;
- break;
- default:
- s->needed_bytes = get_addr_length(s);
- /* Dummy cycles modeled with bytes writes instead of bits */
- s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
- }
- s->pos = 0;
- s->len = 0;
- s->state = STATE_COLLECTING_DATA;
+ decode_dio_read_cmd(s);
break;
case QIOR:
case QIOR4:
- switch ((s->pi->jedec >> 16) & 0xFF) {
- case JEDEC_WINBOND:
- case JEDEC_SPANSION:
- s->needed_bytes = 6;
- break;
- default:
- s->needed_bytes = get_addr_length(s);
- /* Dummy cycles modeled with bytes writes instead of bits */
- s->needed_bytes += extract32(s->volatile_cfg, 4, 4);
- }
- s->pos = 0;
- s->len = 0;
- s->state = STATE_COLLECTING_DATA;
+ decode_qio_read_cmd(s);
break;
case WRSR:
if (s->write_enable) {
- s->needed_bytes = 1;
+ switch (get_man(s)) {
+ case MAN_SPANSION:
+ s->needed_bytes = 2;
+ s->state = STATE_COLLECTING_DATA;
+ break;
+ case MAN_MACRONIX:
+ s->needed_bytes = 2;
+ s->state = STATE_COLLECTING_VAR_LEN_DATA;
+ break;
+ default:
+ s->needed_bytes = 1;
+ s->state = STATE_COLLECTING_DATA;
+ }
s->pos = 0;
- s->len = 0;
- s->state = STATE_COLLECTING_DATA;
}
break;
@@ -695,6 +925,9 @@ static void decode_new_cmd(Flash *s, uint32_t value)
case RDSR:
s->data[0] = (!!s->write_enable) << 1;
+ if (get_man(s) == MAN_MACRONIX) {
+ s->data[0] |= (!!s->quad_enable) << 6;
+ }
s->pos = 0;
s->len = 1;
s->state = STATE_READING_DATA;
@@ -712,17 +945,20 @@ static void decode_new_cmd(Flash *s, uint32_t value)
case JEDEC_READ:
DB_PRINT_L(0, "populated jedec code\n");
- s->data[0] = (s->pi->jedec >> 16) & 0xff;
- s->data[1] = (s->pi->jedec >> 8) & 0xff;
- s->data[2] = s->pi->jedec & 0xff;
- if (s->pi->ext_jedec) {
- s->data[3] = (s->pi->ext_jedec >> 8) & 0xff;
- s->data[4] = s->pi->ext_jedec & 0xff;
- s->len = 5;
- } else {
- s->len = 3;
+ for (i = 0; i < s->pi->id_len; i++) {
+ s->data[i] = s->pi->id[i];
}
+
+ s->len = s->pi->id_len;
+ s->pos = 0;
+ s->state = STATE_READING_DATA;
+ break;
+
+ case RDCR:
+ s->data[0] = s->volatile_cfg & 0xFF;
+ s->data[0] |= (!!s->four_bytes_address_mode) << 5;
s->pos = 0;
+ s->len = 1;
s->state = STATE_READING_DATA;
break;
@@ -765,7 +1001,7 @@ static void decode_new_cmd(Flash *s, uint32_t value)
s->state = STATE_READING_DATA;
break;
case WNVCR:
- if (s->write_enable) {
+ if (s->write_enable && get_man(s) == MAN_NUMONYX) {
s->needed_bytes = 2;
s->pos = 0;
s->len = 0;
@@ -808,6 +1044,24 @@ static void decode_new_cmd(Flash *s, uint32_t value)
reset_memory(s);
}
break;
+ case RDCR_EQIO:
+ switch (get_man(s)) {
+ case MAN_SPANSION:
+ s->data[0] = (!!s->quad_enable) << 1;
+ s->pos = 0;
+ s->len = 1;
+ s->state = STATE_READING_DATA;
+ break;
+ case MAN_MACRONIX:
+ s->quad_enable = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case RSTQIO:
+ s->quad_enable = false;
+ break;
default:
qemu_log_mask(LOG_GUEST_ERROR, "M25P80: Unknown cmd %x\n", value);
break;
@@ -819,6 +1073,9 @@ static int m25p80_cs(SSISlave *ss, bool select)
Flash *s = M25P80(ss);
if (select) {
+ if (s->state == STATE_COLLECTING_VAR_LEN_DATA) {
+ complete_collecting_data(s);
+ }
s->len = 0;
s->pos = 0;
s->state = STATE_IDLE;
@@ -838,20 +1095,21 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx)
switch (s->state) {
case STATE_PAGE_PROGRAM:
- DB_PRINT_L(1, "page program cur_addr=%#" PRIx64 " data=%" PRIx8 "\n",
+ DB_PRINT_L(1, "page program cur_addr=%#" PRIx32 " data=%" PRIx8 "\n",
s->cur_addr, (uint8_t)tx);
flash_write8(s, s->cur_addr, (uint8_t)tx);
- s->cur_addr++;
+ s->cur_addr = (s->cur_addr + 1) & (s->size - 1);
break;
case STATE_READ:
r = s->storage[s->cur_addr];
- DB_PRINT_L(1, "READ 0x%" PRIx64 "=%" PRIx8 "\n", s->cur_addr,
+ DB_PRINT_L(1, "READ 0x%" PRIx32 "=%" PRIx8 "\n", s->cur_addr,
(uint8_t)r);
- s->cur_addr = (s->cur_addr + 1) % s->size;
+ s->cur_addr = (s->cur_addr + 1) & (s->size - 1);
break;
case STATE_COLLECTING_DATA:
+ case STATE_COLLECTING_VAR_LEN_DATA:
s->data[s->len] = (uint8_t)tx;
s->len++;
@@ -878,9 +1136,8 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx)
return r;
}
-static int m25p80_init(SSISlave *ss)
+static void m25p80_realize(SSISlave *ss, Error **errp)
{
- DriveInfo *dinfo;
Flash *s = M25P80(ss);
M25P80Class *mc = M25P80_GET_CLASS(s);
@@ -889,28 +1146,19 @@ static int m25p80_init(SSISlave *ss)
s->size = s->pi->sector_size * s->pi->n_sectors;
s->dirty_page = -1;
- /* FIXME use a qdev drive property instead of drive_get_next() */
- dinfo = drive_get_next(IF_MTD);
-
- if (dinfo) {
+ if (s->blk) {
DB_PRINT_L(0, "Binding to IF_MTD drive\n");
- s->blk = blk_by_legacy_dinfo(dinfo);
- blk_attach_dev_nofail(s->blk, s);
-
s->storage = blk_blockalign(s->blk, s->size);
- /* FIXME: Move to late init */
if (blk_pread(s->blk, 0, s->storage, s->size) != s->size) {
- fprintf(stderr, "Failed to initialize SPI flash!\n");
- return 1;
+ error_setg(errp, "failed to read the initial flash content");
+ return;
}
} else {
DB_PRINT_L(0, "No BDRV - binding to RAM\n");
s->storage = blk_blockalign(NULL, s->size);
memset(s->storage, 0xFF, s->size);
}
-
- return 0;
}
static void m25p80_reset(DeviceState *d)
@@ -926,13 +1174,19 @@ static void m25p80_pre_save(void *opaque)
}
static Property m25p80_properties[] = {
+ /* This is default value for Micron flash */
DEFINE_PROP_UINT32("nonvolatile-cfg", Flash, nonvolatile_cfg, 0x8FFF),
+ DEFINE_PROP_UINT8("spansion-cr1nv", Flash, spansion_cr1nv, 0x0),
+ DEFINE_PROP_UINT8("spansion-cr2nv", Flash, spansion_cr2nv, 0x8),
+ DEFINE_PROP_UINT8("spansion-cr3nv", Flash, spansion_cr3nv, 0x2),
+ DEFINE_PROP_UINT8("spansion-cr4nv", Flash, spansion_cr4nv, 0x10),
+ DEFINE_PROP_DRIVE("drive", Flash, blk),
DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_m25p80 = {
.name = "xilinx_spi",
- .version_id = 2,
+ .version_id = 3,
.minimum_version_id = 1,
.pre_save = m25p80_pre_save,
.fields = (VMStateField[]) {
@@ -942,7 +1196,8 @@ static const VMStateDescription vmstate_m25p80 = {
VMSTATE_UINT32(pos, Flash),
VMSTATE_UINT8(needed_bytes, Flash),
VMSTATE_UINT8(cmd_in_progress, Flash),
- VMSTATE_UINT64(cur_addr, Flash),
+ VMSTATE_UNUSED(4),
+ VMSTATE_UINT32(cur_addr, Flash),
VMSTATE_BOOL(write_enable, Flash),
VMSTATE_BOOL_V(reset_enable, Flash, 2),
VMSTATE_UINT8_V(ear, Flash, 2),
@@ -950,6 +1205,11 @@ static const VMStateDescription vmstate_m25p80 = {
VMSTATE_UINT32_V(nonvolatile_cfg, Flash, 2),
VMSTATE_UINT32_V(volatile_cfg, Flash, 2),
VMSTATE_UINT32_V(enh_volatile_cfg, Flash, 2),
+ VMSTATE_BOOL_V(quad_enable, Flash, 3),
+ VMSTATE_UINT8_V(spansion_cr1nv, Flash, 3),
+ VMSTATE_UINT8_V(spansion_cr2nv, Flash, 3),
+ VMSTATE_UINT8_V(spansion_cr3nv, Flash, 3),
+ VMSTATE_UINT8_V(spansion_cr4nv, Flash, 3),
VMSTATE_END_OF_LIST()
}
};
@@ -960,7 +1220,7 @@ static void m25p80_class_init(ObjectClass *klass, void *data)
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
M25P80Class *mc = M25P80_CLASS(klass);
- k->init = m25p80_init;
+ k->realize = m25p80_realize;
k->transfer = m25p80_transfer8;
k->set_cs = m25p80_cs;
k->cs_polarity = SSI_CS_LOW;
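
Note on usage: with the conversion to realize(), the m25p80 model no longer pulls its backing storage out of drive_get_next(IF_MTD) itself; the block backend now arrives through the new "drive" property and read failures are reported through Error rather than a printf plus a non-zero return. A board model can bind an IF_MTD drive roughly as follows (a sketch only, assuming the usual qdev/SSI helpers; the chip name "n25q256a" is just one example of a supported part):

    /* Hedged sketch, not taken from this patch: attach an IF_MTD drive to an
     * m25p80-family flash sitting on an SSI bus. */
    DriveInfo *dinfo = drive_get_next(IF_MTD);
    DeviceState *flash = ssi_create_slave_no_init(spi_bus, "n25q256a");

    if (dinfo) {
        qdev_prop_set_drive(flash, "drive", blk_by_legacy_dinfo(dinfo),
                            &error_fatal);
    }
    qdev_init_nofail(flash);
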
diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c
index 31585e3aa1..62d7a5661d 100644
--- a/hw/block/pflash_cfi01.c
+++ b/hw/block/pflash_cfi01.c
@@ -65,7 +65,6 @@ do { \
#define DPRINTF(fmt, ...) do { } while (0)
#endif
-#define TYPE_CFI_PFLASH01 "cfi.pflash01"
#define CFI_PFLASH01(obj) OBJECT_CHECK(pflash_t, (obj), TYPE_CFI_PFLASH01)
#define PFLASH_BE 0
diff --git a/hw/block/pflash_cfi02.c b/hw/block/pflash_cfi02.c
index 5f106102c5..4f6105cc58 100644
--- a/hw/block/pflash_cfi02.c
+++ b/hw/block/pflash_cfi02.c
@@ -57,7 +57,6 @@ do { \
#define PFLASH_LAZY_ROMD_THRESHOLD 42
-#define TYPE_CFI_PFLASH02 "cfi.pflash02"
#define CFI_PFLASH02(obj) OBJECT_CHECK(pflash_t, (obj), TYPE_CFI_PFLASH02)
struct pflash_t {
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 284e64667c..fb43bbaa46 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -29,9 +29,11 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
-void virtio_blk_init_request(VirtIOBlock *s, VirtIOBlockReq *req)
+void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
+ VirtIOBlockReq *req)
{
req->dev = s;
+ req->vq = vq;
req->qiov.size = 0;
req->in_len = 0;
req->next = NULL;
@@ -53,11 +55,11 @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
trace_virtio_blk_req_complete(req, status);
stb_p(&req->in->status, status);
- virtqueue_push(s->vq, &req->elem, req->in_len);
+ virtqueue_push(req->vq, &req->elem, req->in_len);
if (s->dataplane_started && !s->dataplane_disabled) {
- virtio_blk_data_plane_notify(s->dataplane);
+ virtio_blk_data_plane_notify(s->dataplane, req->vq);
} else {
- virtio_notify(vdev, s->vq);
+ virtio_notify(vdev, req->vq);
}
}
@@ -187,12 +189,12 @@ out:
#endif
-static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
+static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s, VirtQueue *vq)
{
- VirtIOBlockReq *req = virtqueue_pop(s->vq, sizeof(VirtIOBlockReq));
+ VirtIOBlockReq *req = virtqueue_pop(vq, sizeof(VirtIOBlockReq));
if (req) {
- virtio_blk_init_request(s, req);
+ virtio_blk_init_request(s, vq, req);
}
return req;
}
@@ -583,7 +585,7 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
blk_io_plug(s->blk);
- while ((req = virtio_blk_get_request(s))) {
+ while ((req = virtio_blk_get_request(s, vq))) {
virtio_blk_handle_request(req, &mrb);
}
@@ -708,6 +710,7 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
blkcfg.physical_block_exp = get_physical_block_exp(conf);
blkcfg.alignment_offset = 0;
blkcfg.wce = blk_enable_write_cache(s->blk);
+ virtio_stw_p(vdev, &blkcfg.num_queues, s->conf.num_queues);
memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
}
@@ -751,6 +754,9 @@ static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
if (blk_is_read_only(s->blk)) {
virtio_add_feature(&features, VIRTIO_BLK_F_RO);
}
+ if (s->conf.num_queues > 1) {
+ virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
+ }
return features;
}
@@ -795,11 +801,6 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
- VirtIOBlock *s = VIRTIO_BLK(vdev);
-
- if (s->dataplane) {
- virtio_blk_data_plane_stop(s->dataplane);
- }
virtio_save(vdev, f);
}
@@ -811,6 +812,11 @@ static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
while (req) {
qemu_put_sbyte(f, 1);
+
+ if (s->conf.num_queues > 1) {
+ qemu_put_be32(f, virtio_get_queue_index(req->vq));
+ }
+
qemu_put_virtqueue_element(f, &req->elem);
req = req->next;
}
@@ -834,9 +840,22 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
VirtIOBlock *s = VIRTIO_BLK(vdev);
while (qemu_get_sbyte(f)) {
+ unsigned nvqs = s->conf.num_queues;
+ unsigned vq_idx = 0;
VirtIOBlockReq *req;
+
+ if (nvqs > 1) {
+ vq_idx = qemu_get_be32(f);
+
+ if (vq_idx >= nvqs) {
+ error_report("Invalid virtqueue index in request list: %#x",
+ vq_idx);
+ return -EINVAL;
+ }
+ }
+
req = qemu_get_virtqueue_element(f, sizeof(VirtIOBlockReq));
- virtio_blk_init_request(s, req);
+ virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
req->next = s->rq;
s->rq = req;
}
@@ -862,6 +881,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
VirtIOBlkConf *conf = &s->conf;
Error *err = NULL;
static int virtio_blk_id;
+ unsigned i;
if (!conf->conf.blk) {
error_setg(errp, "drive property not set");
@@ -871,6 +891,10 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
error_setg(errp, "Device needs media, but drive is empty");
return;
}
+ if (!conf->num_queues) {
+ error_setg(errp, "num-queues property must be larger than 0");
+ return;
+ }
blkconf_serial(&conf->conf, &conf->serial);
s->original_wce = blk_enable_write_cache(conf->conf.blk);
@@ -888,7 +912,9 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
s->rq = NULL;
s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
- s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);
+ for (i = 0; i < conf->num_queues; i++) {
+ virtio_add_queue(vdev, 128, virtio_blk_handle_output);
+ }
virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
if (err != NULL) {
error_propagate(errp, err);
@@ -941,6 +967,7 @@ static Property virtio_blk_properties[] = {
#endif
DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
true),
+ DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1),
DEFINE_PROP_END_OF_LIST(),
};
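
Note: with these changes virtio-blk negotiates VIRTIO_BLK_F_MQ whenever more than one queue is configured, reports the queue count in the config space (num_queues), and records the owning virtqueue index of each in-flight request so the request list survives migration on multi-queue setups. A guest can be given a multi-queue disk with something like the following invocation (the drive id and image name are placeholders):

    -drive if=none,id=drive0,file=disk.img,format=raw \
    -device virtio-blk-pci,drive=drive0,num-queues=4
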
diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c
index c856fc30b2..e3bc52f7df 100644
--- a/hw/char/cadence_uart.c
+++ b/hw/char/cadence_uart.c
@@ -288,13 +288,19 @@ static gboolean cadence_uart_xmit(GIOChannel *chan, GIOCondition cond,
}
ret = qemu_chr_fe_write(s->chr, s->tx_fifo, s->tx_count);
- s->tx_count -= ret;
- memmove(s->tx_fifo, s->tx_fifo + ret, s->tx_count);
+
+ if (ret >= 0) {
+ s->tx_count -= ret;
+ memmove(s->tx_fifo, s->tx_fifo + ret, s->tx_count);
+ }
if (s->tx_count) {
- int r = qemu_chr_fe_add_watch(s->chr, G_IO_OUT|G_IO_HUP,
- cadence_uart_xmit, s);
- assert(r);
+ guint r = qemu_chr_fe_add_watch(s->chr, G_IO_OUT|G_IO_HUP,
+ cadence_uart_xmit, s);
+ if (!r) {
+ s->tx_count = 0;
+ return FALSE;
+ }
}
uart_update_status(s);
diff --git a/hw/char/serial.c b/hw/char/serial.c
index 6d815b5c69..3442f47d36 100644
--- a/hw/char/serial.c
+++ b/hw/char/serial.c
@@ -106,6 +106,7 @@ do {} while (0)
#endif
static void serial_receive1(void *opaque, const uint8_t *buf, int size);
+static void serial_xmit(SerialState *s);
static inline void recv_fifo_put(SerialState *s, uint8_t chr)
{
@@ -223,13 +224,20 @@ static void serial_update_msl(SerialState *s)
}
}
-static gboolean serial_xmit(GIOChannel *chan, GIOCondition cond, void *opaque)
+static gboolean serial_watch_cb(GIOChannel *chan, GIOCondition cond,
+ void *opaque)
{
SerialState *s = opaque;
+ s->watch_tag = 0;
+ serial_xmit(s);
+ return FALSE;
+}
+static void serial_xmit(SerialState *s)
+{
do {
assert(!(s->lsr & UART_LSR_TEMT));
- if (s->tsr_retry <= 0) {
+ if (s->tsr_retry == 0) {
assert(!(s->lsr & UART_LSR_THRE));
if (s->fcr & UART_FCR_FE) {
@@ -251,17 +259,17 @@ static gboolean serial_xmit(GIOChannel *chan, GIOCondition cond, void *opaque)
if (s->mcr & UART_MCR_LOOP) {
/* in loopback mode, say that we just received a char */
serial_receive1(s, &s->tsr, 1);
- } else if (qemu_chr_fe_write(s->chr, &s->tsr, 1) != 1) {
- if (s->tsr_retry >= 0 && s->tsr_retry < MAX_XMIT_RETRY &&
- qemu_chr_fe_add_watch(s->chr, G_IO_OUT|G_IO_HUP,
- serial_xmit, s) > 0) {
+ } else if (qemu_chr_fe_write(s->chr, &s->tsr, 1) != 1 &&
+ s->tsr_retry < MAX_XMIT_RETRY) {
+ assert(s->watch_tag == 0);
+ s->watch_tag = qemu_chr_fe_add_watch(s->chr, G_IO_OUT|G_IO_HUP,
+ serial_watch_cb, s);
+ if (s->watch_tag > 0) {
s->tsr_retry++;
- return FALSE;
+ return;
}
- s->tsr_retry = 0;
- } else {
- s->tsr_retry = 0;
}
+ s->tsr_retry = 0;
/* Transmit another byte if it is already available. It is only
possible when FIFO is enabled and not empty. */
@@ -269,11 +277,8 @@ static gboolean serial_xmit(GIOChannel *chan, GIOCondition cond, void *opaque)
s->last_xmit_ts = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
s->lsr |= UART_LSR_TEMT;
-
- return FALSE;
}
-
/* Setter for FCR.
is_load flag means, that value is set while loading VM state
and interrupt should not be invoked */
@@ -330,8 +335,8 @@ static void serial_ioport_write(void *opaque, hwaddr addr, uint64_t val,
s->lsr &= ~UART_LSR_THRE;
s->lsr &= ~UART_LSR_TEMT;
serial_update_irq(s);
- if (s->tsr_retry <= 0) {
- serial_xmit(NULL, G_IO_OUT, s);
+ if (s->tsr_retry == 0) {
+ serial_xmit(s);
}
}
break;
@@ -639,6 +644,31 @@ static int serial_post_load(void *opaque, int version_id)
if (s->thr_ipending == -1) {
s->thr_ipending = ((s->iir & UART_IIR_ID) == UART_IIR_THRI);
}
+
+ if (s->tsr_retry > 0) {
+ /* tsr_retry > 0 implies LSR.TEMT = 0 (transmitter not empty). */
+ if (s->lsr & UART_LSR_TEMT) {
+ error_report("inconsistent state in serial device "
+                         "(tsr empty, tsr_retry=%d)", s->tsr_retry);
+ return -1;
+ }
+
+ if (s->tsr_retry > MAX_XMIT_RETRY) {
+ s->tsr_retry = MAX_XMIT_RETRY;
+ }
+
+ assert(s->watch_tag == 0);
+ s->watch_tag = qemu_chr_fe_add_watch(s->chr, G_IO_OUT|G_IO_HUP,
+ serial_watch_cb, s);
+ } else {
+ /* tsr_retry == 0 implies LSR.TEMT = 1 (transmitter empty). */
+ if (!(s->lsr & UART_LSR_TEMT)) {
+ error_report("inconsistent state in serial device "
+                         "(tsr not empty, tsr_retry=0)");
+ return -1;
+ }
+ }
+
s->last_break_enable = (s->lcr >> 6) & 1;
/* Initialize fcr via setter to perform essential side-effects */
serial_write_fcr(s, s->fcr_vmstate);
@@ -685,7 +715,7 @@ static const VMStateDescription vmstate_serial_tsr = {
.minimum_version_id = 1,
.needed = serial_tsr_needed,
.fields = (VMStateField[]) {
- VMSTATE_INT32(tsr_retry, SerialState),
+ VMSTATE_UINT32(tsr_retry, SerialState),
VMSTATE_UINT8(thr, SerialState),
VMSTATE_UINT8(tsr, SerialState),
VMSTATE_END_OF_LIST()
@@ -815,6 +845,11 @@ static void serial_reset(void *opaque)
{
SerialState *s = opaque;
+ if (s->watch_tag > 0) {
+ g_source_remove(s->watch_tag);
+ s->watch_tag = 0;
+ }
+
s->rbr = 0;
s->ier = 0;
s->iir = UART_IIR_NO_INT;
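
Note: the serial changes above make the chardev watch explicit. The watch tag lives in the device state, is cleared in the callback (which returns FALSE so GLib drops the one-shot source), is removed on reset, and is re-armed in post_load when tsr_retry shows a transmit was interrupted by migration. The underlying retry pattern, reduced to its essentials, looks roughly like this (a sketch with hypothetical MyDev names, not code from this patch):

    typedef struct MyDev {
        CharDriverState *chr;   /* chardev backend handle */
        guint watch_tag;        /* non-zero while a G_IO_OUT watch is armed */
        uint8_t byte;           /* single pending byte to transmit */
    } MyDev;

    static void my_dev_flush(MyDev *s);

    static gboolean my_dev_watch_cb(GIOChannel *chan, GIOCondition cond,
                                    void *opaque)
    {
        MyDev *s = opaque;

        s->watch_tag = 0;       /* returning FALSE removes the source */
        my_dev_flush(s);        /* retry the pending byte */
        return FALSE;
    }

    static void my_dev_flush(MyDev *s)
    {
        if (qemu_chr_fe_write(s->chr, &s->byte, 1) != 1 && !s->watch_tag) {
            /* backend busy: ask to be called back when it can accept data */
            s->watch_tag = qemu_chr_fe_add_watch(s->chr, G_IO_OUT | G_IO_HUP,
                                                 my_dev_watch_cb, s);
        }
    }
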
diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs
index 82a9ef84f8..cfd4840397 100644
--- a/hw/core/Makefile.objs
+++ b/hw/core/Makefile.objs
@@ -15,4 +15,5 @@ common-obj-$(CONFIG_SOFTMMU) += machine.o
common-obj-$(CONFIG_SOFTMMU) += null-machine.o
common-obj-$(CONFIG_SOFTMMU) += loader.o
common-obj-$(CONFIG_SOFTMMU) += qdev-properties-system.o
+common-obj-$(CONFIG_SOFTMMU) += register.o
common-obj-$(CONFIG_PLATFORM_BUS) += platform-bus.o
diff --git a/hw/core/register.c b/hw/core/register.c
new file mode 100644
index 0000000000..4bfbc508de
--- /dev/null
+++ b/hw/core/register.c
@@ -0,0 +1,287 @@
+/*
+ * Register Definition API
+ *
+ * Copyright (c) 2016 Xilinx Inc.
+ * Copyright (c) 2013 Peter Crosthwaite <peter.crosthwaite@xilinx.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/register.h"
+#include "hw/qdev.h"
+#include "qemu/log.h"
+
+static inline void register_write_val(RegisterInfo *reg, uint64_t val)
+{
+ g_assert(reg->data);
+
+ switch (reg->data_size) {
+ case 1:
+ *(uint8_t *)reg->data = val;
+ break;
+ case 2:
+ *(uint16_t *)reg->data = val;
+ break;
+ case 4:
+ *(uint32_t *)reg->data = val;
+ break;
+ case 8:
+ *(uint64_t *)reg->data = val;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline uint64_t register_read_val(RegisterInfo *reg)
+{
+ switch (reg->data_size) {
+ case 1:
+ return *(uint8_t *)reg->data;
+ case 2:
+ return *(uint16_t *)reg->data;
+ case 4:
+ return *(uint32_t *)reg->data;
+ case 8:
+ return *(uint64_t *)reg->data;
+ default:
+ g_assert_not_reached();
+ }
+ return 0; /* unreachable */
+}
+
+void register_write(RegisterInfo *reg, uint64_t val, uint64_t we,
+ const char *prefix, bool debug)
+{
+ uint64_t old_val, new_val, test, no_w_mask;
+ const RegisterAccessInfo *ac;
+
+ assert(reg);
+
+ ac = reg->access;
+
+ if (!ac || !ac->name) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: write to undefined device state "
+ "(written value: %#" PRIx64 ")\n", prefix, val);
+ return;
+ }
+
+ old_val = reg->data ? register_read_val(reg) : ac->reset;
+
+ test = (old_val ^ val) & ac->rsvd;
+ if (test) {
+        qemu_log_mask(LOG_GUEST_ERROR, "%s: change of value in reserved bit "
+                      "fields: %#" PRIx64 "\n", prefix, test);
+ }
+
+ test = val & ac->unimp;
+ if (test) {
+ qemu_log_mask(LOG_UNIMP,
+ "%s:%s writing %#" PRIx64 " to unimplemented bits:" \
+ " %#" PRIx64 "",
+ prefix, reg->access->name, val, ac->unimp);
+ }
+
+ /* Create the no write mask based on the read only, write to clear and
+ * reserved bit masks.
+ */
+ no_w_mask = ac->ro | ac->w1c | ac->rsvd | ~we;
+ new_val = (val & ~no_w_mask) | (old_val & no_w_mask);
+ new_val &= ~(val & ac->w1c);
+
+ if (ac->pre_write) {
+ new_val = ac->pre_write(reg, new_val);
+ }
+
+ if (debug) {
+ qemu_log("%s:%s: write of value %#" PRIx64 "\n", prefix, ac->name,
+ new_val);
+ }
+
+ register_write_val(reg, new_val);
+
+ if (ac->post_write) {
+ ac->post_write(reg, new_val);
+ }
+}
+
+uint64_t register_read(RegisterInfo *reg, uint64_t re, const char* prefix,
+ bool debug)
+{
+ uint64_t ret;
+ const RegisterAccessInfo *ac;
+
+ assert(reg);
+
+ ac = reg->access;
+ if (!ac || !ac->name) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: read from undefined device state\n",
+ prefix);
+ return 0;
+ }
+
+ ret = reg->data ? register_read_val(reg) : ac->reset;
+
+ register_write_val(reg, ret & ~(ac->cor & re));
+
+ /* Mask based on the read enable size */
+ ret &= re;
+
+ if (ac->post_read) {
+ ret = ac->post_read(reg, ret);
+ }
+
+ if (debug) {
+ qemu_log("%s:%s: read of value %#" PRIx64 "\n", prefix,
+ ac->name, ret);
+ }
+
+ return ret;
+}
+
+void register_reset(RegisterInfo *reg)
+{
+ g_assert(reg);
+
+ if (!reg->data || !reg->access) {
+ return;
+ }
+
+ register_write_val(reg, reg->access->reset);
+}
+
+void register_init(RegisterInfo *reg)
+{
+ assert(reg);
+
+ if (!reg->data || !reg->access) {
+ return;
+ }
+
+ object_initialize((void *)reg, sizeof(*reg), TYPE_REGISTER);
+}
+
+void register_write_memory(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ RegisterInfoArray *reg_array = opaque;
+ RegisterInfo *reg = NULL;
+ uint64_t we;
+ int i;
+
+ for (i = 0; i < reg_array->num_elements; i++) {
+ if (reg_array->r[i]->access->addr == addr) {
+ reg = reg_array->r[i];
+ break;
+ }
+ }
+
+ if (!reg) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Write to unimplemented register at " \
+ "address: %#" PRIx64 "\n", addr);
+ return;
+ }
+
+ /* Generate appropriate write enable mask */
+ if (reg->data_size < size) {
+ we = MAKE_64BIT_MASK(0, reg->data_size * 8);
+ } else {
+ we = MAKE_64BIT_MASK(0, size * 8);
+ }
+
+ register_write(reg, value, we, reg_array->prefix,
+ reg_array->debug);
+}
+
+uint64_t register_read_memory(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ RegisterInfoArray *reg_array = opaque;
+ RegisterInfo *reg = NULL;
+ uint64_t read_val;
+ int i;
+
+ for (i = 0; i < reg_array->num_elements; i++) {
+ if (reg_array->r[i]->access->addr == addr) {
+ reg = reg_array->r[i];
+ break;
+ }
+ }
+
+ if (!reg) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Read to unimplemented register at " \
+ "address: %#" PRIx64 "\n", addr);
+ return 0;
+ }
+
+ read_val = register_read(reg, size * 8, reg_array->prefix,
+ reg_array->debug);
+
+ return extract64(read_val, 0, size * 8);
+}
+
+RegisterInfoArray *register_init_block32(DeviceState *owner,
+ const RegisterAccessInfo *rae,
+ int num, RegisterInfo *ri,
+ uint32_t *data,
+ const MemoryRegionOps *ops,
+ bool debug_enabled,
+ uint64_t memory_size)
+{
+ const char *device_prefix = object_get_typename(OBJECT(owner));
+ RegisterInfoArray *r_array = g_new0(RegisterInfoArray, 1);
+ int i;
+
+ r_array->r = g_new0(RegisterInfo *, num);
+ r_array->num_elements = num;
+ r_array->debug = debug_enabled;
+ r_array->prefix = device_prefix;
+
+ for (i = 0; i < num; i++) {
+ int index = rae[i].addr / 4;
+ RegisterInfo *r = &ri[index];
+
+ *r = (RegisterInfo) {
+ .data = &data[index],
+ .data_size = sizeof(uint32_t),
+ .access = &rae[i],
+ .opaque = owner,
+ };
+ register_init(r);
+
+ r_array->r[i] = r;
+ }
+
+ memory_region_init_io(&r_array->mem, OBJECT(owner), ops, r_array,
+ device_prefix, memory_size);
+
+ return r_array;
+}
+
+void register_finalize_block(RegisterInfoArray *r_array)
+{
+ object_unparent(OBJECT(&r_array->mem));
+ g_free(r_array->r);
+ g_free(r_array);
+}
+
+static const TypeInfo register_info = {
+ .name = TYPE_REGISTER,
+ .parent = TYPE_DEVICE,
+};
+
+static void register_register_types(void)
+{
+ type_register_static(&register_info);
+}
+
+type_init(register_register_types)
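
Note: the register API above lets a device describe each register once (reset value, read-only, write-one-to-clear, reserved and unimplemented masks, pre/post hooks) and have register_read_memory()/register_write_memory() enforce those rules for every guest access. A condensed sketch of the expected usage follows (the "foo" names and layout are hypothetical; the Zynq devcfg model added later in this series is the first real user):

    #define FOO_R_MAX 2

    static const RegisterAccessInfo foo_regs_info[] = {
        { .name = "CTRL",   .addr = 0x00, .reset = 0x1,
          .ro = 0xffff0000 },                    /* upper half read-only */
        { .name = "STATUS", .addr = 0x04, .ro = ~0 },
    };

    static const MemoryRegionOps foo_reg_ops = {
        .read = register_read_memory,
        .write = register_write_memory,
        .endianness = DEVICE_LITTLE_ENDIAN,
        .valid = { .min_access_size = 4, .max_access_size = 4 },
    };

    /* in instance_init, with uint32_t regs[FOO_R_MAX] and
     * RegisterInfo regs_info[FOO_R_MAX] embedded in the device state: */
    reg_array = register_init_block32(DEVICE(obj), foo_regs_info,
                                      ARRAY_SIZE(foo_regs_info),
                                      s->regs_info, s->regs,
                                      &foo_reg_ops,
                                      false /* debug */,
                                      FOO_R_MAX * 4);
    memory_region_add_subregion(&s->iomem, 0x0, &reg_array->mem);
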
diff --git a/hw/display/ads7846.c b/hw/display/ads7846.c
index 05aa2d1e6b..166edade7d 100644
--- a/hw/display/ads7846.c
+++ b/hw/display/ads7846.c
@@ -133,7 +133,7 @@ static const VMStateDescription vmstate_ads7846 = {
}
};
-static int ads7846_init(SSISlave *d)
+static void ads7846_realize(SSISlave *d, Error **errp)
{
DeviceState *dev = DEVICE(d);
ADS7846State *s = FROM_SSI_SLAVE(ADS7846State, d);
@@ -152,14 +152,13 @@ static int ads7846_init(SSISlave *d)
ads7846_int_update(s);
vmstate_register(NULL, -1, &vmstate_ads7846, s);
- return 0;
}
static void ads7846_class_init(ObjectClass *klass, void *data)
{
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
- k->init = ads7846_init;
+ k->realize = ads7846_realize;
k->transfer = ads7846_transfer;
}
diff --git a/hw/display/ssd0323.c b/hw/display/ssd0323.c
index 14c1bf339c..6d1faf44af 100644
--- a/hw/display/ssd0323.c
+++ b/hw/display/ssd0323.c
@@ -361,7 +361,7 @@ static const GraphicHwOps ssd0323_ops = {
.gfx_update = ssd0323_update_display,
};
-static int ssd0323_init(SSISlave *d)
+static void ssd0323_realize(SSISlave *d, Error **errp)
{
DeviceState *dev = DEVICE(d);
ssd0323_state *s = FROM_SSI_SLAVE(ssd0323_state, d);
@@ -375,14 +375,13 @@ static int ssd0323_init(SSISlave *d)
register_savevm(dev, "ssd0323_oled", -1, 1,
ssd0323_save, ssd0323_load, s);
- return 0;
}
static void ssd0323_class_init(ObjectClass *klass, void *data)
{
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
- k->init = ssd0323_init;
+ k->realize = ssd0323_realize;
k->transfer = ssd0323_transfer;
k->cs_polarity = SSI_CS_HIGH;
}
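
Note: ads7846 and ssd0323 (like m25p80 earlier in this series) are converted from SSISlaveClass::init, which returned an int, to a realize hook taking an Error **, so failures are reported with error_setg() and propagate like any other qdev realize error. The new shape for an SSI slave is roughly the following (hypothetical mydev names, a sketch only):

    static void mydev_realize(SSISlave *d, Error **errp)
    {
        MyDevState *s = FROM_SSI_SLAVE(MyDevState, d);

        if (!s->required_resource) {
            error_setg(errp, "mydev: required resource missing");
            return;
        }
        /* ... remaining initialisation ... */
    }

    static void mydev_class_init(ObjectClass *klass, void *data)
    {
        SSISlaveClass *k = SSI_SLAVE_CLASS(klass);

        k->realize = mydev_realize;
        /* k->transfer, k->set_cs, k->cs_polarity as before */
    }
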
diff --git a/hw/dma/Makefile.objs b/hw/dma/Makefile.objs
index 8b0823e593..087c8e6855 100644
--- a/hw/dma/Makefile.objs
+++ b/hw/dma/Makefile.objs
@@ -5,6 +5,7 @@ common-obj-$(CONFIG_PL330) += pl330.o
common-obj-$(CONFIG_I82374) += i82374.o
common-obj-$(CONFIG_I8257) += i8257.o
common-obj-$(CONFIG_XILINX_AXI) += xilinx_axidma.o
+common-obj-$(CONFIG_ZYNQ_DEVCFG) += xlnx-zynq-devcfg.o
common-obj-$(CONFIG_ETRAXFS) += etraxfs_dma.o
common-obj-$(CONFIG_STP2000) += sparc32_dma.o
common-obj-$(CONFIG_SUN4M) += sun4m_iommu.o
diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c
new file mode 100644
index 0000000000..3b10523430
--- /dev/null
+++ b/hw/dma/xlnx-zynq-devcfg.c
@@ -0,0 +1,400 @@
+/*
+ * QEMU model of the Xilinx Zynq Devcfg Interface
+ *
+ * (C) 2011 PetaLogix Pty Ltd
+ * (C) 2014 Xilinx Inc.
+ * Written by Peter Crosthwaite <peter.crosthwaite@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/dma/xlnx-zynq-devcfg.h"
+#include "qemu/bitops.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/dma.h"
+#include "qemu/log.h"
+
+#define FREQ_HZ 900000000
+
+#define BTT_MAX 0x400
+
+#ifndef XLNX_ZYNQ_DEVCFG_ERR_DEBUG
+#define XLNX_ZYNQ_DEVCFG_ERR_DEBUG 0
+#endif
+
+#define DB_PRINT(fmt, args...) do { \
+ if (XLNX_ZYNQ_DEVCFG_ERR_DEBUG) { \
+ qemu_log("%s: " fmt, __func__, ## args); \
+ } \
+} while (0);
+
+REG32(CTRL, 0x00)
+ FIELD(CTRL, FORCE_RST, 31, 1) /* Not supported, wr ignored */
+ FIELD(CTRL, PCAP_PR, 27, 1) /* Forced to 0 on bad unlock */
+ FIELD(CTRL, PCAP_MODE, 26, 1)
+ FIELD(CTRL, MULTIBOOT_EN, 24, 1)
+ FIELD(CTRL, USER_MODE, 15, 1)
+ FIELD(CTRL, PCFG_AES_FUSE, 12, 1)
+ FIELD(CTRL, PCFG_AES_EN, 9, 3)
+ FIELD(CTRL, SEU_EN, 8, 1)
+ FIELD(CTRL, SEC_EN, 7, 1)
+ FIELD(CTRL, SPNIDEN, 6, 1)
+ FIELD(CTRL, SPIDEN, 5, 1)
+ FIELD(CTRL, NIDEN, 4, 1)
+ FIELD(CTRL, DBGEN, 3, 1)
+ FIELD(CTRL, DAP_EN, 0, 3)
+
+REG32(LOCK, 0x04)
+#define AES_FUSE_LOCK 4
+#define AES_EN_LOCK 3
+#define SEU_LOCK 2
+#define SEC_LOCK 1
+#define DBG_LOCK 0
+
+/* mapping bits in R_LOCK to what they lock in R_CTRL */
+static const uint32_t lock_ctrl_map[] = {
+ [AES_FUSE_LOCK] = R_CTRL_PCFG_AES_FUSE_MASK,
+ [AES_EN_LOCK] = R_CTRL_PCFG_AES_EN_MASK,
+ [SEU_LOCK] = R_CTRL_SEU_EN_MASK,
+ [SEC_LOCK] = R_CTRL_SEC_EN_MASK,
+ [DBG_LOCK] = R_CTRL_SPNIDEN_MASK | R_CTRL_SPIDEN_MASK |
+ R_CTRL_NIDEN_MASK | R_CTRL_DBGEN_MASK |
+ R_CTRL_DAP_EN_MASK,
+};
+
+REG32(CFG, 0x08)
+ FIELD(CFG, RFIFO_TH, 10, 2)
+ FIELD(CFG, WFIFO_TH, 8, 2)
+ FIELD(CFG, RCLK_EDGE, 7, 1)
+ FIELD(CFG, WCLK_EDGE, 6, 1)
+ FIELD(CFG, DISABLE_SRC_INC, 5, 1)
+ FIELD(CFG, DISABLE_DST_INC, 4, 1)
+#define R_CFG_RESET 0x50B
+
+REG32(INT_STS, 0x0C)
+ FIELD(INT_STS, PSS_GTS_USR_B, 31, 1)
+ FIELD(INT_STS, PSS_FST_CFG_B, 30, 1)
+ FIELD(INT_STS, PSS_CFG_RESET_B, 27, 1)
+ FIELD(INT_STS, RX_FIFO_OV, 18, 1)
+ FIELD(INT_STS, WR_FIFO_LVL, 17, 1)
+ FIELD(INT_STS, RD_FIFO_LVL, 16, 1)
+ FIELD(INT_STS, DMA_CMD_ERR, 15, 1)
+ FIELD(INT_STS, DMA_Q_OV, 14, 1)
+ FIELD(INT_STS, DMA_DONE, 13, 1)
+ FIELD(INT_STS, DMA_P_DONE, 12, 1)
+ FIELD(INT_STS, P2D_LEN_ERR, 11, 1)
+ FIELD(INT_STS, PCFG_DONE, 2, 1)
+#define R_INT_STS_RSVD ((0x7 << 24) | (0x1 << 19) | (0xF << 7))
+
+REG32(INT_MASK, 0x10)
+
+REG32(STATUS, 0x14)
+ FIELD(STATUS, DMA_CMD_Q_F, 31, 1)
+ FIELD(STATUS, DMA_CMD_Q_E, 30, 1)
+ FIELD(STATUS, DMA_DONE_CNT, 28, 2)
+ FIELD(STATUS, RX_FIFO_LVL, 20, 5)
+ FIELD(STATUS, TX_FIFO_LVL, 12, 7)
+ FIELD(STATUS, PSS_GTS_USR_B, 11, 1)
+ FIELD(STATUS, PSS_FST_CFG_B, 10, 1)
+ FIELD(STATUS, PSS_CFG_RESET_B, 5, 1)
+
+REG32(DMA_SRC_ADDR, 0x18)
+REG32(DMA_DST_ADDR, 0x1C)
+REG32(DMA_SRC_LEN, 0x20)
+REG32(DMA_DST_LEN, 0x24)
+REG32(ROM_SHADOW, 0x28)
+REG32(SW_ID, 0x30)
+REG32(UNLOCK, 0x34)
+
+#define R_UNLOCK_MAGIC 0x757BDF0D
+
+REG32(MCTRL, 0x80)
+ FIELD(MCTRL, PS_VERSION, 28, 4)
+ FIELD(MCTRL, PCFG_POR_B, 8, 1)
+ FIELD(MCTRL, INT_PCAP_LPBK, 4, 1)
+ FIELD(MCTRL, QEMU, 3, 1)
+
+static void xlnx_zynq_devcfg_update_ixr(XlnxZynqDevcfg *s)
+{
+ qemu_set_irq(s->irq, ~s->regs[R_INT_MASK] & s->regs[R_INT_STS]);
+}
+
+static void xlnx_zynq_devcfg_reset(DeviceState *dev)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(dev);
+ int i;
+
+ for (i = 0; i < XLNX_ZYNQ_DEVCFG_R_MAX; ++i) {
+ register_reset(&s->regs_info[i]);
+ }
+}
+
+static void xlnx_zynq_devcfg_dma_go(XlnxZynqDevcfg *s)
+{
+ do {
+ uint8_t buf[BTT_MAX];
+ XlnxZynqDevcfgDMACmd *dmah = s->dma_cmd_fifo;
+ uint32_t btt = BTT_MAX;
+ bool loopback = s->regs[R_MCTRL] & R_MCTRL_INT_PCAP_LPBK_MASK;
+
+ btt = MIN(btt, dmah->src_len);
+ if (loopback) {
+ btt = MIN(btt, dmah->dest_len);
+ }
+ DB_PRINT("reading %x bytes from %x\n", btt, dmah->src_addr);
+ dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt);
+ dmah->src_len -= btt;
+ dmah->src_addr += btt;
+ if (loopback && (dmah->src_len || dmah->dest_len)) {
+ DB_PRINT("writing %x bytes from %x\n", btt, dmah->dest_addr);
+ dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt);
+ dmah->dest_len -= btt;
+ dmah->dest_addr += btt;
+ }
+ if (!dmah->src_len && !dmah->dest_len) {
+ DB_PRINT("dma operation finished\n");
+ s->regs[R_INT_STS] |= R_INT_STS_DMA_DONE_MASK |
+ R_INT_STS_DMA_P_DONE_MASK;
+ s->dma_cmd_fifo_num--;
+ memmove(s->dma_cmd_fifo, &s->dma_cmd_fifo[1],
+ sizeof(s->dma_cmd_fifo) - sizeof(s->dma_cmd_fifo[0]));
+ }
+ xlnx_zynq_devcfg_update_ixr(s);
+ } while (s->dma_cmd_fifo_num);
+}
+
+static void r_ixr_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
+
+ xlnx_zynq_devcfg_update_ixr(s);
+}
+
+static uint64_t r_ctrl_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lock_ctrl_map); ++i) {
+ if (s->regs[R_LOCK] & 1 << i) {
+ val &= ~lock_ctrl_map[i];
+ val |= lock_ctrl_map[i] & s->regs[R_CTRL];
+ }
+ }
+ return val;
+}
+
+static void r_ctrl_post_write(RegisterInfo *reg, uint64_t val)
+{
+ const char *device_prefix = object_get_typename(OBJECT(reg->opaque));
+ uint32_t aes_en = FIELD_EX32(val, CTRL, PCFG_AES_EN);
+
+ if (aes_en != 0 && aes_en != 7) {
+        qemu_log_mask(LOG_UNIMP, "%s: warning, aes-en bits inconsistent, "
+ "unimplemented security reset should happen!\n",
+ device_prefix);
+ }
+}
+
+static void r_unlock_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
+ const char *device_prefix = object_get_typename(OBJECT(s));
+
+ if (val == R_UNLOCK_MAGIC) {
+ DB_PRINT("successful unlock\n");
+ s->regs[R_CTRL] |= R_CTRL_PCAP_PR_MASK;
+ s->regs[R_CTRL] |= R_CTRL_PCFG_AES_EN_MASK;
+ memory_region_set_enabled(&s->iomem, true);
+ } else { /* bad unlock attempt */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: failed unlock\n", device_prefix);
+ s->regs[R_CTRL] &= ~R_CTRL_PCAP_PR_MASK;
+ s->regs[R_CTRL] &= ~R_CTRL_PCFG_AES_EN_MASK;
+ /* core becomes inaccessible */
+ memory_region_set_enabled(&s->iomem, false);
+ }
+}
+
+static uint64_t r_lock_pre_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
+
+ /* once bits are locked they stay locked */
+ return s->regs[R_LOCK] | val;
+}
+
+static void r_dma_dst_len_post_write(RegisterInfo *reg, uint64_t val)
+{
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(reg->opaque);
+
+ s->dma_cmd_fifo[s->dma_cmd_fifo_num] = (XlnxZynqDevcfgDMACmd) {
+ .src_addr = s->regs[R_DMA_SRC_ADDR] & ~0x3UL,
+ .dest_addr = s->regs[R_DMA_DST_ADDR] & ~0x3UL,
+ .src_len = s->regs[R_DMA_SRC_LEN] << 2,
+ .dest_len = s->regs[R_DMA_DST_LEN] << 2,
+ };
+ s->dma_cmd_fifo_num++;
+ DB_PRINT("dma transfer started; %d total transfers pending\n",
+ s->dma_cmd_fifo_num);
+ xlnx_zynq_devcfg_dma_go(s);
+}
+
+static const RegisterAccessInfo xlnx_zynq_devcfg_regs_info[] = {
+ { .name = "CTRL", .addr = A_CTRL,
+ .reset = R_CTRL_PCAP_PR_MASK | R_CTRL_PCAP_MODE_MASK | 0x3 << 13,
+ .rsvd = 0x1 << 28 | 0x3ff << 13 | 0x3 << 13,
+ .pre_write = r_ctrl_pre_write,
+ .post_write = r_ctrl_post_write,
+ },
+ { .name = "LOCK", .addr = A_LOCK,
+ .rsvd = MAKE_64BIT_MASK(5, 64 - 5),
+ .pre_write = r_lock_pre_write,
+ },
+ { .name = "CFG", .addr = A_CFG,
+ .reset = R_CFG_RESET,
+ .rsvd = 0xfffff00f,
+ },
+ { .name = "INT_STS", .addr = A_INT_STS,
+ .w1c = ~R_INT_STS_RSVD,
+ .reset = R_INT_STS_PSS_GTS_USR_B_MASK |
+ R_INT_STS_PSS_CFG_RESET_B_MASK |
+ R_INT_STS_WR_FIFO_LVL_MASK,
+ .rsvd = R_INT_STS_RSVD,
+ .post_write = r_ixr_post_write,
+ },
+ { .name = "INT_MASK", .addr = A_INT_MASK,
+ .reset = ~0,
+ .rsvd = R_INT_STS_RSVD,
+ .post_write = r_ixr_post_write,
+ },
+ { .name = "STATUS", .addr = A_STATUS,
+ .reset = R_STATUS_DMA_CMD_Q_E_MASK |
+ R_STATUS_PSS_GTS_USR_B_MASK |
+ R_STATUS_PSS_CFG_RESET_B_MASK,
+ .ro = ~0,
+ },
+ { .name = "DMA_SRC_ADDR", .addr = A_DMA_SRC_ADDR, },
+ { .name = "DMA_DST_ADDR", .addr = A_DMA_DST_ADDR, },
+ { .name = "DMA_SRC_LEN", .addr = A_DMA_SRC_LEN,
+ .ro = MAKE_64BIT_MASK(27, 64 - 27) },
+ { .name = "DMA_DST_LEN", .addr = A_DMA_DST_LEN,
+ .ro = MAKE_64BIT_MASK(27, 64 - 27),
+ .post_write = r_dma_dst_len_post_write,
+ },
+ { .name = "ROM_SHADOW", .addr = A_ROM_SHADOW,
+ .rsvd = ~0ull,
+ },
+ { .name = "SW_ID", .addr = A_SW_ID, },
+ { .name = "UNLOCK", .addr = A_UNLOCK,
+ .post_write = r_unlock_post_write,
+ },
+ { .name = "MCTRL", .addr = R_MCTRL * 4,
+ /* Silicon 3.0 for version field, the mysterious reserved bit 23
+ * and QEMU platform identifier.
+ */
+ .reset = 0x2 << R_MCTRL_PS_VERSION_SHIFT | 1 << 23 | R_MCTRL_QEMU_MASK,
+ .ro = ~R_MCTRL_INT_PCAP_LPBK_MASK,
+ .rsvd = 0x00f00303,
+ },
+};
+
+static const MemoryRegionOps xlnx_zynq_devcfg_reg_ops = {
+ .read = register_read_memory,
+ .write = register_write_memory,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static const VMStateDescription vmstate_xlnx_zynq_devcfg_dma_cmd = {
+ .name = "xlnx_zynq_devcfg_dma_cmd",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(src_addr, XlnxZynqDevcfgDMACmd),
+ VMSTATE_UINT32(dest_addr, XlnxZynqDevcfgDMACmd),
+ VMSTATE_UINT32(src_len, XlnxZynqDevcfgDMACmd),
+ VMSTATE_UINT32(dest_len, XlnxZynqDevcfgDMACmd),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_xlnx_zynq_devcfg = {
+ .name = "xlnx_zynq_devcfg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(dma_cmd_fifo, XlnxZynqDevcfg,
+ XLNX_ZYNQ_DEVCFG_DMA_CMD_FIFO_LEN, 0,
+ vmstate_xlnx_zynq_devcfg_dma_cmd,
+ XlnxZynqDevcfgDMACmd),
+ VMSTATE_UINT8(dma_cmd_fifo_num, XlnxZynqDevcfg),
+ VMSTATE_UINT32_ARRAY(regs, XlnxZynqDevcfg, XLNX_ZYNQ_DEVCFG_R_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void xlnx_zynq_devcfg_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ XlnxZynqDevcfg *s = XLNX_ZYNQ_DEVCFG(obj);
+ RegisterInfoArray *reg_array;
+
+ sysbus_init_irq(sbd, &s->irq);
+
+ memory_region_init(&s->iomem, obj, "devcfg", XLNX_ZYNQ_DEVCFG_R_MAX * 4);
+ reg_array =
+ register_init_block32(DEVICE(obj), xlnx_zynq_devcfg_regs_info,
+ ARRAY_SIZE(xlnx_zynq_devcfg_regs_info),
+ s->regs_info, s->regs,
+ &xlnx_zynq_devcfg_reg_ops,
+ XLNX_ZYNQ_DEVCFG_ERR_DEBUG,
+ XLNX_ZYNQ_DEVCFG_R_MAX);
+ memory_region_add_subregion(&s->iomem,
+ A_CTRL,
+ &reg_array->mem);
+
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static void xlnx_zynq_devcfg_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->reset = xlnx_zynq_devcfg_reset;
+ dc->vmsd = &vmstate_xlnx_zynq_devcfg;
+}
+
+static const TypeInfo xlnx_zynq_devcfg_info = {
+ .name = TYPE_XLNX_ZYNQ_DEVCFG,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(XlnxZynqDevcfg),
+ .instance_init = xlnx_zynq_devcfg_init,
+ .class_init = xlnx_zynq_devcfg_class_init,
+};
+
+static void xlnx_zynq_devcfg_register_types(void)
+{
+ type_register_static(&xlnx_zynq_devcfg_info);
+}
+
+type_init(xlnx_zynq_devcfg_register_types)
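
Note: the guest-visible programming model implied by the register definitions above is: write the unlock magic to UNLOCK (a bad value disables the whole register block), program DMA_SRC_ADDR/DMA_DST_ADDR and the word counts, and the write to DMA_DST_LEN queues the descriptor and kicks xlnx_zynq_devcfg_dma_go(). A driver-side sketch (hypothetical helper, lengths in 32-bit words, polling instead of taking the interrupt):

    static void devcfg_dma_words(volatile uint32_t *regs, uint32_t src,
                                 uint32_t dst, uint32_t nwords)
    {
        regs[0x34 / 4] = 0x757BDF0D;   /* UNLOCK magic */
        regs[0x0C / 4] = 1u << 13;     /* W1C: clear stale INT_STS.DMA_DONE */
        regs[0x18 / 4] = src;          /* DMA_SRC_ADDR */
        regs[0x1C / 4] = dst;          /* DMA_DST_ADDR */
        regs[0x20 / 4] = nwords;       /* DMA_SRC_LEN, in words */
        regs[0x24 / 4] = nwords;       /* DMA_DST_LEN: queues and starts the DMA */
        while (!(regs[0x0C / 4] & (1u << 13))) {
            /* poll INT_STS.DMA_DONE */
        }
    }
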
diff --git a/hw/i2c/smbus_ich9.c b/hw/i2c/smbus_ich9.c
index 498f03e835..48fab22625 100644
--- a/hw/i2c/smbus_ich9.c
+++ b/hw/i2c/smbus_ich9.c
@@ -35,7 +35,6 @@
#include "hw/i386/ich9.h"
-#define TYPE_ICH9_SMB_DEVICE "ICH9 SMB"
#define ICH9_SMB_DEVICE(obj) \
OBJECT_CHECK(ICH9SMBState, (obj), TYPE_ICH9_SMB_DEVICE)
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 8ca203211a..5a594be8ee 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -33,6 +33,7 @@
#include "hw/timer/hpet.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
+#include "hw/acpi/cpu.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/loader.h"
@@ -43,6 +44,7 @@
#include "hw/acpi/tpm.h"
#include "sysemu/tpm_backend.h"
#include "hw/timer/mc146818rtc_regs.h"
+#include "sysemu/numa.h"
/* Supported chipsets: */
#include "hw/acpi/piix4.h"
@@ -58,6 +60,8 @@
#include "qapi/qmp/qint.h"
#include "qom/qom-qobject.h"
+#include "hw/acpi/ipmi.h"
+
/* These are used to size the ACPI tables for -M pc-i440fx-1.7 and
* -M pc-i440fx-2.0. Even if the actual amount of AML generated grows
* a little bit, there should be plenty of free space since the DSDT
@@ -327,12 +331,38 @@ build_fadt(GArray *table_data, BIOSLinker *linker, AcpiPmInfo *pm,
(void *)fadt, "FACP", sizeof(*fadt), 1, oem_id, oem_table_id);
}
+void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
+ CPUArchIdList *apic_ids, GArray *entry)
+{
+ int apic_id;
+ AcpiMadtProcessorApic *apic = acpi_data_push(entry, sizeof *apic);
+
+ apic_id = apic_ids->cpus[uid].arch_id;
+ apic->type = ACPI_APIC_PROCESSOR;
+ apic->length = sizeof(*apic);
+ apic->processor_id = uid;
+ apic->local_apic_id = apic_id;
+ if (apic_ids->cpus[uid].cpu != NULL) {
+ apic->flags = cpu_to_le32(1);
+ } else {
+ /* ACPI spec says that LAPIC entry for non present
+ * CPU may be omitted from MADT or it must be marked
+ * as disabled. However omitting non present CPU from
+ * MADT breaks hotplug on linux. So possible CPUs
+ * should be put in MADT but kept disabled.
+ */
+ apic->flags = cpu_to_le32(0);
+ }
+}
+
static void
build_madt(GArray *table_data, BIOSLinker *linker, PCMachineState *pcms)
{
MachineClass *mc = MACHINE_GET_CLASS(pcms);
CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(pcms));
int madt_start = table_data->len;
+ AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(pcms->acpi_dev);
+ AcpiDeviceIf *adev = ACPI_DEVICE_IF(pcms->acpi_dev);
AcpiMultipleApicTable *madt;
AcpiMadtIoApic *io_apic;
@@ -345,24 +375,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, PCMachineState *pcms)
madt->flags = cpu_to_le32(1);
for (i = 0; i < apic_ids->len; i++) {
- AcpiMadtProcessorApic *apic = acpi_data_push(table_data, sizeof *apic);
- int apic_id = apic_ids->cpus[i].arch_id;
-
- apic->type = ACPI_APIC_PROCESSOR;
- apic->length = sizeof(*apic);
- apic->processor_id = i;
- apic->local_apic_id = apic_id;
- if (apic_ids->cpus[i].cpu != NULL) {
- apic->flags = cpu_to_le32(1);
- } else {
- /* ACPI spec says that LAPIC entry for non present
- * CPU may be omitted from MADT or it must be marked
- * as disabled. However omitting non present CPU from
- * MADT breaks hotplug on linux. So possible CPUs
- * should be put in MADT but kept disabled.
- */
- apic->flags = cpu_to_le32(0);
- }
+ adevc->madt_cpu(adev, i, apic_ids, table_data);
}
g_free(apic_ids);
@@ -1334,8 +1347,10 @@ static Aml *build_com_device_aml(uint8_t uid)
static void build_isa_devices_aml(Aml *table)
{
ISADevice *fdc = pc_find_fdc0();
+ bool ambiguous;
Aml *scope = aml_scope("_SB.PCI0.ISA");
+ Object *obj = object_resolve_path_type("", TYPE_ISA_BUS, &ambiguous);
aml_append(scope, build_rtc_device_aml());
aml_append(scope, build_kbd_device_aml());
@@ -1347,6 +1362,14 @@ static void build_isa_devices_aml(Aml *table)
aml_append(scope, build_com_device_aml(1));
aml_append(scope, build_com_device_aml(2));
+ if (ambiguous) {
+ error_report("Multiple ISA busses, unable to define IPMI ACPI data");
+ } else if (!obj) {
+ error_report("No ISA bus, unable to define IPMI ACPI data");
+ } else {
+ build_acpi_ipmi_devices(scope, BUS(obj));
+ }
+
aml_append(table, scope);
}
@@ -1874,6 +1897,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
GPtrArray *mem_ranges = g_ptr_array_new_with_free_func(crs_range_free);
GPtrArray *io_ranges = g_ptr_array_new_with_free_func(crs_range_free);
PCMachineState *pcms = PC_MACHINE(machine);
+ PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(machine);
uint32_t nr_mem = machine->ram_slots;
int root_bus_limit = 0xFF;
PCIBus *bus = NULL;
@@ -1929,7 +1953,15 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
build_q35_pci0_int(dsdt);
}
- build_legacy_cpu_hotplug_aml(dsdt, machine, pm->cpu_hp_io_base);
+ if (pcmc->legacy_cpu_hotplug) {
+ build_legacy_cpu_hotplug_aml(dsdt, machine, pm->cpu_hp_io_base);
+ } else {
+ CPUHotplugFeatures opts = {
+            .acpi_1_compatible = true, .has_legacy_cphp = true
+ };
+ build_cpus_aml(dsdt, machine, opts, pm->cpu_hp_io_base,
+ "\\_SB.PCI0", "\\_GPE._E02");
+ }
build_memory_hotplug_aml(dsdt, nr_mem, pm->mem_hp_io_base,
pm->mem_hp_io_len);
@@ -2297,7 +2329,6 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
AcpiSratMemoryAffinity *numamem;
int i;
- uint64_t curnode;
int srat_start, numa_start, slots;
uint64_t mem_len, mem_base, next_base;
MachineClass *mc = MACHINE_GET_CLASS(machine);
@@ -2313,14 +2344,19 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
srat->reserved1 = cpu_to_le32(1);
for (i = 0; i < apic_ids->len; i++) {
+ int j;
int apic_id = apic_ids->cpus[i].arch_id;
core = acpi_data_push(table_data, sizeof *core);
core->type = ACPI_SRAT_PROCESSOR_APIC;
core->length = sizeof(*core);
core->local_apic_id = apic_id;
- curnode = pcms->node_cpu[apic_id];
- core->proximity_lo = curnode;
+ for (j = 0; j < nb_numa_nodes; j++) {
+ if (test_bit(i, numa_info[j].node_cpu)) {
+ core->proximity_lo = j;
+ break;
+ }
+ }
memset(core->proximity_hi, 0, 3);
core->local_sapic_eid = 0;
core->flags = cpu_to_le32(1);
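
Note: build_madt() no longer open-codes the LAPIC entries; it calls the AcpiDeviceIf madt_cpu hook so the installed ACPI device (PIIX4 PM or ICH9 LPC) decides how each CPU is described, with pc_madt_cpu_entry() as the x86 implementation. A sketch of how a concrete ACPI device class is expected to publish the hook (the class name is a placeholder; the real wiring lives in the piix4/ich9 patches of this series):

    static void my_acpi_device_class_init(ObjectClass *klass, void *data)
    {
        AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(klass);

        adevc->madt_cpu = pc_madt_cpu_entry;
    }
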
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 347718f938..5eba704477 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -24,6 +24,7 @@
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
+#include "hw/pci/pci_bus.h"
/*#define DEBUG_INTEL_IOMMU*/
#ifdef DEBUG_INTEL_IOMMU
@@ -1871,6 +1872,16 @@ static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
return ret;
}
+static void vtd_iommu_notify_started(MemoryRegion *iommu)
+{
+ VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
+
+ hw_error("Device at bus %s addr %02x.%d requires iommu notifier which "
+ "is currently not supported by intel-iommu emulation",
+ vtd_as->bus->qbus.name, PCI_SLOT(vtd_as->devfn),
+ PCI_FUNC(vtd_as->devfn));
+}
+
static const VMStateDescription vtd_vmstate = {
.name = "iommu-intel",
.unmigratable = 1,
@@ -1938,6 +1949,7 @@ static void vtd_init(IntelIOMMUState *s)
memset(s->womask, 0, DMAR_REG_SIZE);
s->iommu_ops.translate = vtd_iommu_translate;
+ s->iommu_ops.notify_started = vtd_iommu_notify_started;
s->root = 0;
s->root_extended = false;
s->dmar_enabled = false;
diff --git a/hw/i386/kvm/pci-assign.c b/hw/i386/kvm/pci-assign.c
index f9c901471d..3623aa1965 100644
--- a/hw/i386/kvm/pci-assign.c
+++ b/hw/i386/kvm/pci-assign.c
@@ -36,8 +36,6 @@
#include "kvm_i386.h"
#include "hw/pci/pci-assign.h"
-#define MSIX_PAGE_SIZE 0x1000
-
/* From linux/ioport.h */
#define IORESOURCE_IO 0x00000100 /* Resource type */
#define IORESOURCE_MEM 0x00000200
@@ -122,6 +120,7 @@ typedef struct AssignedDevice {
int *msi_virq;
MSIXTableEntry *msix_table;
hwaddr msix_table_addr;
+ uint16_t msix_table_size;
uint16_t msix_max;
MemoryRegion mmio;
char *configfd_name;
@@ -1310,6 +1309,7 @@ static int assigned_device_pci_cap_init(PCIDevice *pci_dev, Error **errp)
bar_nr = msix_table_entry & PCI_MSIX_FLAGS_BIRMASK;
msix_table_entry &= ~PCI_MSIX_FLAGS_BIRMASK;
dev->msix_table_addr = pci_region[bar_nr].base_addr + msix_table_entry;
+ dev->msix_table_size = msix_max * sizeof(MSIXTableEntry);
dev->msix_max = msix_max;
}
@@ -1633,7 +1633,7 @@ static void assigned_dev_msix_reset(AssignedDevice *dev)
return;
}
- memset(dev->msix_table, 0, MSIX_PAGE_SIZE);
+ memset(dev->msix_table, 0, dev->msix_table_size);
for (i = 0, entry = dev->msix_table; i < dev->msix_max; i++, entry++) {
entry->ctrl = cpu_to_le32(0x1); /* Masked */
@@ -1642,8 +1642,8 @@ static void assigned_dev_msix_reset(AssignedDevice *dev)
static void assigned_dev_register_msix_mmio(AssignedDevice *dev, Error **errp)
{
- dev->msix_table = mmap(NULL, MSIX_PAGE_SIZE, PROT_READ|PROT_WRITE,
- MAP_ANONYMOUS|MAP_PRIVATE, 0, 0);
+ dev->msix_table = mmap(NULL, dev->msix_table_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
if (dev->msix_table == MAP_FAILED) {
error_setg_errno(errp, errno, "failed to allocate msix_table");
dev->msix_table = NULL;
@@ -1653,7 +1653,7 @@ static void assigned_dev_register_msix_mmio(AssignedDevice *dev, Error **errp)
assigned_dev_msix_reset(dev);
memory_region_init_io(&dev->mmio, OBJECT(dev), &assigned_dev_msix_mmio_ops,
- dev, "assigned-dev-msix", MSIX_PAGE_SIZE);
+ dev, "assigned-dev-msix", dev->msix_table_size);
}
static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev)
@@ -1662,7 +1662,7 @@ static void assigned_dev_unregister_msix_mmio(AssignedDevice *dev)
return;
}
- if (munmap(dev->msix_table, MSIX_PAGE_SIZE) == -1) {
+ if (munmap(dev->msix_table, dev->msix_table_size) == -1) {
error_report("error unmapping msix_table! %s", strerror(errno));
}
dev->msix_table = NULL;
@@ -1891,8 +1891,4 @@ static void assigned_dev_load_option_rom(AssignedDevice *dev)
pci_assign_dev_load_option_rom(&dev->dev, OBJECT(dev), &size,
dev->host.domain, dev->host.bus,
dev->host.slot, dev->host.function);
-
- if (!size) {
- error_report("pci-assign: Invalid ROM.");
- }
}
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 7198ed533c..44a8f3bcbd 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -505,7 +505,7 @@ typedef struct Port92State {
MemoryRegion io;
uint8_t outport;
- qemu_irq *a20_out;
+ qemu_irq a20_out;
} Port92State;
static void port92_write(void *opaque, hwaddr addr, uint64_t val,
@@ -516,7 +516,7 @@ static void port92_write(void *opaque, hwaddr addr, uint64_t val,
DPRINTF("port92: write 0x%02" PRIx64 "\n", val);
s->outport = val;
- qemu_set_irq(*s->a20_out, (val >> 1) & 1);
+ qemu_set_irq(s->a20_out, (val >> 1) & 1);
if ((val & 1) && !(oldval & 1)) {
qemu_system_reset_request();
}
@@ -535,9 +535,7 @@ static uint64_t port92_read(void *opaque, hwaddr addr,
static void port92_init(ISADevice *dev, qemu_irq *a20_out)
{
- Port92State *s = PORT92(dev);
-
- s->a20_out = a20_out;
+ qdev_connect_gpio_out_named(DEVICE(dev), PORT92_A20_LINE, 0, *a20_out);
}
static const VMStateDescription vmstate_port92_isa = {
@@ -574,6 +572,8 @@ static void port92_initfn(Object *obj)
memory_region_init_io(&s->io, OBJECT(s), &port92_ops, s, "port92", 1);
s->outport = 0;
+
+ qdev_init_gpio_out_named(DEVICE(obj), &s->a20_out, PORT92_A20_LINE, 1);
}
static void port92_realizefn(DeviceState *dev, Error **errp)
@@ -1179,7 +1179,7 @@ void pc_machine_done(Notifier *notifier, void *data)
void pc_guest_info_init(PCMachineState *pcms)
{
- int i, j;
+ int i;
pcms->apic_xrupt_override = kvm_allows_irq0_override();
pcms->numa_nodes = nb_numa_nodes;
@@ -1189,20 +1189,6 @@ void pc_guest_info_init(PCMachineState *pcms)
pcms->node_mem[i] = numa_info[i].node_mem;
}
- pcms->node_cpu = g_malloc0(pcms->apic_id_limit *
- sizeof *pcms->node_cpu);
-
- for (i = 0; i < max_cpus; i++) {
- unsigned int apic_id = x86_cpu_apic_id_from_index(i);
- assert(apic_id < pcms->apic_id_limit);
- for (j = 0; j < nb_numa_nodes; j++) {
- if (test_bit(i, numa_info[j].node_cpu)) {
- pcms->node_cpu[apic_id] = j;
- break;
- }
- }
- }
-
pcms->machine_done.notify = pc_machine_done;
qemu_add_machine_init_done_notifier(&pcms->machine_done);
}
@@ -1707,6 +1693,49 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev,
out:
error_propagate(errp, local_err);
}
+static void pc_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ HotplugHandlerClass *hhc;
+ Error *local_err = NULL;
+ PCMachineState *pcms = PC_MACHINE(hotplug_dev);
+
+ hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
+ hhc->unplug_request(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
+
+ if (local_err) {
+ goto out;
+ }
+
+ out:
+ error_propagate(errp, local_err);
+
+}
+
+static void pc_cpu_unplug_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ HotplugHandlerClass *hhc;
+ Error *local_err = NULL;
+ PCMachineState *pcms = PC_MACHINE(hotplug_dev);
+
+ hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev);
+ hhc->unplug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err);
+
+ if (local_err) {
+ goto out;
+ }
+
+ /*
+ * TODO: enable unplug once generic CPU remove bits land
+ * for now guest will be able to eject CPU ACPI wise but
+ * it will come back again on machine reset.
+ */
+ /* object_unparent(OBJECT(dev)); */
+
+ out:
+ error_propagate(errp, local_err);
+}
static void pc_machine_device_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
@@ -1723,6 +1752,8 @@ static void pc_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev,
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
pc_dimm_unplug_request(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ pc_cpu_unplug_request_cb(hotplug_dev, dev, errp);
} else {
error_setg(errp, "acpi: device unplug request for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
@@ -1734,6 +1765,8 @@ static void pc_machine_device_unplug_cb(HotplugHandler *hotplug_dev,
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
pc_dimm_unplug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ pc_cpu_unplug_cb(hotplug_dev, dev, errp);
} else {
error_setg(errp, "acpi: device unplug for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 53bc968bd0..c7d70af253 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -445,9 +445,11 @@ DEFINE_I440FX_MACHINE(v2_7, "pc-i440fx-2.7", NULL,
static void pc_i440fx_2_6_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_2_7_machine_options(m);
m->is_default = 0;
m->alias = NULL;
+ pcmc->legacy_cpu_hotplug = true;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_6);
}
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index e4b541f7b2..04b2684d37 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -60,6 +60,7 @@ static void pc_q35_init(MachineState *machine)
PCIHostState *phb;
PCIBus *host_bus;
PCIDevice *lpc;
+ DeviceState *lpc_dev;
BusState *idebus[MAX_SATA_PORTS];
ISADevice *rtc_state;
MemoryRegion *system_io = get_system_io();
@@ -159,12 +160,18 @@ static void pc_q35_init(MachineState *machine)
q35_host = Q35_HOST_DEVICE(qdev_create(NULL, TYPE_Q35_HOST_DEVICE));
object_property_add_child(qdev_get_machine(), "q35", OBJECT(q35_host), NULL);
- q35_host->mch.ram_memory = ram_memory;
- q35_host->mch.pci_address_space = pci_memory;
- q35_host->mch.system_memory = get_system_memory();
- q35_host->mch.address_space_io = system_io;
- q35_host->mch.below_4g_mem_size = pcms->below_4g_mem_size;
- q35_host->mch.above_4g_mem_size = pcms->above_4g_mem_size;
+ object_property_set_link(OBJECT(q35_host), OBJECT(ram_memory),
+ MCH_HOST_PROP_RAM_MEM, NULL);
+ object_property_set_link(OBJECT(q35_host), OBJECT(pci_memory),
+ MCH_HOST_PROP_PCI_MEM, NULL);
+ object_property_set_link(OBJECT(q35_host), OBJECT(get_system_memory()),
+ MCH_HOST_PROP_SYSTEM_MEM, NULL);
+ object_property_set_link(OBJECT(q35_host), OBJECT(system_io),
+ MCH_HOST_PROP_IO_MEM, NULL);
+ object_property_set_int(OBJECT(q35_host), pcms->below_4g_mem_size,
+ PCI_HOST_BELOW_4G_MEM_SIZE, NULL);
+ object_property_set_int(OBJECT(q35_host), pcms->above_4g_mem_size,
+ PCI_HOST_ABOVE_4G_MEM_SIZE, NULL);
/* pci */
qdev_init_nofail(DEVICE(q35_host));
phb = PCI_HOST_BRIDGE(q35_host);
@@ -184,16 +191,15 @@ static void pc_q35_init(MachineState *machine)
PC_MACHINE_ACPI_DEVICE_PROP, &error_abort);
ich9_lpc = ICH9_LPC_DEVICE(lpc);
- ich9_lpc->pic = gsi;
- ich9_lpc->ioapic = gsi_state->ioapic_irq;
+ lpc_dev = DEVICE(lpc);
+ for (i = 0; i < GSI_NUM_PINS; i++) {
+ qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, gsi[i]);
+ }
pci_bus_irqs(host_bus, ich9_lpc_set_irq, ich9_lpc_map_irq, ich9_lpc,
ICH9_LPC_NB_PIRQS);
pci_bus_set_route_irq_fn(host_bus, ich9_route_intx_pin_to_irq);
isa_bus = ich9_lpc->isa_bus;
- /*end early*/
- isa_bus_irqs(isa_bus, gsi);
-
if (kvm_pic_in_kernel()) {
i8259 = kvm_i8259_init(isa_bus);
} else if (xen_enabled()) {
@@ -294,8 +300,10 @@ DEFINE_Q35_MACHINE(v2_7, "pc-q35-2.7", NULL,
static void pc_q35_2_6_machine_options(MachineClass *m)
{
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_2_7_machine_options(m);
m->alias = NULL;
+ pcmc->legacy_cpu_hotplug = true;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_6);
}
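
The q35 hunk above stops poking MCH fields directly and instead sets QOM link and integer properties on the host bridge before it is realized, which keeps the wiring visible to the property system. A sketch of how a device would expose such a link property so that object_property_set_link() can be used on it; this is an assumption about the device side, which is not shown in this diff, and ExampleHostState plus "ram-mem" are illustrative names:

    #include "qemu/osdep.h"
    #include "qom/object.h"
    #include "hw/qdev-core.h"
    #include "hw/qdev-properties.h"
    #include "exec/memory.h"
    #include "qapi/error.h"

    typedef struct ExampleHostState {
        DeviceState parent_obj;
        MemoryRegion *ram_memory;        /* target of the link property */
    } ExampleHostState;

    /* Sketch: publish a MemoryRegion link that the board may only set
     * before realize; realize then consumes s->ram_memory. */
    static void example_host_initfn(Object *obj)
    {
        ExampleHostState *s = (ExampleHostState *)obj;

        object_property_add_link(obj, "ram-mem", TYPE_MEMORY_REGION,
                                 (Object **)&s->ram_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 0, &error_abort);
    }
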
diff --git a/hw/i386/pci-assign-load-rom.c b/hw/i386/pci-assign-load-rom.c
index 4bbb08c955..0d8e4b2826 100644
--- a/hw/i386/pci-assign-load-rom.c
+++ b/hw/i386/pci-assign-load-rom.c
@@ -40,6 +40,9 @@ void *pci_assign_dev_load_option_rom(PCIDevice *dev, struct Object *owner,
domain, bus, slot, function);
if (stat(rom_file, &st)) {
+ if (errno != ENOENT) {
+ error_report("pci-assign: Invalid ROM.");
+ }
return NULL;
}
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index 502d4f1c7b..b1a7b65a7b 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -30,7 +30,7 @@
#include "qemu/error-report.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
-#include "internal.h"
+#include <hw/ide/internal.h>
#include <hw/ide/pci.h>
#include <hw/ide/ahci.h>
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index fa57352fc8..56cc50661f 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -406,7 +406,7 @@ static void pmac_ide_flush(DBDMA_io *io)
IDEState *s = idebus_active_if(&m->bus);
if (s->bus->dma->aiocb) {
- blk_drain_all();
+ blk_drain(s->blk);
}
}
diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c
index 1d932ec19f..dc57e2c762 100644
--- a/hw/input/pckbd.c
+++ b/hw/input/pckbd.c
@@ -146,7 +146,7 @@ typedef struct KBDState {
qemu_irq irq_kbd;
qemu_irq irq_mouse;
- qemu_irq *a20_out;
+ qemu_irq a20_out;
hwaddr mask;
} KBDState;
@@ -224,9 +224,7 @@ static void outport_write(KBDState *s, uint32_t val)
{
DPRINTF("kbd: write outport=0x%02x\n", val);
s->outport = val;
- if (s->a20_out) {
- qemu_set_irq(*s->a20_out, (val >> 1) & 1);
- }
+ qemu_set_irq(s->a20_out, (val >> 1) & 1);
if (!(val & 1)) {
qemu_system_reset_request();
}
@@ -295,15 +293,11 @@ static void kbd_write_command(void *opaque, hwaddr addr,
kbd_queue(s, s->outport, 0);
break;
case KBD_CCMD_ENABLE_A20:
- if (s->a20_out) {
- qemu_irq_raise(*s->a20_out);
- }
+ qemu_irq_raise(s->a20_out);
s->outport |= KBD_OUT_A20;
break;
case KBD_CCMD_DISABLE_A20:
- if (s->a20_out) {
- qemu_irq_lower(*s->a20_out);
- }
+ qemu_irq_lower(s->a20_out);
s->outport &= ~KBD_OUT_A20;
break;
case KBD_CCMD_RESET:
@@ -507,10 +501,7 @@ void i8042_isa_mouse_fake_event(void *opaque)
void i8042_setup_a20_line(ISADevice *dev, qemu_irq *a20_out)
{
- ISAKBDState *isa = I8042(dev);
- KBDState *s = &isa->kbd;
-
- s->a20_out = a20_out;
+ qdev_connect_gpio_out_named(DEVICE(dev), I8042_A20_LINE, 0, *a20_out);
}
static const VMStateDescription vmstate_kbd_isa = {
@@ -552,6 +543,8 @@ static void i8042_initfn(Object *obj)
"i8042-data", 1);
memory_region_init_io(isa_s->io + 1, obj, &i8042_cmd_ops, s,
"i8042-cmd", 1);
+
+ qdev_init_gpio_out_named(DEVICE(obj), &s->a20_out, I8042_A20_LINE, 1);
}
static void i8042_realizefn(DeviceState *dev, Error **errp)
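
The pckbd conversion above is the general "named GPIO" pattern: the device publishes its output line in instance_init, and the board (here via i8042_setup_a20_line()) connects it later with qdev_connect_gpio_out_named(). A minimal sketch with hypothetical type and line names, not taken from the patch:

    #include "qemu/osdep.h"
    #include "hw/qdev-core.h"
    #include "hw/irq.h"

    typedef struct ExampleDevState {
        DeviceState parent_obj;
        qemu_irq out_line;                   /* driven with qemu_set_irq() */
    } ExampleDevState;

    /* Device side: publish one output line under a name. */
    static void example_dev_init(Object *obj)
    {
        ExampleDevState *s = (ExampleDevState *)obj;

        qdev_init_gpio_out_named(DEVICE(obj), &s->out_line, "example-a20", 1);
    }

    /* Board side: route that line into some consumer's input GPIO. */
    static void example_board_wire(DeviceState *src, DeviceState *dst)
    {
        qemu_irq in = qdev_get_gpio_in_named(dst, "a20-in", 0); /* hypothetical */

        qdev_connect_gpio_out_named(src, "example-a20", 0, in);
    }
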
diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs
index c7bbf88edf..530df2eba6 100644
--- a/hw/intc/Makefile.objs
+++ b/hw/intc/Makefile.objs
@@ -30,6 +30,7 @@ obj-$(CONFIG_OPENPIC_KVM) += openpic_kvm.o
obj-$(CONFIG_RASPI) += bcm2835_ic.o bcm2836_control.o
obj-$(CONFIG_SH4) += sh_intc.o
obj-$(CONFIG_XICS) += xics.o
+obj-$(CONFIG_XICS_SPAPR) += xics_spapr.o
obj-$(CONFIG_XICS_KVM) += xics_kvm.o
obj-$(CONFIG_ALLWINNER_A10_PIC) += allwinner-a10-pic.o
obj-$(CONFIG_S390_FLIC) += s390_flic.o
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 5b2972ea9c..4633172bec 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -975,6 +975,7 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
r = CP_ACCESS_TRAP_EL3;
}
+ break;
default:
g_assert_not_reached();
}
@@ -1006,6 +1007,7 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env,
if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
r = CP_ACCESS_TRAP_EL3;
}
+ break;
default:
g_assert_not_reached();
}
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 890d5d7442..06d8db6bd6 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -187,11 +187,11 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset)
case 0x1c: /* SysTick Calibration Value. */
return 10000;
case 0xd00: /* CPUID Base. */
- cpu = ARM_CPU(current_cpu);
+ cpu = ARM_CPU(qemu_get_cpu(0));
return cpu->midr;
case 0xd04: /* Interrupt Control State. */
/* VECTACTIVE */
- cpu = ARM_CPU(current_cpu);
+ cpu = ARM_CPU(qemu_get_cpu(0));
val = cpu->env.v7m.exception;
if (val == 1023) {
val = 0;
@@ -222,7 +222,7 @@ static uint32_t nvic_readl(nvic_state *s, uint32_t offset)
val |= (1 << 31);
return val;
case 0xd08: /* Vector Table Offset. */
- cpu = ARM_CPU(current_cpu);
+ cpu = ARM_CPU(qemu_get_cpu(0));
return cpu->env.v7m.vecbase;
case 0xd0c: /* Application Interrupt/Reset Control. */
return 0xfa050000;
@@ -349,7 +349,7 @@ static void nvic_writel(nvic_state *s, uint32_t offset, uint32_t value)
}
break;
case 0xd08: /* Vector Table Offset. */
- cpu = ARM_CPU(current_cpu);
+ cpu = ARM_CPU(qemu_get_cpu(0));
cpu->env.v7m.vecbase = value & 0xffffff80;
break;
case 0xd0c: /* Application Interrupt/Reset Control. */
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
index cce7f3d112..cd48f42046 100644
--- a/hw/intc/xics.c
+++ b/hw/intc/xics.c
@@ -32,12 +32,11 @@
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
-#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"
-static int get_cpu_index_by_dt_id(int cpu_dt_id)
+int xics_get_cpu_index_by_dt_id(int cpu_dt_id)
{
PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);
@@ -48,31 +47,31 @@ static int get_cpu_index_by_dt_id(int cpu_dt_id)
return -1;
}
-void xics_cpu_destroy(XICSState *icp, PowerPCCPU *cpu)
+void xics_cpu_destroy(XICSState *xics, PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
- ICPState *ss = &icp->ss[cs->cpu_index];
+ ICPState *ss = &xics->ss[cs->cpu_index];
- assert(cs->cpu_index < icp->nr_servers);
+ assert(cs->cpu_index < xics->nr_servers);
assert(cs == ss->cs);
ss->output = NULL;
ss->cs = NULL;
}
-void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
+void xics_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
- ICPState *ss = &icp->ss[cs->cpu_index];
- XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
+ ICPState *ss = &xics->ss[cs->cpu_index];
+ XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
- assert(cs->cpu_index < icp->nr_servers);
+ assert(cs->cpu_index < xics->nr_servers);
ss->cs = cs;
if (info->cpu_setup) {
- info->cpu_setup(icp, cpu);
+ info->cpu_setup(xics, cpu);
}
switch (PPC_INPUT(env)) {
@@ -96,21 +95,21 @@ void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
*/
static void xics_common_reset(DeviceState *d)
{
- XICSState *icp = XICS_COMMON(d);
+ XICSState *xics = XICS_COMMON(d);
int i;
- for (i = 0; i < icp->nr_servers; i++) {
- device_reset(DEVICE(&icp->ss[i]));
+ for (i = 0; i < xics->nr_servers; i++) {
+ device_reset(DEVICE(&xics->ss[i]));
}
- device_reset(DEVICE(icp->ics));
+ device_reset(DEVICE(xics->ics));
}
static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- XICSState *icp = XICS_COMMON(obj);
- int64_t value = icp->nr_irqs;
+ XICSState *xics = XICS_COMMON(obj);
+ int64_t value = xics->nr_irqs;
visit_type_int(v, name, &value, errp);
}
@@ -118,8 +117,8 @@ static void xics_prop_get_nr_irqs(Object *obj, Visitor *v, const char *name,
static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
- XICSState *icp = XICS_COMMON(obj);
- XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
+ XICSState *xics = XICS_COMMON(obj);
+ XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
Error *error = NULL;
int64_t value;
@@ -128,23 +127,23 @@ static void xics_prop_set_nr_irqs(Object *obj, Visitor *v, const char *name,
error_propagate(errp, error);
return;
}
- if (icp->nr_irqs) {
+ if (xics->nr_irqs) {
error_setg(errp, "Number of interrupts is already set to %u",
- icp->nr_irqs);
+ xics->nr_irqs);
return;
}
assert(info->set_nr_irqs);
- assert(icp->ics);
- info->set_nr_irqs(icp, value, errp);
+ assert(xics->ics);
+ info->set_nr_irqs(xics, value, errp);
}
static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
- XICSState *icp = XICS_COMMON(obj);
- int64_t value = icp->nr_servers;
+ XICSState *xics = XICS_COMMON(obj);
+ int64_t value = xics->nr_servers;
visit_type_int(v, name, &value, errp);
}
@@ -153,8 +152,8 @@ static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
- XICSState *icp = XICS_COMMON(obj);
- XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
+ XICSState *xics = XICS_COMMON(obj);
+ XICSStateClass *info = XICS_COMMON_GET_CLASS(xics);
Error *error = NULL;
int64_t value;
@@ -163,14 +162,14 @@ static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
error_propagate(errp, error);
return;
}
- if (icp->nr_servers) {
+ if (xics->nr_servers) {
error_setg(errp, "Number of servers is already set to %u",
- icp->nr_servers);
+ xics->nr_servers);
return;
}
assert(info->set_nr_servers);
- info->set_nr_servers(icp, value, errp);
+ info->set_nr_servers(xics, value, errp);
}
static void xics_common_initfn(Object *obj)
@@ -213,9 +212,9 @@ static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);
-static void icp_check_ipi(XICSState *icp, int server)
+static void icp_check_ipi(XICSState *xics, int server)
{
- ICPState *ss = icp->ss + server;
+ ICPState *ss = xics->ss + server;
if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
return;
@@ -224,7 +223,7 @@ static void icp_check_ipi(XICSState *icp, int server)
trace_xics_icp_check_ipi(server, ss->mfrr);
if (XISR(ss)) {
- ics_reject(icp->ics, XISR(ss));
+ ics_reject(xics->ics, XISR(ss));
}
ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
@@ -232,19 +231,19 @@ static void icp_check_ipi(XICSState *icp, int server)
qemu_irq_raise(ss->output);
}
-static void icp_resend(XICSState *icp, int server)
+static void icp_resend(XICSState *xics, int server)
{
- ICPState *ss = icp->ss + server;
+ ICPState *ss = xics->ss + server;
if (ss->mfrr < CPPR(ss)) {
- icp_check_ipi(icp, server);
+ icp_check_ipi(xics, server);
}
- ics_resend(icp->ics);
+ ics_resend(xics->ics);
}
-static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
+void icp_set_cppr(XICSState *xics, int server, uint8_t cppr)
{
- ICPState *ss = icp->ss + server;
+ ICPState *ss = xics->ss + server;
uint8_t old_cppr;
uint32_t old_xisr;
@@ -257,26 +256,26 @@ static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
ss->xirr &= ~XISR_MASK; /* Clear XISR */
ss->pending_priority = 0xff;
qemu_irq_lower(ss->output);
- ics_reject(icp->ics, old_xisr);
+ ics_reject(xics->ics, old_xisr);
}
} else {
if (!XISR(ss)) {
- icp_resend(icp, server);
+ icp_resend(xics, server);
}
}
}
-static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
+void icp_set_mfrr(XICSState *xics, int server, uint8_t mfrr)
{
- ICPState *ss = icp->ss + server;
+ ICPState *ss = xics->ss + server;
ss->mfrr = mfrr;
if (mfrr < CPPR(ss)) {
- icp_check_ipi(icp, server);
+ icp_check_ipi(xics, server);
}
}
-static uint32_t icp_accept(ICPState *ss)
+uint32_t icp_accept(ICPState *ss)
{
uint32_t xirr = ss->xirr;
@@ -289,31 +288,39 @@ static uint32_t icp_accept(ICPState *ss)
return xirr;
}
-static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
+uint32_t icp_ipoll(ICPState *ss, uint32_t *mfrr)
{
- ICPState *ss = icp->ss + server;
+ if (mfrr) {
+ *mfrr = ss->mfrr;
+ }
+ return ss->xirr;
+}
+
+void icp_eoi(XICSState *xics, int server, uint32_t xirr)
+{
+ ICPState *ss = xics->ss + server;
/* Send EOI -> ICS */
ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
trace_xics_icp_eoi(server, xirr, ss->xirr);
- ics_eoi(icp->ics, xirr & XISR_MASK);
+ ics_eoi(xics->ics, xirr & XISR_MASK);
if (!XISR(ss)) {
- icp_resend(icp, server);
+ icp_resend(xics, server);
}
}
-static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
+static void icp_irq(XICSState *xics, int server, int nr, uint8_t priority)
{
- ICPState *ss = icp->ss + server;
+ ICPState *ss = xics->ss + server;
trace_xics_icp_irq(server, nr, priority);
if ((priority >= CPPR(ss))
|| (XISR(ss) && (ss->pending_priority <= priority))) {
- ics_reject(icp->ics, nr);
+ ics_reject(xics->ics, nr);
} else {
if (XISR(ss)) {
- ics_reject(icp->ics, XISR(ss));
+ ics_reject(xics->ics, XISR(ss));
}
ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
ss->pending_priority = priority;
@@ -390,12 +397,6 @@ static const TypeInfo icp_info = {
/*
* ICS: Source layer
*/
-static int ics_valid_irq(ICSState *ics, uint32_t nr)
-{
- return (nr >= ics->offset)
- && (nr < (ics->offset + ics->nr_irqs));
-}
-
static void resend_msi(ICSState *ics, int srcno)
{
ICSIRQState *irq = ics->irqs + srcno;
@@ -404,7 +405,7 @@ static void resend_msi(ICSState *ics, int srcno)
if (irq->status & XICS_STATUS_REJECTED) {
irq->status &= ~XICS_STATUS_REJECTED;
if (irq->priority != 0xff) {
- icp_irq(ics->icp, irq->server, srcno + ics->offset,
+ icp_irq(ics->xics, irq->server, srcno + ics->offset,
irq->priority);
}
}
@@ -418,7 +419,7 @@ static void resend_lsi(ICSState *ics, int srcno)
&& (irq->status & XICS_STATUS_ASSERTED)
&& !(irq->status & XICS_STATUS_SENT)) {
irq->status |= XICS_STATUS_SENT;
- icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
+ icp_irq(ics->xics, irq->server, srcno + ics->offset, irq->priority);
}
}
@@ -433,7 +434,7 @@ static void set_irq_msi(ICSState *ics, int srcno, int val)
irq->status |= XICS_STATUS_MASKED_PENDING;
trace_xics_masked_pending();
} else {
- icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
+ icp_irq(ics->xics, irq->server, srcno + ics->offset, irq->priority);
}
}
}
@@ -472,7 +473,7 @@ static void write_xive_msi(ICSState *ics, int srcno)
}
irq->status &= ~XICS_STATUS_MASKED_PENDING;
- icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
+ icp_irq(ics->xics, irq->server, srcno + ics->offset, irq->priority);
}
static void write_xive_lsi(ICSState *ics, int srcno)
@@ -480,8 +481,8 @@ static void write_xive_lsi(ICSState *ics, int srcno)
resend_lsi(ics, srcno);
}
-static void ics_write_xive(ICSState *ics, int nr, int server,
- uint8_t priority, uint8_t saved_priority)
+void ics_write_xive(ICSState *ics, int nr, int server,
+ uint8_t priority, uint8_t saved_priority)
{
int srcno = nr - ics->offset;
ICSIRQState *irq = ics->irqs + srcno;
@@ -557,8 +558,8 @@ static int ics_post_load(ICSState *ics, int version_id)
{
int i;
- for (i = 0; i < ics->icp->nr_servers; i++) {
- icp_resend(ics->icp, i);
+ for (i = 0; i < ics->xics->nr_servers; i++) {
+ icp_resend(ics->xics, i);
}
return 0;
@@ -658,14 +659,14 @@ static const TypeInfo ics_info = {
/*
* Exported functions
*/
-static int xics_find_source(XICSState *icp, int irq)
+int xics_find_source(XICSState *xics, int irq)
{
int sources = 1;
int src;
/* FIXME: implement multiple sources */
for (src = 0; src < sources; ++src) {
- ICSState *ics = &icp->ics[src];
+ ICSState *ics = &xics->ics[src];
if (ics_valid_irq(ics, irq)) {
return src;
}
@@ -674,19 +675,19 @@ static int xics_find_source(XICSState *icp, int irq)
return -1;
}
-qemu_irq xics_get_qirq(XICSState *icp, int irq)
+qemu_irq xics_get_qirq(XICSState *xics, int irq)
{
- int src = xics_find_source(icp, irq);
+ int src = xics_find_source(xics, irq);
if (src >= 0) {
- ICSState *ics = &icp->ics[src];
+ ICSState *ics = &xics->ics[src];
return ics->qirqs[irq - ics->offset];
}
return NULL;
}
-static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
+void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
{
assert(!(ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MASK));
@@ -694,412 +695,9 @@ static void ics_set_irq_type(ICSState *ics, int srcno, bool lsi)
lsi ? XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI;
}
-void xics_set_irq_type(XICSState *icp, int irq, bool lsi)
-{
- int src = xics_find_source(icp, irq);
- ICSState *ics;
-
- assert(src >= 0);
-
- ics = &icp->ics[src];
- ics_set_irq_type(ics, irq - ics->offset, lsi);
-}
-
-#define ICS_IRQ_FREE(ics, srcno) \
- (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
-
-static int ics_find_free_block(ICSState *ics, int num, int alignnum)
-{
- int first, i;
-
- for (first = 0; first < ics->nr_irqs; first += alignnum) {
- if (num > (ics->nr_irqs - first)) {
- return -1;
- }
- for (i = first; i < first + num; ++i) {
- if (!ICS_IRQ_FREE(ics, i)) {
- break;
- }
- }
- if (i == (first + num)) {
- return first;
- }
- }
-
- return -1;
-}
-
-int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp)
-{
- ICSState *ics = &icp->ics[src];
- int irq;
-
- if (irq_hint) {
- assert(src == xics_find_source(icp, irq_hint));
- if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
- error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
- return -1;
- }
- irq = irq_hint;
- } else {
- irq = ics_find_free_block(ics, 1, 1);
- if (irq < 0) {
- error_setg(errp, "can't allocate IRQ: no IRQ left");
- return -1;
- }
- irq += ics->offset;
- }
-
- ics_set_irq_type(ics, irq - ics->offset, lsi);
- trace_xics_alloc(src, irq);
-
- return irq;
-}
-
-/*
- * Allocate block of consecutive IRQs, and return the number of the first IRQ in the block.
- * If align==true, aligns the first IRQ number to num.
- */
-int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align,
- Error **errp)
-{
- int i, first = -1;
- ICSState *ics = &icp->ics[src];
-
- assert(src == 0);
- /*
- * MSIMesage::data is used for storing VIRQ so
- * it has to be aligned to num to support multiple
- * MSI vectors. MSI-X is not affected by this.
- * The hint is used for the first IRQ, the rest should
- * be allocated continuously.
- */
- if (align) {
- assert((num == 1) || (num == 2) || (num == 4) ||
- (num == 8) || (num == 16) || (num == 32));
- first = ics_find_free_block(ics, num, num);
- } else {
- first = ics_find_free_block(ics, num, 1);
- }
- if (first < 0) {
- error_setg(errp, "can't find a free %d-IRQ block", num);
- return -1;
- }
-
- if (first >= 0) {
- for (i = first; i < first + num; ++i) {
- ics_set_irq_type(ics, i, lsi);
- }
- }
- first += ics->offset;
-
- trace_xics_alloc_block(src, first, num, lsi, align);
-
- return first;
-}
-
-static void ics_free(ICSState *ics, int srcno, int num)
-{
- int i;
-
- for (i = srcno; i < srcno + num; ++i) {
- if (ICS_IRQ_FREE(ics, i)) {
- trace_xics_ics_free_warn(ics - ics->icp->ics, i + ics->offset);
- }
- memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
- }
-}
-
-void xics_free(XICSState *icp, int irq, int num)
-{
- int src = xics_find_source(icp, irq);
-
- if (src >= 0) {
- ICSState *ics = &icp->ics[src];
-
- /* FIXME: implement multiple sources */
- assert(src == 0);
-
- trace_xics_ics_free(ics - icp->ics, irq, num);
- ics_free(ics, irq - ics->offset, num);
- }
-}
-
-/*
- * Guest interfaces
- */
-
-static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- target_ulong opcode, target_ulong *args)
-{
- CPUState *cs = CPU(cpu);
- target_ulong cppr = args[0];
-
- icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
- return H_SUCCESS;
-}
-
-static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- target_ulong opcode, target_ulong *args)
-{
- target_ulong server = get_cpu_index_by_dt_id(args[0]);
- target_ulong mfrr = args[1];
-
- if (server >= spapr->icp->nr_servers) {
- return H_PARAMETER;
- }
-
- icp_set_mfrr(spapr->icp, server, mfrr);
- return H_SUCCESS;
-}
-
-static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- target_ulong opcode, target_ulong *args)
-{
- CPUState *cs = CPU(cpu);
- uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);
-
- args[0] = xirr;
- return H_SUCCESS;
-}
-
-static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- target_ulong opcode, target_ulong *args)
-{
- CPUState *cs = CPU(cpu);
- ICPState *ss = &spapr->icp->ss[cs->cpu_index];
- uint32_t xirr = icp_accept(ss);
-
- args[0] = xirr;
- args[1] = cpu_get_host_ticks();
- return H_SUCCESS;
-}
-
-static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- target_ulong opcode, target_ulong *args)
-{
- CPUState *cs = CPU(cpu);
- target_ulong xirr = args[0];
-
- icp_eoi(spapr->icp, cs->cpu_index, xirr);
- return H_SUCCESS;
-}
-
-static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- target_ulong opcode, target_ulong *args)
-{
- CPUState *cs = CPU(cpu);
- ICPState *ss = &spapr->icp->ss[cs->cpu_index];
-
- args[0] = ss->xirr;
- args[1] = ss->mfrr;
-
- return H_SUCCESS;
-}
-
-static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- uint32_t token,
- uint32_t nargs, target_ulong args,
- uint32_t nret, target_ulong rets)
-{
- ICSState *ics = spapr->icp->ics;
- uint32_t nr, server, priority;
-
- if ((nargs != 3) || (nret != 1)) {
- rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
- return;
- }
-
- nr = rtas_ld(args, 0);
- server = get_cpu_index_by_dt_id(rtas_ld(args, 1));
- priority = rtas_ld(args, 2);
-
- if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
- || (priority > 0xff)) {
- rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
- return;
- }
-
- ics_write_xive(ics, nr, server, priority, priority);
-
- rtas_st(rets, 0, RTAS_OUT_SUCCESS);
-}
-
-static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- uint32_t token,
- uint32_t nargs, target_ulong args,
- uint32_t nret, target_ulong rets)
-{
- ICSState *ics = spapr->icp->ics;
- uint32_t nr;
-
- if ((nargs != 1) || (nret != 3)) {
- rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
- return;
- }
-
- nr = rtas_ld(args, 0);
-
- if (!ics_valid_irq(ics, nr)) {
- rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
- return;
- }
-
- rtas_st(rets, 0, RTAS_OUT_SUCCESS);
- rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
- rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
-}
-
-static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- uint32_t token,
- uint32_t nargs, target_ulong args,
- uint32_t nret, target_ulong rets)
-{
- ICSState *ics = spapr->icp->ics;
- uint32_t nr;
-
- if ((nargs != 1) || (nret != 1)) {
- rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
- return;
- }
-
- nr = rtas_ld(args, 0);
-
- if (!ics_valid_irq(ics, nr)) {
- rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
- return;
- }
-
- ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
- ics->irqs[nr - ics->offset].priority);
-
- rtas_st(rets, 0, RTAS_OUT_SUCCESS);
-}
-
-static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
- uint32_t token,
- uint32_t nargs, target_ulong args,
- uint32_t nret, target_ulong rets)
-{
- ICSState *ics = spapr->icp->ics;
- uint32_t nr;
-
- if ((nargs != 1) || (nret != 1)) {
- rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
- return;
- }
-
- nr = rtas_ld(args, 0);
-
- if (!ics_valid_irq(ics, nr)) {
- rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
- return;
- }
-
- ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
- ics->irqs[nr - ics->offset].saved_priority,
- ics->irqs[nr - ics->offset].saved_priority);
-
- rtas_st(rets, 0, RTAS_OUT_SUCCESS);
-}
-
-/*
- * XICS
- */
-
-static void xics_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp)
-{
- icp->nr_irqs = icp->ics->nr_irqs = nr_irqs;
-}
-
-static void xics_set_nr_servers(XICSState *icp, uint32_t nr_servers,
- Error **errp)
-{
- int i;
-
- icp->nr_servers = nr_servers;
-
- icp->ss = g_malloc0(icp->nr_servers*sizeof(ICPState));
- for (i = 0; i < icp->nr_servers; i++) {
- char buffer[32];
- object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP);
- snprintf(buffer, sizeof(buffer), "icp[%d]", i);
- object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]),
- errp);
- }
-}
-
-static void xics_realize(DeviceState *dev, Error **errp)
-{
- XICSState *icp = XICS(dev);
- Error *error = NULL;
- int i;
-
- if (!icp->nr_servers) {
- error_setg(errp, "Number of servers needs to be greater 0");
- return;
- }
-
- /* Registration of global state belongs into realize */
- spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
- spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
- spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
- spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on);
-
- spapr_register_hypercall(H_CPPR, h_cppr);
- spapr_register_hypercall(H_IPI, h_ipi);
- spapr_register_hypercall(H_XIRR, h_xirr);
- spapr_register_hypercall(H_XIRR_X, h_xirr_x);
- spapr_register_hypercall(H_EOI, h_eoi);
- spapr_register_hypercall(H_IPOLL, h_ipoll);
-
- object_property_set_bool(OBJECT(icp->ics), true, "realized", &error);
- if (error) {
- error_propagate(errp, error);
- return;
- }
-
- for (i = 0; i < icp->nr_servers; i++) {
- object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error);
- if (error) {
- error_propagate(errp, error);
- return;
- }
- }
-}
-
-static void xics_initfn(Object *obj)
-{
- XICSState *xics = XICS(obj);
-
- xics->ics = ICS(object_new(TYPE_ICS));
- object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
- xics->ics->icp = xics;
-}
-
-static void xics_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- XICSStateClass *xsc = XICS_CLASS(oc);
-
- dc->realize = xics_realize;
- xsc->set_nr_irqs = xics_set_nr_irqs;
- xsc->set_nr_servers = xics_set_nr_servers;
-}
-
-static const TypeInfo xics_info = {
- .name = TYPE_XICS,
- .parent = TYPE_XICS_COMMON,
- .instance_size = sizeof(XICSState),
- .class_size = sizeof(XICSStateClass),
- .class_init = xics_class_init,
- .instance_init = xics_initfn,
-};
-
static void xics_register_types(void)
{
type_register_static(&xics_common_info);
- type_register_static(&xics_info);
type_register_static(&ics_info);
type_register_static(&icp_info);
}
diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c
index b17d6a9f43..edbd62fd1b 100644
--- a/hw/intc/xics_kvm.c
+++ b/hw/intc/xics_kvm.c
@@ -145,7 +145,7 @@ static const TypeInfo icp_kvm_info = {
*/
static void ics_get_kvm_state(ICSState *ics)
{
- KVMXICSState *icpkvm = KVM_XICS(ics->icp);
+ KVMXICSState *xicskvm = XICS_SPAPR_KVM(ics->xics);
uint64_t state;
struct kvm_device_attr attr = {
.flags = 0,
@@ -160,7 +160,7 @@ static void ics_get_kvm_state(ICSState *ics)
attr.attr = i + ics->offset;
- ret = ioctl(icpkvm->kernel_xics_fd, KVM_GET_DEVICE_ATTR, &attr);
+ ret = ioctl(xicskvm->kernel_xics_fd, KVM_GET_DEVICE_ATTR, &attr);
if (ret != 0) {
error_report("Unable to retrieve KVM interrupt controller state"
" for IRQ %d: %s", i + ics->offset, strerror(errno));
@@ -204,7 +204,7 @@ static void ics_get_kvm_state(ICSState *ics)
static int ics_set_kvm_state(ICSState *ics, int version_id)
{
- KVMXICSState *icpkvm = KVM_XICS(ics->icp);
+ KVMXICSState *xicskvm = XICS_SPAPR_KVM(ics->xics);
uint64_t state;
struct kvm_device_attr attr = {
.flags = 0,
@@ -238,7 +238,7 @@ static int ics_set_kvm_state(ICSState *ics, int version_id)
}
}
- ret = ioctl(icpkvm->kernel_xics_fd, KVM_SET_DEVICE_ATTR, &attr);
+ ret = ioctl(xicskvm->kernel_xics_fd, KVM_SET_DEVICE_ATTR, &attr);
if (ret != 0) {
error_report("Unable to restore KVM interrupt controller state"
" for IRQs %d: %s", i + ics->offset, strerror(errno));
@@ -324,17 +324,17 @@ static const TypeInfo ics_kvm_info = {
/*
* XICS-KVM
*/
-static void xics_kvm_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
+static void xics_kvm_cpu_setup(XICSState *xics, PowerPCCPU *cpu)
{
CPUState *cs;
ICPState *ss;
- KVMXICSState *icpkvm = KVM_XICS(icp);
+ KVMXICSState *xicskvm = XICS_SPAPR_KVM(xics);
cs = CPU(cpu);
- ss = &icp->ss[cs->cpu_index];
+ ss = &xics->ss[cs->cpu_index];
- assert(cs->cpu_index < icp->nr_servers);
- if (icpkvm->kernel_xics_fd == -1) {
+ assert(cs->cpu_index < xics->nr_servers);
+ if (xicskvm->kernel_xics_fd == -1) {
abort();
}
@@ -347,11 +347,12 @@ static void xics_kvm_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
return;
}
- if (icpkvm->kernel_xics_fd != -1) {
+ if (xicskvm->kernel_xics_fd != -1) {
int ret;
ret = kvm_vcpu_enable_cap(cs, KVM_CAP_IRQ_XICS, 0,
- icpkvm->kernel_xics_fd, kvm_arch_vcpu_id(cs));
+ xicskvm->kernel_xics_fd,
+ kvm_arch_vcpu_id(cs));
if (ret < 0) {
error_report("Unable to connect CPU%ld to kernel XICS: %s",
kvm_arch_vcpu_id(cs), strerror(errno));
@@ -361,24 +362,25 @@ static void xics_kvm_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
}
}
-static void xics_kvm_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp)
+static void xics_kvm_set_nr_irqs(XICSState *xics, uint32_t nr_irqs,
+ Error **errp)
{
- icp->nr_irqs = icp->ics->nr_irqs = nr_irqs;
+ xics->nr_irqs = xics->ics->nr_irqs = nr_irqs;
}
-static void xics_kvm_set_nr_servers(XICSState *icp, uint32_t nr_servers,
+static void xics_kvm_set_nr_servers(XICSState *xics, uint32_t nr_servers,
Error **errp)
{
int i;
- icp->nr_servers = nr_servers;
+ xics->nr_servers = nr_servers;
- icp->ss = g_malloc0(icp->nr_servers*sizeof(ICPState));
- for (i = 0; i < icp->nr_servers; i++) {
+ xics->ss = g_malloc0(xics->nr_servers * sizeof(ICPState));
+ for (i = 0; i < xics->nr_servers; i++) {
char buffer[32];
- object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_KVM_ICP);
+ object_initialize(&xics->ss[i], sizeof(xics->ss[i]), TYPE_KVM_ICP);
snprintf(buffer, sizeof(buffer), "icp[%d]", i);
- object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]),
+ object_property_add_child(OBJECT(xics), buffer, OBJECT(&xics->ss[i]),
errp);
}
}
@@ -394,8 +396,8 @@ static void rtas_dummy(PowerPCCPU *cpu, sPAPRMachineState *spapr,
static void xics_kvm_realize(DeviceState *dev, Error **errp)
{
- KVMXICSState *icpkvm = KVM_XICS(dev);
- XICSState *icp = XICS_COMMON(dev);
+ KVMXICSState *xicskvm = XICS_SPAPR_KVM(dev);
+ XICSState *xics = XICS_COMMON(dev);
int i, rc;
Error *error = NULL;
struct kvm_create_device xics_create_device = {
@@ -445,17 +447,18 @@ static void xics_kvm_realize(DeviceState *dev, Error **errp)
goto fail;
}
- icpkvm->kernel_xics_fd = xics_create_device.fd;
+ xicskvm->kernel_xics_fd = xics_create_device.fd;
- object_property_set_bool(OBJECT(icp->ics), true, "realized", &error);
+ object_property_set_bool(OBJECT(xics->ics), true, "realized", &error);
if (error) {
error_propagate(errp, error);
goto fail;
}
- assert(icp->nr_servers);
- for (i = 0; i < icp->nr_servers; i++) {
- object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error);
+ assert(xics->nr_servers);
+ for (i = 0; i < xics->nr_servers; i++) {
+ object_property_set_bool(OBJECT(&xics->ss[i]), true, "realized",
+ &error);
if (error) {
error_propagate(errp, error);
goto fail;
@@ -481,7 +484,7 @@ static void xics_kvm_initfn(Object *obj)
xics->ics = ICS(object_new(TYPE_KVM_ICS));
object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
- xics->ics->icp = xics;
+ xics->ics->xics = xics;
}
static void xics_kvm_class_init(ObjectClass *oc, void *data)
@@ -495,8 +498,8 @@ static void xics_kvm_class_init(ObjectClass *oc, void *data)
xsc->set_nr_servers = xics_kvm_set_nr_servers;
}
-static const TypeInfo xics_kvm_info = {
- .name = TYPE_KVM_XICS,
+static const TypeInfo xics_spapr_kvm_info = {
+ .name = TYPE_XICS_SPAPR_KVM,
.parent = TYPE_XICS_COMMON,
.instance_size = sizeof(KVMXICSState),
.class_init = xics_kvm_class_init,
@@ -505,7 +508,7 @@ static const TypeInfo xics_kvm_info = {
static void xics_kvm_register_types(void)
{
- type_register_static(&xics_kvm_info);
+ type_register_static(&xics_spapr_kvm_info);
type_register_static(&ics_kvm_info);
type_register_static(&icp_kvm_info);
}
diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c
new file mode 100644
index 0000000000..618826dacf
--- /dev/null
+++ b/hw/intc/xics_spapr.c
@@ -0,0 +1,434 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
+ *
+ * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "trace.h"
+#include "qemu/timer.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/xics.h"
+#include "qapi/visitor.h"
+#include "qapi/error.h"
+
+/*
+ * Guest interfaces
+ */
+
+static target_ulong h_cppr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUState *cs = CPU(cpu);
+ target_ulong cppr = args[0];
+
+ icp_set_cppr(spapr->xics, cs->cpu_index, cppr);
+ return H_SUCCESS;
+}
+
+static target_ulong h_ipi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong server = xics_get_cpu_index_by_dt_id(args[0]);
+ target_ulong mfrr = args[1];
+
+ if (server >= spapr->xics->nr_servers) {
+ return H_PARAMETER;
+ }
+
+ icp_set_mfrr(spapr->xics, server, mfrr);
+ return H_SUCCESS;
+}
+
+static target_ulong h_xirr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUState *cs = CPU(cpu);
+ uint32_t xirr = icp_accept(spapr->xics->ss + cs->cpu_index);
+
+ args[0] = xirr;
+ return H_SUCCESS;
+}
+
+static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUState *cs = CPU(cpu);
+ ICPState *ss = &spapr->xics->ss[cs->cpu_index];
+ uint32_t xirr = icp_accept(ss);
+
+ args[0] = xirr;
+ args[1] = cpu_get_host_ticks();
+ return H_SUCCESS;
+}
+
+static target_ulong h_eoi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUState *cs = CPU(cpu);
+ target_ulong xirr = args[0];
+
+ icp_eoi(spapr->xics, cs->cpu_index, xirr);
+ return H_SUCCESS;
+}
+
+static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUState *cs = CPU(cpu);
+ uint32_t mfrr;
+ uint32_t xirr = icp_ipoll(spapr->xics->ss + cs->cpu_index, &mfrr);
+
+ args[0] = xirr;
+ args[1] = mfrr;
+
+ return H_SUCCESS;
+}
+
+static void rtas_set_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ ICSState *ics = spapr->xics->ics;
+ uint32_t nr, server, priority;
+
+ if ((nargs != 3) || (nret != 1)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ nr = rtas_ld(args, 0);
+ server = xics_get_cpu_index_by_dt_id(rtas_ld(args, 1));
+ priority = rtas_ld(args, 2);
+
+ if (!ics_valid_irq(ics, nr) || (server >= ics->xics->nr_servers)
+ || (priority > 0xff)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ ics_write_xive(ics, nr, server, priority, priority);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_get_xive(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ ICSState *ics = spapr->xics->ics;
+ uint32_t nr;
+
+ if ((nargs != 1) || (nret != 3)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ nr = rtas_ld(args, 0);
+
+ if (!ics_valid_irq(ics, nr)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
+ rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
+}
+
+static void rtas_int_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ ICSState *ics = spapr->xics->ics;
+ uint32_t nr;
+
+ if ((nargs != 1) || (nret != 1)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ nr = rtas_ld(args, 0);
+
+ if (!ics_valid_irq(ics, nr)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
+ ics->irqs[nr - ics->offset].priority);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_int_on(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ ICSState *ics = spapr->xics->ics;
+ uint32_t nr;
+
+ if ((nargs != 1) || (nret != 1)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ nr = rtas_ld(args, 0);
+
+ if (!ics_valid_irq(ics, nr)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
+ ics->irqs[nr - ics->offset].saved_priority,
+ ics->irqs[nr - ics->offset].saved_priority);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void xics_spapr_set_nr_irqs(XICSState *xics, uint32_t nr_irqs,
+ Error **errp)
+{
+ xics->nr_irqs = xics->ics->nr_irqs = nr_irqs;
+}
+
+static void xics_spapr_set_nr_servers(XICSState *xics, uint32_t nr_servers,
+ Error **errp)
+{
+ int i;
+
+ xics->nr_servers = nr_servers;
+
+ xics->ss = g_malloc0(xics->nr_servers * sizeof(ICPState));
+ for (i = 0; i < xics->nr_servers; i++) {
+ char buffer[32];
+ object_initialize(&xics->ss[i], sizeof(xics->ss[i]), TYPE_ICP);
+ snprintf(buffer, sizeof(buffer), "icp[%d]", i);
+ object_property_add_child(OBJECT(xics), buffer, OBJECT(&xics->ss[i]),
+ errp);
+ }
+}
+
+static void xics_spapr_realize(DeviceState *dev, Error **errp)
+{
+ XICSState *xics = XICS_SPAPR(dev);
+ Error *error = NULL;
+ int i;
+
+ if (!xics->nr_servers) {
+        error_setg(errp, "Number of servers needs to be greater than 0");
+ return;
+ }
+
+    /* Registration of global state belongs in realize */
+ spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
+ spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
+ spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
+ spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on);
+
+ spapr_register_hypercall(H_CPPR, h_cppr);
+ spapr_register_hypercall(H_IPI, h_ipi);
+ spapr_register_hypercall(H_XIRR, h_xirr);
+ spapr_register_hypercall(H_XIRR_X, h_xirr_x);
+ spapr_register_hypercall(H_EOI, h_eoi);
+ spapr_register_hypercall(H_IPOLL, h_ipoll);
+
+ object_property_set_bool(OBJECT(xics->ics), true, "realized", &error);
+ if (error) {
+ error_propagate(errp, error);
+ return;
+ }
+
+ for (i = 0; i < xics->nr_servers; i++) {
+ object_property_set_bool(OBJECT(&xics->ss[i]), true, "realized",
+ &error);
+ if (error) {
+ error_propagate(errp, error);
+ return;
+ }
+ }
+}
+
+static void xics_spapr_initfn(Object *obj)
+{
+ XICSState *xics = XICS_SPAPR(obj);
+
+ xics->ics = ICS(object_new(TYPE_ICS));
+ object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
+ xics->ics->xics = xics;
+}
+
+static void xics_spapr_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ XICSStateClass *xsc = XICS_SPAPR_CLASS(oc);
+
+ dc->realize = xics_spapr_realize;
+ xsc->set_nr_irqs = xics_spapr_set_nr_irqs;
+ xsc->set_nr_servers = xics_spapr_set_nr_servers;
+}
+
+static const TypeInfo xics_spapr_info = {
+ .name = TYPE_XICS_SPAPR,
+ .parent = TYPE_XICS_COMMON,
+ .instance_size = sizeof(XICSState),
+ .class_size = sizeof(XICSStateClass),
+ .class_init = xics_spapr_class_init,
+ .instance_init = xics_spapr_initfn,
+};
+
+#define ICS_IRQ_FREE(ics, srcno) \
+ (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
+
+static int ics_find_free_block(ICSState *ics, int num, int alignnum)
+{
+ int first, i;
+
+ for (first = 0; first < ics->nr_irqs; first += alignnum) {
+ if (num > (ics->nr_irqs - first)) {
+ return -1;
+ }
+ for (i = first; i < first + num; ++i) {
+ if (!ICS_IRQ_FREE(ics, i)) {
+ break;
+ }
+ }
+ if (i == (first + num)) {
+ return first;
+ }
+ }
+
+ return -1;
+}
+
+int xics_spapr_alloc(XICSState *xics, int src, int irq_hint, bool lsi,
+ Error **errp)
+{
+ ICSState *ics = &xics->ics[src];
+ int irq;
+
+ if (irq_hint) {
+ assert(src == xics_find_source(xics, irq_hint));
+ if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
+ error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
+ return -1;
+ }
+ irq = irq_hint;
+ } else {
+ irq = ics_find_free_block(ics, 1, 1);
+ if (irq < 0) {
+ error_setg(errp, "can't allocate IRQ: no IRQ left");
+ return -1;
+ }
+ irq += ics->offset;
+ }
+
+ ics_set_irq_type(ics, irq - ics->offset, lsi);
+ trace_xics_alloc(src, irq);
+
+ return irq;
+}
+
+/*
+ * Allocate block of consecutive IRQs, and return the number of the first IRQ in
+ * the block. If align==true, aligns the first IRQ number to num.
+ */
+int xics_spapr_alloc_block(XICSState *xics, int src, int num, bool lsi,
+ bool align, Error **errp)
+{
+ int i, first = -1;
+ ICSState *ics = &xics->ics[src];
+
+ assert(src == 0);
+    /*
+     * MSIMessage::data is used for storing the VIRQ, so it has to be
+     * aligned to num to support multiple MSI vectors. MSI-X is not
+     * affected by this. The hint is used for the first IRQ; the rest
+     * should be allocated contiguously.
+     */
+ if (align) {
+ assert((num == 1) || (num == 2) || (num == 4) ||
+ (num == 8) || (num == 16) || (num == 32));
+ first = ics_find_free_block(ics, num, num);
+ } else {
+ first = ics_find_free_block(ics, num, 1);
+ }
+ if (first < 0) {
+ error_setg(errp, "can't find a free %d-IRQ block", num);
+ return -1;
+ }
+
+ if (first >= 0) {
+ for (i = first; i < first + num; ++i) {
+ ics_set_irq_type(ics, i, lsi);
+ }
+ }
+ first += ics->offset;
+
+ trace_xics_alloc_block(src, first, num, lsi, align);
+
+ return first;
+}
+
+static void ics_free(ICSState *ics, int srcno, int num)
+{
+ int i;
+
+ for (i = srcno; i < srcno + num; ++i) {
+ if (ICS_IRQ_FREE(ics, i)) {
+ trace_xics_ics_free_warn(ics - ics->xics->ics, i + ics->offset);
+ }
+ memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
+ }
+}
+
+void xics_spapr_free(XICSState *xics, int irq, int num)
+{
+ int src = xics_find_source(xics, irq);
+
+ if (src >= 0) {
+ ICSState *ics = &xics->ics[src];
+
+ /* FIXME: implement multiple sources */
+ assert(src == 0);
+
+ trace_xics_ics_free(ics - xics->ics, irq, num);
+ ics_free(ics, irq - ics->offset, num);
+ }
+}
+
+static void xics_spapr_register_types(void)
+{
+ type_register_static(&xics_spapr_info);
+}
+
+type_init(xics_spapr_register_types)
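
xics_spapr_alloc_block() is the allocator the sPAPR code uses when it needs consecutive interrupt numbers, e.g. for multi-vector MSI, where alignment matters because the vector number is derived from the MSI message data. A hedged caller sketch (surrounding names are illustrative, error handling trimmed):

    #include "qemu/osdep.h"
    #include "hw/ppc/spapr.h"
    #include "hw/ppc/xics.h"

    /* Hypothetical caller: grab four consecutive MSI vectors from source 0,
     * aligned to four, then look up the qemu_irq of the first one. */
    static int example_alloc_msi_block(sPAPRMachineState *spapr, Error **errp)
    {
        int first = xics_spapr_alloc_block(spapr->xics, 0, 4,
                                           false /* MSI, not LSI */,
                                           true  /* align to num */, errp);
        if (first < 0) {
            return -1;
        }

        (void)xics_get_qirq(spapr->xics, first);  /* wire it up as needed */
        return first;
    }
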
diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c
index 7aa115caf2..ce74db232a 100644
--- a/hw/isa/isa-bus.c
+++ b/hw/isa/isa-bus.c
@@ -97,6 +97,13 @@ void isa_init_irq(ISADevice *dev, qemu_irq *p, int isairq)
dev->nirqs++;
}
+void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, int isairq)
+{
+ qemu_irq irq;
+ isa_init_irq(isadev, &irq, isairq);
+ qdev_connect_gpio_out(DEVICE(isadev), gpioirq, irq);
+}
+
void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16)
{
assert(bus && dma8 && dma16);
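
isa_connect_gpio_out() added above is a small convenience wrapper: it allocates the ISA-side qemu_irq for the requested ISA IRQ and connects the device's unnamed qdev GPIO output to it. A usage sketch with illustrative numbers:

    #include "qemu/osdep.h"
    #include "hw/isa/isa.h"

    /* Sketch: route qdev GPIO output 0 of an ISA device onto ISA IRQ 12,
     * typically from the device's realize path. */
    static void example_isa_wire(ISADevice *isadev)
    {
        isa_connect_gpio_out(isadev, 0 /* gpio out index */, 12 /* ISA IRQ */);
    }
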
diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c
index 213741bc21..10d1ee8b93 100644
--- a/hw/isa/lpc_ich9.c
+++ b/hw/isa/lpc_ich9.c
@@ -49,8 +49,6 @@
#include "sysemu/sysemu.h"
#include "qom/cpu.h"
-static int ich9_lpc_sci_irq(ICH9LPCState *lpc);
-
/*****************************************************************************/
/* ICH9 LPC PCI to ISA bridge */
@@ -204,41 +202,28 @@ static void ich9_lpc_pic_irq(ICH9LPCState *lpc, int pirq_num,
abort();
}
-/* pic_irq: i8254 irq 0-15 */
-static void ich9_lpc_update_pic(ICH9LPCState *lpc, int pic_irq)
+/* gsi: i8259+ioapic irq 0-15, otherwise assert */
+static void ich9_lpc_update_pic(ICH9LPCState *lpc, int gsi)
{
int i, pic_level;
+ assert(gsi < ICH9_LPC_PIC_NUM_PINS);
+
/* The pic level is the logical OR of all the PCI irqs mapped to it */
pic_level = 0;
for (i = 0; i < ICH9_LPC_NB_PIRQS; i++) {
int tmp_irq;
int tmp_dis;
ich9_lpc_pic_irq(lpc, i, &tmp_irq, &tmp_dis);
- if (!tmp_dis && pic_irq == tmp_irq) {
+ if (!tmp_dis && tmp_irq == gsi) {
pic_level |= pci_bus_get_irq_level(lpc->d.bus, i);
}
}
- if (pic_irq == ich9_lpc_sci_irq(lpc)) {
+ if (gsi == lpc->sci_gsi) {
pic_level |= lpc->sci_level;
}
- qemu_set_irq(lpc->pic[pic_irq], pic_level);
-}
-
-/* pirq: pirq[A-H] 0-7*/
-static void ich9_lpc_update_by_pirq(ICH9LPCState *lpc, int pirq)
-{
- int pic_irq;
- int pic_dis;
-
- ich9_lpc_pic_irq(lpc, pirq, &pic_irq, &pic_dis);
- assert(pic_irq < ICH9_LPC_PIC_NUM_PINS);
- if (pic_dis) {
- return;
- }
-
- ich9_lpc_update_pic(lpc, pic_irq);
+ qemu_set_irq(lpc->gsi[gsi], pic_level);
}
/* APIC mode: GSIx: PIRQ[A-H] -> GSI 16, ... no pirq shares same APIC pins. */
@@ -252,29 +237,32 @@ static int ich9_gsi_to_pirq(int gsi)
return gsi - ICH9_LPC_PIC_NUM_PINS;
}
+/* gsi: ioapic irq 16-23, otherwise assert */
static void ich9_lpc_update_apic(ICH9LPCState *lpc, int gsi)
{
int level = 0;
- if (gsi >= ICH9_LPC_PIC_NUM_PINS) {
- level |= pci_bus_get_irq_level(lpc->d.bus, ich9_gsi_to_pirq(gsi));
- }
- if (gsi == ich9_lpc_sci_irq(lpc)) {
+ assert(gsi >= ICH9_LPC_PIC_NUM_PINS);
+
+ level |= pci_bus_get_irq_level(lpc->d.bus, ich9_gsi_to_pirq(gsi));
+ if (gsi == lpc->sci_gsi) {
level |= lpc->sci_level;
}
- qemu_set_irq(lpc->ioapic[gsi], level);
+ qemu_set_irq(lpc->gsi[gsi], level);
}
void ich9_lpc_set_irq(void *opaque, int pirq, int level)
{
ICH9LPCState *lpc = opaque;
+ int pic_irq, pic_dis;
assert(0 <= pirq);
assert(pirq < ICH9_LPC_NB_PIRQS);
ich9_lpc_update_apic(lpc, ich9_pirq_to_gsi(pirq));
- ich9_lpc_update_by_pirq(lpc, pirq);
+ ich9_lpc_pic_irq(lpc, pirq, &pic_irq, &pic_dis);
+ ich9_lpc_update_pic(lpc, pic_irq);
}
/* return the pirq number (PIRQ[A-H]:0-7) corresponding to
@@ -360,13 +348,14 @@ static void ich9_set_sci(void *opaque, int irq_num, int level)
}
lpc->sci_level = level;
- irq = ich9_lpc_sci_irq(lpc);
+ irq = lpc->sci_gsi;
if (irq < 0) {
return;
}
- ich9_lpc_update_apic(lpc, irq);
- if (irq < ICH9_LPC_PIC_NUM_PINS) {
+ if (irq >= ICH9_LPC_PIC_NUM_PINS) {
+ ich9_lpc_update_apic(lpc, irq);
+ } else {
ich9_lpc_update_pic(lpc, irq);
}
}
@@ -403,12 +392,27 @@ static void ich9_apm_ctrl_changed(uint32_t val, void *arg)
/* config:PMBASE */
static void
-ich9_lpc_pmbase_update(ICH9LPCState *lpc)
+ich9_lpc_pmbase_sci_update(ICH9LPCState *lpc)
{
uint32_t pm_io_base = pci_get_long(lpc->d.config + ICH9_LPC_PMBASE);
- pm_io_base &= ICH9_LPC_PMBASE_BASE_ADDRESS_MASK;
+ uint8_t acpi_cntl = pci_get_long(lpc->d.config + ICH9_LPC_ACPI_CTRL);
+ uint8_t new_gsi;
+
+ if (acpi_cntl & ICH9_LPC_ACPI_CTRL_ACPI_EN) {
+ pm_io_base &= ICH9_LPC_PMBASE_BASE_ADDRESS_MASK;
+ } else {
+ pm_io_base = 0;
+ }
ich9_pm_iospace_update(&lpc->pm, pm_io_base);
+
+ new_gsi = ich9_lpc_sci_irq(lpc);
+ if (lpc->sci_level && new_gsi != lpc->sci_gsi) {
+ qemu_set_irq(lpc->pm.irq, 0);
+ lpc->sci_gsi = new_gsi;
+ qemu_set_irq(lpc->pm.irq, 1);
+ }
+ lpc->sci_gsi = new_gsi;
}
/* config:RCBA */
@@ -445,7 +449,7 @@ static int ich9_lpc_post_load(void *opaque, int version_id)
{
ICH9LPCState *lpc = opaque;
- ich9_lpc_pmbase_update(lpc);
+ ich9_lpc_pmbase_sci_update(lpc);
ich9_lpc_rcba_update(lpc, 0 /* disabled ICH9_LPC_RCBA_EN */);
ich9_lpc_pmcon_update(lpc);
return 0;
@@ -458,8 +462,9 @@ static void ich9_lpc_config_write(PCIDevice *d,
uint32_t rcba_old = pci_get_long(d->config + ICH9_LPC_RCBA);
pci_default_write_config(d, addr, val, len);
- if (ranges_overlap(addr, len, ICH9_LPC_PMBASE, 4)) {
- ich9_lpc_pmbase_update(lpc);
+ if (ranges_overlap(addr, len, ICH9_LPC_PMBASE, 4) ||
+ ranges_overlap(addr, len, ICH9_LPC_ACPI_CTRL, 1)) {
+ ich9_lpc_pmbase_sci_update(lpc);
}
if (ranges_overlap(addr, len, ICH9_LPC_RCBA, 4)) {
ich9_lpc_rcba_update(lpc, rcba_old);
@@ -497,7 +502,7 @@ static void ich9_lpc_reset(DeviceState *qdev)
ich9_cc_reset(lpc);
- ich9_lpc_pmbase_update(lpc);
+ ich9_lpc_pmbase_sci_update(lpc);
ich9_lpc_rcba_update(lpc, rcba_old);
lpc->sci_level = 0;
@@ -577,7 +582,7 @@ static void ich9_lpc_get_sci_int(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(obj);
- uint32_t value = ich9_lpc_sci_irq(lpc);
+ uint32_t value = lpc->sci_gsi;
visit_type_uint32(v, name, &value, errp);
}
@@ -608,6 +613,7 @@ static void ich9_lpc_initfn(Object *obj)
static void ich9_lpc_realize(PCIDevice *d, Error **errp)
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(d);
+ DeviceState *dev = DEVICE(d);
ISABus *isa_bus;
isa_bus = isa_bus_new(DEVICE(d), get_system_memory(), get_system_io(),
@@ -618,6 +624,9 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
pci_set_long(d->wmask + ICH9_LPC_PMBASE,
ICH9_LPC_PMBASE_BASE_ADDRESS_MASK);
+    pci_set_byte(d->wmask + ICH9_LPC_ACPI_CTRL,
+ ICH9_LPC_ACPI_CTRL_ACPI_EN |
+ ICH9_LPC_ACPI_CTRL_SCI_IRQ_SEL_MASK);
memory_region_init_io(&lpc->rcrb_mem, OBJECT(d), &rcrb_mmio_ops, lpc,
"lpc-rcrb-mmio", ICH9_CC_SIZE);
@@ -635,6 +644,10 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
memory_region_add_subregion_overlap(pci_address_space_io(d),
ICH9_RST_CNT_IOPORT, &lpc->rst_cnt_mem,
1);
+
+ qdev_init_gpio_out_named(dev, lpc->gsi, ICH9_GPIO_GSI, GSI_NUM_PINS);
+
+ isa_bus_irqs(isa_bus, lpc->gsi);
}
static bool ich9_rst_cnt_needed(void *opaque)
@@ -714,6 +727,7 @@ static void ich9_lpc_class_init(ObjectClass *klass, void *data)
hc->unplug = ich9_pm_device_unplug_cb;
adevc->ospm_status = ich9_pm_ospm_status;
adevc->send_event = ich9_send_gpe;
+ adevc->madt_cpu = pc_madt_cpu_entry;
}
static const TypeInfo ich9_lpc_info = {
diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c
index 0a602f28ba..81896c0e84 100644
--- a/hw/mem/nvdimm.c
+++ b/hw/mem/nvdimm.c
@@ -23,20 +23,152 @@
*/
#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
#include "hw/mem/nvdimm.h"
+static void nvdimm_get_label_size(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ NVDIMMDevice *nvdimm = NVDIMM(obj);
+ uint64_t value = nvdimm->label_size;
+
+ visit_type_size(v, name, &value, errp);
+}
+
+static void nvdimm_set_label_size(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ NVDIMMDevice *nvdimm = NVDIMM(obj);
+ Error *local_err = NULL;
+ uint64_t value;
+
+ if (memory_region_size(&nvdimm->nvdimm_mr)) {
+ error_setg(&local_err, "cannot change property value");
+ goto out;
+ }
+
+ visit_type_size(v, name, &value, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ if (value < MIN_NAMESPACE_LABEL_SIZE) {
+        error_setg(&local_err, "Property '%s.%s' (0x%" PRIx64 ") must be"
+                   " at least 0x%lx", object_get_typename(obj),
+ name, value, MIN_NAMESPACE_LABEL_SIZE);
+ goto out;
+ }
+
+ nvdimm->label_size = value;
+out:
+ error_propagate(errp, local_err);
+}
+
+static void nvdimm_init(Object *obj)
+{
+ object_property_add(obj, "label-size", "int",
+ nvdimm_get_label_size, nvdimm_set_label_size, NULL,
+ NULL, NULL);
+}
+
+static MemoryRegion *nvdimm_get_memory_region(PCDIMMDevice *dimm)
+{
+ NVDIMMDevice *nvdimm = NVDIMM(dimm);
+
+ return &nvdimm->nvdimm_mr;
+}
+
+static void nvdimm_realize(PCDIMMDevice *dimm, Error **errp)
+{
+ MemoryRegion *mr = host_memory_backend_get_memory(dimm->hostmem, errp);
+ NVDIMMDevice *nvdimm = NVDIMM(dimm);
+ uint64_t align, pmem_size, size = memory_region_size(mr);
+
+ align = memory_region_get_alignment(mr);
+
+ pmem_size = size - nvdimm->label_size;
+ nvdimm->label_data = memory_region_get_ram_ptr(mr) + pmem_size;
+ pmem_size = QEMU_ALIGN_DOWN(pmem_size, align);
+
+ if (size <= nvdimm->label_size || !pmem_size) {
+ HostMemoryBackend *hostmem = dimm->hostmem;
+ char *path = object_get_canonical_path_component(OBJECT(hostmem));
+
+ error_setg(errp, "the size of memdev %s (0x%" PRIx64 ") is too "
+ "small to contain nvdimm label (0x%" PRIx64 ") and "
+ "aligned PMEM (0x%" PRIx64 ")",
+ path, memory_region_size(mr), nvdimm->label_size, align);
+ return;
+ }
+
+ memory_region_init_alias(&nvdimm->nvdimm_mr, OBJECT(dimm),
+ "nvdimm-memory", mr, 0, pmem_size);
+ nvdimm->nvdimm_mr.align = align;
+}
+
+/*
+ * the caller should check the input parameters before calling
+ * label read/write functions.
+ */
+static void nvdimm_validate_rw_label_data(NVDIMMDevice *nvdimm, uint64_t size,
+ uint64_t offset)
+{
+ assert((nvdimm->label_size >= size + offset) && (offset + size > offset));
+}
+
+static void nvdimm_read_label_data(NVDIMMDevice *nvdimm, void *buf,
+ uint64_t size, uint64_t offset)
+{
+ nvdimm_validate_rw_label_data(nvdimm, size, offset);
+
+ memcpy(buf, nvdimm->label_data + offset, size);
+}
+
+static void nvdimm_write_label_data(NVDIMMDevice *nvdimm, const void *buf,
+ uint64_t size, uint64_t offset)
+{
+ MemoryRegion *mr;
+ PCDIMMDevice *dimm = PC_DIMM(nvdimm);
+ uint64_t backend_offset;
+
+ nvdimm_validate_rw_label_data(nvdimm, size, offset);
+
+ memcpy(nvdimm->label_data + offset, buf, size);
+
+ mr = host_memory_backend_get_memory(dimm->hostmem, &error_abort);
+ backend_offset = memory_region_size(mr) - nvdimm->label_size + offset;
+ memory_region_set_dirty(mr, backend_offset, size);
+}
+
+static MemoryRegion *nvdimm_get_vmstate_memory_region(PCDIMMDevice *dimm)
+{
+ return host_memory_backend_get_memory(dimm->hostmem, &error_abort);
+}
+
static void nvdimm_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
+ PCDIMMDeviceClass *ddc = PC_DIMM_CLASS(oc);
+ NVDIMMClass *nvc = NVDIMM_CLASS(oc);
/* nvdimm hotplug has not been supported yet. */
dc->hotpluggable = false;
+
+ ddc->realize = nvdimm_realize;
+ ddc->get_memory_region = nvdimm_get_memory_region;
+ ddc->get_vmstate_memory_region = nvdimm_get_vmstate_memory_region;
+
+ nvc->read_label_data = nvdimm_read_label_data;
+ nvc->write_label_data = nvdimm_write_label_data;
}
static TypeInfo nvdimm_info = {
.name = TYPE_NVDIMM,
.parent = TYPE_PC_DIMM,
+ .class_size = sizeof(NVDIMMClass),
.class_init = nvdimm_class_init,
+ .instance_size = sizeof(NVDIMMDevice),
+ .instance_init = nvdimm_init,
};
static void nvdimm_register_types(void)
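
The new NVDIMMClass read/write hooks are intended to be driven by the ACPI NVDIMM _DSM emulation; offsets and sizes must stay within label_size, which the helpers assert. A minimal caller sketch, assuming the NVDIMM_GET_CLASS macro from hw/mem/nvdimm.h:

    #include "qemu/osdep.h"
    #include "hw/mem/nvdimm.h"

    /* Hypothetical example: read the first 128 bytes of namespace label data
     * through the class hook added above. */
    static void example_read_labels(NVDIMMDevice *nvdimm)
    {
        NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
        uint8_t buf[128];

        nvc->read_label_data(nvdimm, buf, sizeof(buf), 0 /* offset */);
    }
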
diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index 6de2275986..249193a543 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -40,6 +40,8 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
int slot;
MachineState *machine = MACHINE(qdev_get_machine());
PCDIMMDevice *dimm = PC_DIMM(dev);
+ PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
+ MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm);
Error *local_err = NULL;
uint64_t existing_dimms_capacity = 0;
uint64_t addr;
@@ -105,7 +107,7 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
}
memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
- vmstate_register_ram(mr, dev);
+ vmstate_register_ram(vmstate_mr, dev);
numa_set_mem_node_id(addr, memory_region_size(mr), dimm->node);
out:
@@ -116,10 +118,12 @@ void pc_dimm_memory_unplug(DeviceState *dev, MemoryHotplugState *hpms,
MemoryRegion *mr)
{
PCDIMMDevice *dimm = PC_DIMM(dev);
+ PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
+ MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm);
numa_unset_mem_node_id(dimm->addr, memory_region_size(mr), dimm->node);
memory_region_del_subregion(&hpms->mr, mr);
- vmstate_unregister_ram(mr, dev);
+ vmstate_unregister_ram(vmstate_mr, dev);
}
static int pc_existing_dimms_capacity_internal(Object *obj, void *opaque)
@@ -424,6 +428,11 @@ static MemoryRegion *pc_dimm_get_memory_region(PCDIMMDevice *dimm)
return host_memory_backend_get_memory(dimm->hostmem, &error_abort);
}
+static MemoryRegion *pc_dimm_get_vmstate_memory_region(PCDIMMDevice *dimm)
+{
+ return host_memory_backend_get_memory(dimm->hostmem, &error_abort);
+}
+
static void pc_dimm_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -434,6 +443,7 @@ static void pc_dimm_class_init(ObjectClass *oc, void *data)
dc->desc = "DIMM memory module";
ddc->get_memory_region = pc_dimm_get_memory_region;
+ ddc->get_vmstate_memory_region = pc_dimm_get_vmstate_memory_region;
}
static TypeInfo pc_dimm_info = {
diff --git a/hw/microblaze/petalogix_ml605_mmu.c b/hw/microblaze/petalogix_ml605_mmu.c
index 07527b677b..4968bdbb28 100644
--- a/hw/microblaze/petalogix_ml605_mmu.c
+++ b/hw/microblaze/petalogix_ml605_mmu.c
@@ -191,9 +191,16 @@ petalogix_ml605_init(MachineState *machine)
spi = (SSIBus *)qdev_get_child_bus(dev, "spi");
for (i = 0; i < NUM_SPI_FLASHES; i++) {
+ DriveInfo *dinfo = drive_get_next(IF_MTD);
qemu_irq cs_line;
- dev = ssi_create_slave(spi, "n25q128");
+ dev = ssi_create_slave_no_init(spi, "n25q128");
+ if (dinfo) {
+ qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo),
+ &error_fatal);
+ }
+ qdev_init_nofail(dev);
+
cs_line = qdev_get_gpio_in_named(dev, SSI_GPIO_CS, 0);
sysbus_connect_irq(busdev, i+1, cs_line);
}
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
index ffb49c11ac..54020aa06c 100644
--- a/hw/misc/Makefile.objs
+++ b/hw/misc/Makefile.objs
@@ -52,3 +52,4 @@ obj-$(CONFIG_PVPANIC) += pvpanic.o
obj-$(CONFIG_EDU) += edu.o
obj-$(CONFIG_HYPERV_TESTDEV) += hyperv_testdev.o
obj-$(CONFIG_AUX) += aux.o
+obj-$(CONFIG_ASPEED_SOC) += aspeed_scu.o
diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c
new file mode 100644
index 0000000000..23f51752b0
--- /dev/null
+++ b/hw/misc/aspeed_scu.c
@@ -0,0 +1,284 @@
+/*
+ * ASPEED System Control Unit
+ *
+ * Andrew Jeffery <andrew@aj.id.au>
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/misc/aspeed_scu.h"
+#include "hw/qdev-properties.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qemu/bitops.h"
+#include "qemu/log.h"
+#include "trace.h"
+
+#define TO_REG(offset) ((offset) >> 2)
+
+#define PROT_KEY TO_REG(0x00)
+#define SYS_RST_CTRL TO_REG(0x04)
+#define CLK_SEL TO_REG(0x08)
+#define CLK_STOP_CTRL TO_REG(0x0C)
+#define FREQ_CNTR_CTRL TO_REG(0x10)
+#define FREQ_CNTR_EVAL TO_REG(0x14)
+#define IRQ_CTRL TO_REG(0x18)
+#define D2PLL_PARAM TO_REG(0x1C)
+#define MPLL_PARAM TO_REG(0x20)
+#define HPLL_PARAM TO_REG(0x24)
+#define FREQ_CNTR_RANGE TO_REG(0x28)
+#define MISC_CTRL1 TO_REG(0x2C)
+#define PCI_CTRL1 TO_REG(0x30)
+#define PCI_CTRL2 TO_REG(0x34)
+#define PCI_CTRL3 TO_REG(0x38)
+#define SYS_RST_STATUS TO_REG(0x3C)
+#define SOC_SCRATCH1 TO_REG(0x40)
+#define SOC_SCRATCH2 TO_REG(0x44)
+#define MAC_CLK_DELAY TO_REG(0x48)
+#define MISC_CTRL2 TO_REG(0x4C)
+#define VGA_SCRATCH1 TO_REG(0x50)
+#define VGA_SCRATCH2 TO_REG(0x54)
+#define VGA_SCRATCH3 TO_REG(0x58)
+#define VGA_SCRATCH4 TO_REG(0x5C)
+#define VGA_SCRATCH5 TO_REG(0x60)
+#define VGA_SCRATCH6 TO_REG(0x64)
+#define VGA_SCRATCH7 TO_REG(0x68)
+#define VGA_SCRATCH8 TO_REG(0x6C)
+#define HW_STRAP1 TO_REG(0x70)
+#define RNG_CTRL TO_REG(0x74)
+#define RNG_DATA TO_REG(0x78)
+#define SILICON_REV TO_REG(0x7C)
+#define PINMUX_CTRL1 TO_REG(0x80)
+#define PINMUX_CTRL2 TO_REG(0x84)
+#define PINMUX_CTRL3 TO_REG(0x88)
+#define PINMUX_CTRL4 TO_REG(0x8C)
+#define PINMUX_CTRL5 TO_REG(0x90)
+#define PINMUX_CTRL6 TO_REG(0x94)
+#define WDT_RST_CTRL TO_REG(0x9C)
+#define PINMUX_CTRL7 TO_REG(0xA0)
+#define PINMUX_CTRL8 TO_REG(0xA4)
+#define PINMUX_CTRL9 TO_REG(0xA8)
+#define WAKEUP_EN TO_REG(0xC0)
+#define WAKEUP_CTRL TO_REG(0xC4)
+#define HW_STRAP2 TO_REG(0xD0)
+#define FREE_CNTR4 TO_REG(0xE0)
+#define FREE_CNTR4_EXT TO_REG(0xE4)
+#define CPU2_CTRL TO_REG(0x100)
+#define CPU2_BASE_SEG1 TO_REG(0x104)
+#define CPU2_BASE_SEG2 TO_REG(0x108)
+#define CPU2_BASE_SEG3 TO_REG(0x10C)
+#define CPU2_BASE_SEG4 TO_REG(0x110)
+#define CPU2_BASE_SEG5 TO_REG(0x114)
+#define CPU2_CACHE_CTRL TO_REG(0x118)
+#define UART_HPLL_CLK TO_REG(0x160)
+#define PCIE_CTRL TO_REG(0x180)
+#define BMC_MMIO_CTRL TO_REG(0x184)
+#define RELOC_DECODE_BASE1 TO_REG(0x188)
+#define RELOC_DECODE_BASE2 TO_REG(0x18C)
+#define MAILBOX_DECODE_BASE TO_REG(0x190)
+#define SRAM_DECODE_BASE1 TO_REG(0x194)
+#define SRAM_DECODE_BASE2 TO_REG(0x198)
+#define BMC_REV TO_REG(0x19C)
+#define BMC_DEV_ID TO_REG(0x1A4)
+
+#define PROT_KEY_UNLOCK 0x1688A8A8
+#define SCU_IO_REGION_SIZE 0x20000
+
+#define AST2400_A0_SILICON_REV 0x02000303U
+
+static const uint32_t ast2400_a0_resets[ASPEED_SCU_NR_REGS] = {
+ [SYS_RST_CTRL] = 0xFFCFFEDCU,
+ [CLK_SEL] = 0xF3F40000U,
+ [CLK_STOP_CTRL] = 0x19FC3E8BU,
+ [D2PLL_PARAM] = 0x00026108U,
+ [MPLL_PARAM] = 0x00030291U,
+ [HPLL_PARAM] = 0x00000291U,
+ [MISC_CTRL1] = 0x00000010U,
+ [PCI_CTRL1] = 0x20001A03U,
+ [PCI_CTRL2] = 0x20001A03U,
+ [PCI_CTRL3] = 0x04000030U,
+ [SYS_RST_STATUS] = 0x00000001U,
+ [SOC_SCRATCH1] = 0x000000C0U, /* SoC completed DRAM init */
+ [MISC_CTRL2] = 0x00000023U,
+ [RNG_CTRL] = 0x0000000EU,
+ [PINMUX_CTRL2] = 0x0000F000U,
+ [PINMUX_CTRL3] = 0x01000000U,
+ [PINMUX_CTRL4] = 0x000000FFU,
+ [PINMUX_CTRL5] = 0x0000A000U,
+ [WDT_RST_CTRL] = 0x003FFFF3U,
+ [PINMUX_CTRL8] = 0xFFFF0000U,
+ [PINMUX_CTRL9] = 0x000FFFFFU,
+ [FREE_CNTR4] = 0x000000FFU,
+ [FREE_CNTR4_EXT] = 0x000000FFU,
+ [CPU2_BASE_SEG1] = 0x80000000U,
+ [CPU2_BASE_SEG4] = 0x1E600000U,
+ [CPU2_BASE_SEG5] = 0xC0000000U,
+ [UART_HPLL_CLK] = 0x00001903U,
+ [PCIE_CTRL] = 0x0000007BU,
+ [BMC_DEV_ID] = 0x00002402U
+};
+
+static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size)
+{
+ AspeedSCUState *s = ASPEED_SCU(opaque);
+ int reg = TO_REG(offset);
+
+ if (reg >= ARRAY_SIZE(s->regs)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ return 0;
+ }
+
+ switch (reg) {
+ case WAKEUP_EN:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Read of write-only offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ break;
+ }
+
+ return s->regs[reg];
+}
+
+static void aspeed_scu_write(void *opaque, hwaddr offset, uint64_t data,
+ unsigned size)
+{
+ AspeedSCUState *s = ASPEED_SCU(opaque);
+ int reg = TO_REG(offset);
+
+ if (reg >= ARRAY_SIZE(s->regs)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ return;
+ }
+
+ if (reg > PROT_KEY && reg < CPU2_BASE_SEG1 &&
+ s->regs[PROT_KEY] != PROT_KEY_UNLOCK) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: SCU is locked!\n", __func__);
+ return;
+ }
+
+ trace_aspeed_scu_write(offset, size, data);
+
+ switch (reg) {
+ case FREQ_CNTR_EVAL:
+ case VGA_SCRATCH1 ... VGA_SCRATCH8:
+ case RNG_DATA:
+ case SILICON_REV:
+ case FREE_CNTR4:
+ case FREE_CNTR4_EXT:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
+ return;
+ }
+
+ s->regs[reg] = data;
+}
+
+static const MemoryRegionOps aspeed_scu_ops = {
+ .read = aspeed_scu_read,
+ .write = aspeed_scu_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .valid.unaligned = false,
+};
+
+static void aspeed_scu_reset(DeviceState *dev)
+{
+ AspeedSCUState *s = ASPEED_SCU(dev);
+ const uint32_t *reset;
+
+ switch (s->silicon_rev) {
+ case AST2400_A0_SILICON_REV:
+ reset = ast2400_a0_resets;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ memcpy(s->regs, reset, sizeof(s->regs));
+ s->regs[SILICON_REV] = s->silicon_rev;
+ s->regs[HW_STRAP1] = s->hw_strap1;
+ s->regs[HW_STRAP2] = s->hw_strap2;
+}
+
+static uint32_t aspeed_silicon_revs[] = { AST2400_A0_SILICON_REV, };
+
+static bool is_supported_silicon_rev(uint32_t silicon_rev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_silicon_revs); i++) {
+ if (silicon_rev == aspeed_silicon_revs[i]) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void aspeed_scu_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ AspeedSCUState *s = ASPEED_SCU(dev);
+
+ if (!is_supported_silicon_rev(s->silicon_rev)) {
+ error_setg(errp, "Unknown silicon revision: 0x%" PRIx32,
+ s->silicon_rev);
+ return;
+ }
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_scu_ops, s,
+ TYPE_ASPEED_SCU, SCU_IO_REGION_SIZE);
+
+ sysbus_init_mmio(sbd, &s->iomem);
+}
+
+static const VMStateDescription vmstate_aspeed_scu = {
+ .name = "aspeed.scu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, AspeedSCUState, ASPEED_SCU_NR_REGS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property aspeed_scu_properties[] = {
+ DEFINE_PROP_UINT32("silicon-rev", AspeedSCUState, silicon_rev, 0),
+ DEFINE_PROP_UINT32("hw-strap1", AspeedSCUState, hw_strap1, 0),
+ DEFINE_PROP_UINT32("hw-strap2", AspeedSCUState, hw_strap1, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void aspeed_scu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->realize = aspeed_scu_realize;
+ dc->reset = aspeed_scu_reset;
+ dc->desc = "ASPEED System Control Unit";
+ dc->vmsd = &vmstate_aspeed_scu;
+ dc->props = aspeed_scu_properties;
+}
+
+static const TypeInfo aspeed_scu_info = {
+ .name = TYPE_ASPEED_SCU,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AspeedSCUState),
+ .class_init = aspeed_scu_class_init,
+};
+
+static void aspeed_scu_register_types(void)
+{
+ type_register_static(&aspeed_scu_info);
+}
+
+type_init(aspeed_scu_register_types);
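The model above is a plain sysbus device configured entirely through properties, and writes to most registers are refused until the guest stores PROT_KEY_UNLOCK (0x1688A8A8) into the protection key register. A minimal, hypothetical instantiation from board code might look like the following sketch; it assumes the silicon revision constant is exported via the header, and the strap values and MMIO base address are placeholders, not taken from this patch:

/* Hypothetical board-side wiring of the SCU model; values are placeholders. */
DeviceState *scu = qdev_create(NULL, TYPE_ASPEED_SCU);

qdev_prop_set_uint32(scu, "silicon-rev", AST2400_A0_SILICON_REV);
qdev_prop_set_uint32(scu, "hw-strap1", 0);            /* assumed strap value */
qdev_prop_set_uint32(scu, "hw-strap2", 0);            /* assumed strap value */
qdev_init_nofail(scu);

sysbus_mmio_map(SYS_BUS_DEVICE(scu), 0, 0x1e6e2000);  /* assumed SCU base */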
diff --git a/hw/misc/max111x.c b/hw/misc/max111x.c
index 9014f0f705..2a277bdb86 100644
--- a/hw/misc/max111x.c
+++ b/hw/misc/max111x.c
@@ -147,14 +147,14 @@ static int max111x_init(SSISlave *d, int inputs)
return 0;
}
-static int max1110_init(SSISlave *dev)
+static void max1110_realize(SSISlave *dev, Error **errp)
{
- return max111x_init(dev, 8);
+ max111x_init(dev, 8);
}
-static int max1111_init(SSISlave *dev)
+static void max1111_realize(SSISlave *dev, Error **errp)
{
- return max111x_init(dev, 4);
+ max111x_init(dev, 4);
}
void max111x_set_input(DeviceState *dev, int line, uint8_t value)
@@ -183,7 +183,7 @@ static void max1110_class_init(ObjectClass *klass, void *data)
{
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
- k->init = max1110_init;
+ k->realize = max1110_realize;
}
static const TypeInfo max1110_info = {
@@ -196,7 +196,7 @@ static void max1111_class_init(ObjectClass *klass, void *data)
{
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
- k->init = max1111_init;
+ k->realize = max1111_realize;
}
static const TypeInfo max1111_info = {
diff --git a/hw/misc/trace-events b/hw/misc/trace-events
index 16b6701cbf..ea52a14d78 100644
--- a/hw/misc/trace-events
+++ b/hw/misc/trace-events
@@ -50,3 +50,6 @@ milkymist_pfpu_memory_read(uint32_t addr, uint32_t value) "addr %08x value %08x"
milkymist_pfpu_memory_write(uint32_t addr, uint32_t value) "addr %08x value %08x"
milkymist_pfpu_vectout(uint32_t a, uint32_t b, uint32_t dma_ptr) "a %08x b %08x dma_ptr %08x"
milkymist_pfpu_pulse_irq(void) "Pulse IRQ"
+
+# hw/misc/aspeed_scu.c
+aspeed_scu_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32
diff --git a/hw/misc/vmport.c b/hw/misc/vmport.c
index 6896789801..c763811a9f 100644
--- a/hw/misc/vmport.c
+++ b/hw/misc/vmport.c
@@ -36,7 +36,6 @@
#define VMPORT_ENTRIES 0x2c
#define VMPORT_MAGIC 0x564D5868
-#define TYPE_VMPORT "vmport"
#define VMPORT(obj) OBJECT_CHECK(VMPortState, (obj), TYPE_VMPORT)
typedef struct VMPortState
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index 0346f3e335..8a4be1e667 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -274,6 +274,11 @@ static inline unsigned tx_desc_get_last(unsigned *desc)
return (desc[1] & DESC_1_TX_LAST) ? 1 : 0;
}
+static inline void tx_desc_set_last(unsigned *desc)
+{
+ desc[1] |= DESC_1_TX_LAST;
+}
+
static inline unsigned tx_desc_get_length(unsigned *desc)
{
return desc[1] & DESC_1_LENGTH;
@@ -664,6 +669,13 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
GEM_DMACFG_RBUFSZ_S) * GEM_DMACFG_RBUFSZ_MUL;
bytes_to_copy = size;
+ /* The hardware allows a zero value here but warns against it. To avoid
+ * indefinite loops in QEMU we enforce a minimum value here
+ */
+ if (rxbufsize < GEM_DMACFG_RBUFSZ_MUL) {
+ rxbufsize = GEM_DMACFG_RBUFSZ_MUL;
+ }
+
/* Pad to minimum length. Assume FCS field is stripped, logic
* below will increment it to the real minimum of 64 when
* not FCS stripping
@@ -932,6 +944,7 @@ static void gem_transmit(CadenceGEMState *s)
/* read next descriptor */
if (tx_desc_get_wrap(desc)) {
+ tx_desc_set_last(desc);
packet_desc_addr = s->regs[GEM_TXQBASE];
} else {
packet_desc_addr += 8;
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index 1202371271..06ca7b2638 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -536,7 +536,7 @@ e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
static void
xmit_seg(E1000State *s)
{
- uint16_t len, *sp;
+ uint16_t len;
unsigned int frames = s->tx.tso_frames, css, sofar;
struct e1000_tx *tp = &s->tx;
@@ -547,7 +547,7 @@ xmit_seg(E1000State *s)
if (tp->props.ip) { /* IPv4 */
stw_be_p(tp->data+css+2, tp->size - css);
stw_be_p(tp->data+css+4,
- be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
+ lduw_be_p(tp->data + css + 4) + frames);
} else { /* IPv6 */
stw_be_p(tp->data+css+4, tp->size - css);
}
@@ -567,8 +567,9 @@ xmit_seg(E1000State *s)
if (tp->props.sum_needed & E1000_TXD_POPTS_TXSM) {
unsigned int phsum;
// add pseudo-header length before checksum calculation
- sp = (uint16_t *)(tp->data + tp->props.tucso);
- phsum = be16_to_cpup(sp) + len;
+ void *sp = tp->data + tp->props.tucso;
+
+ phsum = lduw_be_p(sp) + len;
phsum = (phsum >> 16) + (phsum & 0xffff);
stw_be_p(sp, phsum);
}
@@ -759,9 +760,9 @@ receive_filter(E1000State *s, const uint8_t *buf, int size)
if (e1000x_is_vlan_packet(buf, le16_to_cpu(s->mac_reg[VET])) &&
e1000x_vlan_rx_filter_enabled(s->mac_reg)) {
- uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
- uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
- ((vid >> 5) & 0x7f));
+ uint16_t vid = lduw_be_p(buf + 14);
+ uint32_t vfta = ldl_le_p((uint32_t*)(s->mac_reg + VFTA) +
+ ((vid >> 5) & 0x7f));
if ((vfta & (1 << (vid & 0x1f))) == 0)
return 0;
}
@@ -889,8 +890,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
if (e1000x_vlan_enabled(s->mac_reg) &&
e1000x_is_vlan_packet(filter_buf, le16_to_cpu(s->mac_reg[VET]))) {
- vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
- + 14)));
+ vlan_special = cpu_to_le16(lduw_be_p(filter_buf + 14));
iov_ofs = 4;
if (filter_buf == iov->iov_base) {
memmove(filter_buf + 4, filter_buf, 12);
diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
index 4549acb120..6050d8b7f8 100644
--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -1019,9 +1019,9 @@ e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size)
if (e1000x_is_vlan_packet(buf, core->vet) &&
e1000x_vlan_rx_filter_enabled(core->mac)) {
- uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
- uint32_t vfta = le32_to_cpup((uint32_t *)(core->mac + VFTA) +
- ((vid >> 5) & 0x7f));
+ uint16_t vid = lduw_be_p(buf + 14);
+ uint32_t vfta = ldl_le_p((uint32_t *)(core->mac + VFTA) +
+ ((vid >> 5) & 0x7f));
if ((vfta & (1 << (vid & 0x1f))) == 0) {
trace_e1000e_rx_flt_vlan_mismatch(vid);
return false;
diff --git a/hw/net/e1000x_common.c b/hw/net/e1000x_common.c
index 94f85c98c8..eb0e097137 100644
--- a/hw/net/e1000x_common.c
+++ b/hw/net/e1000x_common.c
@@ -47,7 +47,7 @@ bool e1000x_rx_ready(PCIDevice *d, uint32_t *mac)
bool e1000x_is_vlan_packet(const uint8_t *buf, uint16_t vet)
{
- uint16_t eth_proto = be16_to_cpup((uint16_t *)(buf + 12));
+ uint16_t eth_proto = lduw_be_p(buf + 12);
bool res = (eth_proto == vet);
trace_e1000x_vlan_is_vlan_pkt(res, eth_proto, vet);
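The conversions in this and the preceding hunks replace pointer casts plus be16_to_cpup()/le32_to_cpup() with the lduw/ldl/ldq_*_p() helpers, which load byte by byte and therefore tolerate unaligned, type-punned packet buffers. A standalone sketch of the idiom (not QEMU's actual implementation):

/* Standalone sketch of the byte-wise big-endian load idiom; this mirrors
 * the intent of lduw_be_p() but is not QEMU's actual implementation. */
#include <stdint.h>

static uint16_t sketch_lduw_be_p(const void *ptr)
{
    const uint8_t *b = ptr;

    /* Reading one byte at a time never requires 16-bit alignment. */
    return (uint16_t)((b[0] << 8) | b[1]);
}

/* Usage mirroring the hunk above, with buf pointing at an Ethernet frame:
 *     uint16_t eth_proto = sketch_lduw_be_p(buf + 12);
 */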
diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c
index 9b4b9b59d2..b10c419838 100644
--- a/hw/net/eepro100.c
+++ b/hw/net/eepro100.c
@@ -352,14 +352,14 @@ static unsigned e100_compute_mcast_idx(const uint8_t *ep)
static uint16_t e100_read_reg2(EEPRO100State *s, E100RegisterOffset addr)
{
assert(!((uintptr_t)&s->mem[addr] & 1));
- return le16_to_cpup((uint16_t *)&s->mem[addr]);
+ return lduw_le_p(&s->mem[addr]);
}
/* Read a 32 bit control/status (CSR) register. */
static uint32_t e100_read_reg4(EEPRO100State *s, E100RegisterOffset addr)
{
assert(!((uintptr_t)&s->mem[addr] & 3));
- return le32_to_cpup((uint32_t *)&s->mem[addr]);
+ return ldl_le_p(&s->mem[addr]);
}
/* Write a 16 bit control/status (CSR) register. */
@@ -367,7 +367,7 @@ static void e100_write_reg2(EEPRO100State *s, E100RegisterOffset addr,
uint16_t val)
{
assert(!((uintptr_t)&s->mem[addr] & 1));
- cpu_to_le16w((uint16_t *)&s->mem[addr], val);
+ stw_le_p(&s->mem[addr], val);
}
/* Read a 32 bit control/status (CSR) register. */
@@ -375,7 +375,7 @@ static void e100_write_reg4(EEPRO100State *s, E100RegisterOffset addr,
uint32_t val)
{
assert(!((uintptr_t)&s->mem[addr] & 3));
- cpu_to_le32w((uint32_t *)&s->mem[addr], val);
+ stl_le_p(&s->mem[addr], val);
}
#if defined(DEBUG_EEPRO100)
diff --git a/hw/net/mipsnet.c b/hw/net/mipsnet.c
index cf8b8236df..5115adcaea 100644
--- a/hw/net/mipsnet.c
+++ b/hw/net/mipsnet.c
@@ -183,10 +183,12 @@ static void mipsnet_ioport_write(void *opaque, hwaddr addr,
break;
case MIPSNET_TX_DATA_BUFFER:
s->tx_buffer[s->tx_written++] = val;
- if (s->tx_written == s->tx_count) {
+ if ((s->tx_written >= MAX_ETH_FRAME_SIZE)
+ || (s->tx_written == s->tx_count)) {
/* Send buffer. */
- trace_mipsnet_send(s->tx_count);
- qemu_send_packet(qemu_get_queue(s->nic), s->tx_buffer, s->tx_count);
+ trace_mipsnet_send(s->tx_written);
+ qemu_send_packet(qemu_get_queue(s->nic),
+ s->tx_buffer, s->tx_written);
s->tx_count = s->tx_written = 0;
s->intctl |= MIPSNET_INTCTL_TXDONE;
s->busy = 1;
diff --git a/hw/net/rocker/rocker_tlv.h b/hw/net/rocker/rocker_tlv.h
index e3c4ab6793..88561648f0 100644
--- a/hw/net/rocker/rocker_tlv.h
+++ b/hw/net/rocker/rocker_tlv.h
@@ -106,17 +106,17 @@ static inline uint64_t rocker_tlv_get_u64(const RockerTlv *tlv)
static inline uint16_t rocker_tlv_get_le16(const RockerTlv *tlv)
{
- return le16_to_cpup((uint16_t *) rocker_tlv_data(tlv));
+ return lduw_le_p(rocker_tlv_data(tlv));
}
static inline uint32_t rocker_tlv_get_le32(const RockerTlv *tlv)
{
- return le32_to_cpup((uint32_t *) rocker_tlv_data(tlv));
+ return ldl_le_p(rocker_tlv_data(tlv));
}
static inline uint64_t rocker_tlv_get_le64(const RockerTlv *tlv)
{
- return le64_to_cpup((uint64_t *) rocker_tlv_data(tlv));
+ return ldq_le_p(rocker_tlv_data(tlv));
}
static inline void rocker_tlv_parse(RockerTlv **tb, int maxtype,
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c
index 562c1fded2..07297cb78f 100644
--- a/hw/net/rtl8139.c
+++ b/hw/net/rtl8139.c
@@ -1013,8 +1013,8 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t
uint32_t rx_space = rxdw0 & CP_RX_BUFFER_SIZE_MASK;
/* write VLAN info to descriptor variables. */
- if (s->CpCmd & CPlusRxVLAN && be16_to_cpup((uint16_t *)
- &buf[ETH_ALEN * 2]) == ETH_P_VLAN) {
+ if (s->CpCmd & CPlusRxVLAN &&
+ lduw_be_p(&buf[ETH_ALEN * 2]) == ETH_P_VLAN) {
dot1q_buf = &buf[ETH_ALEN * 2];
size -= VLAN_HLEN;
/* if too small buffer, use the tailroom added duing expansion */
@@ -1024,11 +1024,10 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t
rxdw1 &= ~CP_RX_VLAN_TAG_MASK;
/* BE + ~le_to_cpu()~ + cpu_to_le() = BE */
- rxdw1 |= CP_RX_TAVA | le16_to_cpup((uint16_t *)
- &dot1q_buf[ETHER_TYPE_LEN]);
+ rxdw1 |= CP_RX_TAVA | lduw_le_p(&dot1q_buf[ETHER_TYPE_LEN]);
DPRINTF("C+ Rx mode : extracted vlan tag with tci: ""%u\n",
- be16_to_cpup((uint16_t *)&dot1q_buf[ETHER_TYPE_LEN]));
+ lduw_be_p(&dot1q_buf[ETHER_TYPE_LEN]));
} else {
/* reset VLAN tag flag */
rxdw1 &= ~CP_RX_TAVA;
@@ -1352,29 +1351,6 @@ static void RTL8139TallyCounters_dma_write(RTL8139State *s, dma_addr_t tc_addr)
pci_dma_write(d, tc_addr + 62, (uint8_t *)&val16, 2);
}
-/* Loads values of tally counters from VM state file */
-
-static const VMStateDescription vmstate_tally_counters = {
- .name = "tally_counters",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (VMStateField[]) {
- VMSTATE_UINT64(TxOk, RTL8139TallyCounters),
- VMSTATE_UINT64(RxOk, RTL8139TallyCounters),
- VMSTATE_UINT64(TxERR, RTL8139TallyCounters),
- VMSTATE_UINT32(RxERR, RTL8139TallyCounters),
- VMSTATE_UINT16(MissPkt, RTL8139TallyCounters),
- VMSTATE_UINT16(FAE, RTL8139TallyCounters),
- VMSTATE_UINT32(Tx1Col, RTL8139TallyCounters),
- VMSTATE_UINT32(TxMCol, RTL8139TallyCounters),
- VMSTATE_UINT64(RxOkPhy, RTL8139TallyCounters),
- VMSTATE_UINT64(RxOkBrd, RTL8139TallyCounters),
- VMSTATE_UINT16(TxAbt, RTL8139TallyCounters),
- VMSTATE_UINT16(TxUndrn, RTL8139TallyCounters),
- VMSTATE_END_OF_LIST()
- }
-};
-
static void rtl8139_ChipCmd_write(RTL8139State *s, uint32_t val)
{
DeviceState *d = DEVICE(s);
@@ -3222,7 +3198,7 @@ static void rtl8139_pre_save(void *opaque)
static const VMStateDescription vmstate_rtl8139 = {
.name = "rtl8139",
- .version_id = 4,
+ .version_id = 5,
.minimum_version_id = 3,
.post_load = rtl8139_post_load,
.pre_save = rtl8139_pre_save,
@@ -3293,8 +3269,19 @@ static const VMStateDescription vmstate_rtl8139 = {
VMSTATE_UINT32(TimerInt, RTL8139State),
VMSTATE_INT64(TCTR_base, RTL8139State),
- VMSTATE_STRUCT(tally_counters, RTL8139State, 0,
- vmstate_tally_counters, RTL8139TallyCounters),
+ VMSTATE_UINT64(tally_counters.TxOk, RTL8139State),
+ VMSTATE_UINT64(tally_counters.RxOk, RTL8139State),
+ VMSTATE_UINT64(tally_counters.TxERR, RTL8139State),
+ VMSTATE_UINT32(tally_counters.RxERR, RTL8139State),
+ VMSTATE_UINT16(tally_counters.MissPkt, RTL8139State),
+ VMSTATE_UINT16(tally_counters.FAE, RTL8139State),
+ VMSTATE_UINT32(tally_counters.Tx1Col, RTL8139State),
+ VMSTATE_UINT32(tally_counters.TxMCol, RTL8139State),
+ VMSTATE_UINT64(tally_counters.RxOkPhy, RTL8139State),
+ VMSTATE_UINT64(tally_counters.RxOkBrd, RTL8139State),
+ VMSTATE_UINT32_V(tally_counters.RxOkMul, RTL8139State, 5),
+ VMSTATE_UINT16(tally_counters.TxAbt, RTL8139State),
+ VMSTATE_UINT16(tally_counters.TxUndrn, RTL8139State),
VMSTATE_UINT32_V(cplus_enabled, RTL8139State, 4),
VMSTATE_END_OF_LIST()
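The vmstate rework above flattens the tally counters into the parent description and bumps the version to 5 so the new RxOkMul field can be streamed; the _V macro variants only transfer a field when the stream version is at least the given value, which keeps version 3 and 4 streams loadable. A hypothetical description showing the idiom (ExampleState and its fields are made up for illustration; only the macro usage mirrors the hunk above):

/* Hypothetical vmstate illustrating the versioned-field idiom. */
static const VMStateDescription vmstate_example = {
    .name = "example",
    .version_id = 5,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(always_present, ExampleState),
        VMSTATE_UINT32_V(added_in_v5, ExampleState, 5), /* only for v5+ */
        VMSTATE_END_OF_LIST()
    }
};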
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 5798f87d8e..7e6a60aa12 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1051,7 +1051,7 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
ptr += n->host_hdr_len;
if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
- int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
+ int vid = lduw_be_p(ptr + 14) & 0xfff;
if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
return 0;
}
diff --git a/hw/net/vmware_utils.h b/hw/net/vmware_utils.h
index c0dbb2ff41..550060170e 100644
--- a/hw/net/vmware_utils.h
+++ b/hw/net/vmware_utils.h
@@ -26,97 +26,104 @@
*
*/
static inline void
-vmw_shmem_read(hwaddr addr, void *buf, int len)
+vmw_shmem_read(PCIDevice *d, hwaddr addr, void *buf, int len)
{
VMW_SHPRN("SHMEM r: %" PRIx64 ", len: %d to %p", addr, len, buf);
- cpu_physical_memory_read(addr, buf, len);
+ pci_dma_read(d, addr, buf, len);
}
static inline void
-vmw_shmem_write(hwaddr addr, void *buf, int len)
+vmw_shmem_write(PCIDevice *d, hwaddr addr, void *buf, int len)
{
VMW_SHPRN("SHMEM w: %" PRIx64 ", len: %d to %p", addr, len, buf);
- cpu_physical_memory_write(addr, buf, len);
+ pci_dma_write(d, addr, buf, len);
}
static inline void
-vmw_shmem_rw(hwaddr addr, void *buf, int len, int is_write)
+vmw_shmem_rw(PCIDevice *d, hwaddr addr, void *buf, int len, int is_write)
{
VMW_SHPRN("SHMEM r/w: %" PRIx64 ", len: %d (to %p), is write: %d",
addr, len, buf, is_write);
- cpu_physical_memory_rw(addr, buf, len, is_write);
+ if (is_write)
+ pci_dma_write(d, addr, buf, len);
+ else
+ pci_dma_read(d, addr, buf, len);
}
static inline void
-vmw_shmem_set(hwaddr addr, uint8_t val, int len)
+vmw_shmem_set(PCIDevice *d, hwaddr addr, uint8_t val, int len)
{
int i;
VMW_SHPRN("SHMEM set: %" PRIx64 ", len: %d (value 0x%X)", addr, len, val);
for (i = 0; i < len; i++) {
- cpu_physical_memory_write(addr + i, &val, 1);
+ pci_dma_write(d, addr + i, &val, 1);
}
}
static inline uint32_t
-vmw_shmem_ld8(hwaddr addr)
+vmw_shmem_ld8(PCIDevice *d, hwaddr addr)
{
- uint8_t res = ldub_phys(&address_space_memory, addr);
+ uint8_t res;
+ pci_dma_read(d, addr, &res, 1);
VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res);
return res;
}
static inline void
-vmw_shmem_st8(hwaddr addr, uint8_t value)
+vmw_shmem_st8(PCIDevice *d, hwaddr addr, uint8_t value)
{
VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value);
- stb_phys(&address_space_memory, addr, value);
+ pci_dma_write(d, addr, &value, 1);
}
static inline uint32_t
-vmw_shmem_ld16(hwaddr addr)
+vmw_shmem_ld16(PCIDevice *d, hwaddr addr)
{
- uint16_t res = lduw_le_phys(&address_space_memory, addr);
+ uint16_t res;
+ pci_dma_read(d, addr, &res, 2);
VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res);
return res;
}
static inline void
-vmw_shmem_st16(hwaddr addr, uint16_t value)
+vmw_shmem_st16(PCIDevice *d, hwaddr addr, uint16_t value)
{
VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value);
- stw_le_phys(&address_space_memory, addr, value);
+ pci_dma_write(d, addr, &value, 2);
}
static inline uint32_t
-vmw_shmem_ld32(hwaddr addr)
+vmw_shmem_ld32(PCIDevice *d, hwaddr addr)
{
- uint32_t res = ldl_le_phys(&address_space_memory, addr);
+ uint32_t res;
+ pci_dma_read(d, addr, &res, 4);
VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res);
return res;
}
static inline void
-vmw_shmem_st32(hwaddr addr, uint32_t value)
+vmw_shmem_st32(PCIDevice *d, hwaddr addr, uint32_t value)
{
VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value);
- stl_le_phys(&address_space_memory, addr, value);
+ pci_dma_write(d, addr, &value, 4);
}
static inline uint64_t
-vmw_shmem_ld64(hwaddr addr)
+vmw_shmem_ld64(PCIDevice *d, hwaddr addr)
{
- uint64_t res = ldq_le_phys(&address_space_memory, addr);
+ uint64_t res;
+ pci_dma_read(d, addr, &res, 8);
VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res);
return res;
}
static inline void
-vmw_shmem_st64(hwaddr addr, uint64_t value)
+vmw_shmem_st64(PCIDevice *d, hwaddr addr, uint64_t value)
{
VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value);
- stq_le_phys(&address_space_memory, addr, value);
+ pci_dma_write(d, addr, &value, 8);
}
/* Macros for simplification of operations on array-style registers */
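All of the helpers above now take the PCIDevice and use pci_dma_read()/pci_dma_write(), so guest memory accesses are routed through the device's DMA address space rather than address_space_memory, which matters once an IOMMU sits in front of the device. A hypothetical caller, not part of this patch:

/* Hypothetical caller of the reworked helpers; counter_pa is supplied by
 * the caller and the function name is an assumption for illustration. */
static uint32_t read_shared_counter(VMXNET3State *s, hwaddr counter_pa)
{
    PCIDevice *d = PCI_DEVICE(s);

    /* The load now goes through d's DMA address space. */
    return vmw_shmem_ld32(d, counter_pa);
}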
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 25cee9ff4b..b8e3b25053 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -74,54 +74,54 @@
#define VMXNET3_MAX_NMSIX_INTRS (1)
/* Macros for rings descriptors access */
-#define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \
- (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
+#define VMXNET3_READ_TX_QUEUE_DESCR8(_d, dpa, field) \
+ (vmw_shmem_ld8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
-#define VMXNET3_WRITE_TX_QUEUE_DESCR8(dpa, field, value) \
- (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value)))
+#define VMXNET3_WRITE_TX_QUEUE_DESCR8(_d, dpa, field, value) \
+ (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value)))
-#define VMXNET3_READ_TX_QUEUE_DESCR32(dpa, field) \
- (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
+#define VMXNET3_READ_TX_QUEUE_DESCR32(_d, dpa, field) \
+ (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
-#define VMXNET3_WRITE_TX_QUEUE_DESCR32(dpa, field, value) \
- (vmw_shmem_st32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
+#define VMXNET3_WRITE_TX_QUEUE_DESCR32(_d, dpa, field, value) \
+ (vmw_shmem_st32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
-#define VMXNET3_READ_TX_QUEUE_DESCR64(dpa, field) \
- (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
+#define VMXNET3_READ_TX_QUEUE_DESCR64(_d, dpa, field) \
+ (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
-#define VMXNET3_WRITE_TX_QUEUE_DESCR64(dpa, field, value) \
- (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
+#define VMXNET3_WRITE_TX_QUEUE_DESCR64(_d, dpa, field, value) \
+ (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
-#define VMXNET3_READ_RX_QUEUE_DESCR64(dpa, field) \
- (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
+#define VMXNET3_READ_RX_QUEUE_DESCR64(_d, dpa, field) \
+ (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
-#define VMXNET3_READ_RX_QUEUE_DESCR32(dpa, field) \
- (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
+#define VMXNET3_READ_RX_QUEUE_DESCR32(_d, dpa, field) \
+ (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
-#define VMXNET3_WRITE_RX_QUEUE_DESCR64(dpa, field, value) \
- (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
+#define VMXNET3_WRITE_RX_QUEUE_DESCR64(_d, dpa, field, value) \
+ (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
-#define VMXNET3_WRITE_RX_QUEUE_DESCR8(dpa, field, value) \
- (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
+#define VMXNET3_WRITE_RX_QUEUE_DESCR8(_d, dpa, field, value) \
+ (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
/* Macros for guest driver shared area access */
-#define VMXNET3_READ_DRV_SHARED64(shpa, field) \
- (vmw_shmem_ld64(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED64(_d, shpa, field) \
+ (vmw_shmem_ld64(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
-#define VMXNET3_READ_DRV_SHARED32(shpa, field) \
- (vmw_shmem_ld32(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED32(_d, shpa, field) \
+ (vmw_shmem_ld32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
-#define VMXNET3_WRITE_DRV_SHARED32(shpa, field, val) \
- (vmw_shmem_st32(shpa + offsetof(struct Vmxnet3_DriverShared, field), val))
+#define VMXNET3_WRITE_DRV_SHARED32(_d, shpa, field, val) \
+ (vmw_shmem_st32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), val))
-#define VMXNET3_READ_DRV_SHARED16(shpa, field) \
- (vmw_shmem_ld16(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED16(_d, shpa, field) \
+ (vmw_shmem_ld16(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
-#define VMXNET3_READ_DRV_SHARED8(shpa, field) \
- (vmw_shmem_ld8(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED8(_d, shpa, field) \
+ (vmw_shmem_ld8(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
-#define VMXNET3_READ_DRV_SHARED(shpa, field, b, l) \
- (vmw_shmem_read(shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
+#define VMXNET3_READ_DRV_SHARED(_d, shpa, field, b, l) \
+ (vmw_shmem_read(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
#define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag))
@@ -147,7 +147,8 @@ typedef struct {
uint8_t gen;
} Vmxnet3Ring;
-static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
+static inline void vmxnet3_ring_init(PCIDevice *d,
+ Vmxnet3Ring *ring,
hwaddr pa,
size_t size,
size_t cell_size,
@@ -160,7 +161,7 @@ static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
ring->next = 0;
if (zero_region) {
- vmw_shmem_set(pa, 0, size * cell_size);
+ vmw_shmem_set(d, pa, 0, size * cell_size);
}
}
@@ -190,14 +191,16 @@ static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring)
return ring->pa + ring->next * ring->cell_size;
}
-static inline void vmxnet3_ring_read_curr_cell(Vmxnet3Ring *ring, void *buff)
+static inline void vmxnet3_ring_read_curr_cell(PCIDevice *d, Vmxnet3Ring *ring,
+ void *buff)
{
- vmw_shmem_read(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
+ vmw_shmem_read(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
}
-static inline void vmxnet3_ring_write_curr_cell(Vmxnet3Ring *ring, void *buff)
+static inline void vmxnet3_ring_write_curr_cell(PCIDevice *d, Vmxnet3Ring *ring,
+ void *buff)
{
- vmw_shmem_write(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
+ vmw_shmem_write(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
}
static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring)
@@ -456,9 +459,9 @@ vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked)
vmxnet3_update_interrupt_line_state(s, lidx);
}
-static bool vmxnet3_verify_driver_magic(hwaddr dshmem)
+static bool vmxnet3_verify_driver_magic(PCIDevice *d, hwaddr dshmem)
{
- return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC);
+ return (VMXNET3_READ_DRV_SHARED32(d, dshmem, magic) == VMXNET3_REV1_MAGIC);
}
#define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF)
@@ -526,13 +529,14 @@ vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx)
static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
{
struct Vmxnet3_TxCompDesc txcq_descr;
+ PCIDevice *d = PCI_DEVICE(s);
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
- vmxnet3_ring_write_curr_cell(&s->txq_descr[qidx].comp_ring, &txcq_descr);
+ vmxnet3_ring_write_curr_cell(d, &s->txq_descr[qidx].comp_ring, &txcq_descr);
/* Flush changes in TX descriptor before changing the counter value */
smp_wmb();
@@ -688,13 +692,14 @@ vmxnet3_pop_next_tx_descr(VMXNET3State *s,
uint32_t *descr_idx)
{
Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring;
+ PCIDevice *d = PCI_DEVICE(s);
- vmxnet3_ring_read_curr_cell(ring, txd);
+ vmxnet3_ring_read_curr_cell(d, ring, txd);
if (txd->gen == vmxnet3_ring_curr_gen(ring)) {
/* Only read after generation field verification */
smp_rmb();
/* Re-read to be sure we got the latest version */
- vmxnet3_ring_read_curr_cell(ring, txd);
+ vmxnet3_ring_read_curr_cell(d, ring, txd);
VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring);
*descr_idx = vmxnet3_ring_curr_cell_idx(ring);
vmxnet3_inc_tx_consumption_counter(s, qidx);
@@ -782,9 +787,11 @@ static inline void
vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx,
struct Vmxnet3_RxDesc *dbuf, uint32_t *didx)
{
+ PCIDevice *d = PCI_DEVICE(s);
+
Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
*didx = vmxnet3_ring_curr_cell_idx(ring);
- vmxnet3_ring_read_curr_cell(ring, dbuf);
+ vmxnet3_ring_read_curr_cell(d, ring, dbuf);
}
static inline uint8_t
@@ -802,9 +809,8 @@ vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen)
hwaddr daddr =
vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring);
- pci_dma_read(PCI_DEVICE(s), daddr,
- &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
-
+ pci_dma_read(PCI_DEVICE(s),
+ daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);
if (rxcd.gen != ring_gen) {
@@ -1058,6 +1064,7 @@ static bool
vmxnet3_indicate_packet(VMXNET3State *s)
{
struct Vmxnet3_RxDesc rxd;
+ PCIDevice *d = PCI_DEVICE(s);
bool is_head = true;
uint32_t rxd_idx;
uint32_t rx_ridx = 0;
@@ -1091,7 +1098,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
}
chunk_size = MIN(bytes_left, rxd.len);
- vmxnet3_pci_dma_writev(PCI_DEVICE(s), data, bytes_copied,
+ vmxnet3_pci_dma_writev(d, data, bytes_copied,
le64_to_cpu(rxd.addr), chunk_size);
bytes_copied += chunk_size;
bytes_left -= chunk_size;
@@ -1099,7 +1106,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
vmxnet3_dump_rx_descr(&rxd);
if (ready_rxcd_pa != 0) {
- pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd));
+ pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
}
memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
@@ -1131,7 +1138,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
rxcd.eop = 1;
rxcd.err = (bytes_left != 0);
- pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd));
+ pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
/* Flush RX descriptor changes */
smp_wmb();
@@ -1250,7 +1257,9 @@ static void vmxnet3_reset(VMXNET3State *s)
static void vmxnet3_update_rx_mode(VMXNET3State *s)
{
- s->rx_mode = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
+ PCIDevice *d = PCI_DEVICE(s);
+
+ s->rx_mode = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem,
devRead.rxFilterConf.rxMode);
VMW_CFPRN("RX mode: 0x%08X", s->rx_mode);
}
@@ -1258,9 +1267,10 @@ static void vmxnet3_update_rx_mode(VMXNET3State *s)
static void vmxnet3_update_vlan_filters(VMXNET3State *s)
{
int i;
+ PCIDevice *d = PCI_DEVICE(s);
/* Copy configuration from shared memory */
- VMXNET3_READ_DRV_SHARED(s->drv_shmem,
+ VMXNET3_READ_DRV_SHARED(d, s->drv_shmem,
devRead.rxFilterConf.vfTable,
s->vlan_table,
sizeof(s->vlan_table));
@@ -1281,8 +1291,10 @@ static void vmxnet3_update_vlan_filters(VMXNET3State *s)
static void vmxnet3_update_mcast_filters(VMXNET3State *s)
{
+ PCIDevice *d = PCI_DEVICE(s);
+
uint16_t list_bytes =
- VMXNET3_READ_DRV_SHARED16(s->drv_shmem,
+ VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem,
devRead.rxFilterConf.mfTableLen);
s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]);
@@ -1299,10 +1311,10 @@ static void vmxnet3_update_mcast_filters(VMXNET3State *s)
} else {
int i;
hwaddr mcast_list_pa =
- VMXNET3_READ_DRV_SHARED64(s->drv_shmem,
+ VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem,
devRead.rxFilterConf.mfTablePA);
- pci_dma_read(PCI_DEVICE(s), mcast_list_pa, s->mcast_list, list_bytes);
+ pci_dma_read(d, mcast_list_pa, s->mcast_list, list_bytes);
VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len);
for (i = 0; i < s->mcast_list_len; i++) {
@@ -1328,19 +1340,20 @@ static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s)
static void vmxnet3_fill_stats(VMXNET3State *s)
{
int i;
+ PCIDevice *d = PCI_DEVICE(s);
if (!s->device_active)
return;
for (i = 0; i < s->txq_num; i++) {
- pci_dma_write(PCI_DEVICE(s),
+ pci_dma_write(d,
s->txq_descr[i].tx_stats_pa,
&s->txq_descr[i].txq_stats,
sizeof(s->txq_descr[i].txq_stats));
}
for (i = 0; i < s->rxq_num; i++) {
- pci_dma_write(PCI_DEVICE(s),
+ pci_dma_write(d,
s->rxq_descr[i].rx_stats_pa,
&s->rxq_descr[i].rxq_stats,
sizeof(s->rxq_descr[i].rxq_stats));
@@ -1350,8 +1363,9 @@ static void vmxnet3_fill_stats(VMXNET3State *s)
static void vmxnet3_adjust_by_guest_type(VMXNET3State *s)
{
struct Vmxnet3_GOSInfo gos;
+ PCIDevice *d = PCI_DEVICE(s);
- VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.misc.driverInfo.gos,
+ VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, devRead.misc.driverInfo.gos,
&gos, sizeof(gos));
s->rx_packets_compound =
(gos.gosType == VMXNET3_GOS_TYPE_WIN) ? false : true;
@@ -1371,13 +1385,14 @@ vmxnet3_dump_conf_descr(const char *name,
static void vmxnet3_update_pm_state(VMXNET3State *s)
{
struct Vmxnet3_VariableLenConfDesc pm_descr;
+ PCIDevice *d = PCI_DEVICE(s);
pm_descr.confLen =
- VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confLen);
+ VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confLen);
pm_descr.confVer =
- VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confVer);
+ VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confVer);
pm_descr.confPA =
- VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.pmConfDesc.confPA);
+ VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.pmConfDesc.confPA);
vmxnet3_dump_conf_descr("PM State", &pm_descr);
}
@@ -1386,8 +1401,9 @@ static void vmxnet3_update_features(VMXNET3State *s)
{
uint32_t guest_features;
int rxcso_supported;
+ PCIDevice *d = PCI_DEVICE(s);
- guest_features = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
+ guest_features = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem,
devRead.misc.uptFeatures);
rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM);
@@ -1462,12 +1478,13 @@ static void vmxnet3_activate_device(VMXNET3State *s)
{
int i;
static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1;
+ PCIDevice *d = PCI_DEVICE(s);
hwaddr qdescr_table_pa;
uint64_t pa;
uint32_t size;
/* Verify configuration consistency */
- if (!vmxnet3_verify_driver_magic(s->drv_shmem)) {
+ if (!vmxnet3_verify_driver_magic(d, s->drv_shmem)) {
VMW_ERPRN("Device configuration received from driver is invalid");
return;
}
@@ -1483,11 +1500,11 @@ static void vmxnet3_activate_device(VMXNET3State *s)
vmxnet3_update_pm_state(s);
vmxnet3_setup_rx_filtering(s);
/* Cache fields from shared memory */
- s->mtu = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.misc.mtu);
+ s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu);
VMW_CFPRN("MTU is %u", s->mtu);
s->max_rx_frags =
- VMXNET3_READ_DRV_SHARED16(s->drv_shmem, devRead.misc.maxNumRxSG);
+ VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, devRead.misc.maxNumRxSG);
if (s->max_rx_frags == 0) {
s->max_rx_frags = 1;
@@ -1496,24 +1513,24 @@ static void vmxnet3_activate_device(VMXNET3State *s)
VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags);
s->event_int_idx =
- VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.eventIntrIdx);
+ VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.eventIntrIdx);
assert(vmxnet3_verify_intx(s, s->event_int_idx));
VMW_CFPRN("Events interrupt line is %u", s->event_int_idx);
s->auto_int_masking =
- VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.autoMask);
+ VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.autoMask);
VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking);
s->txq_num =
- VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numTxQueues);
+ VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numTxQueues);
s->rxq_num =
- VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues);
+ VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numRxQueues);
VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
vmxnet3_validate_queues(s);
qdescr_table_pa =
- VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA);
+ VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.misc.queueDescPA);
VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, qdescr_table_pa);
/*
@@ -1529,25 +1546,25 @@ static void vmxnet3_activate_device(VMXNET3State *s)
/* Read interrupt number for this TX queue */
s->txq_descr[i].intr_idx =
- VMXNET3_READ_TX_QUEUE_DESCR8(qdescr_pa, conf.intrIdx);
+ VMXNET3_READ_TX_QUEUE_DESCR8(d, qdescr_pa, conf.intrIdx);
assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx));
VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx);
/* Read rings memory locations for TX queues */
- pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.txRingBasePA);
- size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.txRingSize);
+ pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA);
+ size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize);
- vmxnet3_ring_init(&s->txq_descr[i].tx_ring, pa, size,
+ vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size,
sizeof(struct Vmxnet3_TxDesc), false);
VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring);
s->max_tx_frags += size;
/* TXC ring */
- pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.compRingBasePA);
- size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.compRingSize);
- vmxnet3_ring_init(&s->txq_descr[i].comp_ring, pa, size,
+ pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA);
+ size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize);
+ vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size,
sizeof(struct Vmxnet3_TxCompDesc), true);
VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring);
@@ -1558,7 +1575,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
sizeof(s->txq_descr[i].txq_stats));
/* Fill device-managed parameters for queues */
- VMXNET3_WRITE_TX_QUEUE_DESCR32(qdescr_pa,
+ VMXNET3_WRITE_TX_QUEUE_DESCR32(d, qdescr_pa,
ctrl.txThreshold,
VMXNET3_DEF_TX_THRESHOLD);
}
@@ -1578,7 +1595,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
/* Read interrupt number for this RX queue */
s->rxq_descr[i].intr_idx =
- VMXNET3_READ_TX_QUEUE_DESCR8(qd_pa, conf.intrIdx);
+ VMXNET3_READ_TX_QUEUE_DESCR8(d, qd_pa, conf.intrIdx);
assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx));
VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx);
@@ -1586,18 +1603,18 @@ static void vmxnet3_activate_device(VMXNET3State *s)
/* Read rings memory locations */
for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) {
/* RX rings */
- pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.rxRingBasePA[j]);
- size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.rxRingSize[j]);
- vmxnet3_ring_init(&s->rxq_descr[i].rx_ring[j], pa, size,
+ pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]);
+ size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]);
+ vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size,
sizeof(struct Vmxnet3_RxDesc), false);
VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d",
i, j, pa, size);
}
/* RXC ring */
- pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.compRingBasePA);
- size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.compRingSize);
- vmxnet3_ring_init(&s->rxq_descr[i].comp_ring, pa, size,
+ pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA);
+ size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize);
+ vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size,
sizeof(struct Vmxnet3_RxCompDesc), true);
VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size);
@@ -1764,19 +1781,21 @@ static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
static void vmxnet3_set_events(VMXNET3State *s, uint32_t val)
{
uint32_t events;
+ PCIDevice *d = PCI_DEVICE(s);
VMW_CBPRN("Setting events: 0x%x", val);
- events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) | val;
- VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
+ events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) | val;
+ VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
}
static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
{
+ PCIDevice *d = PCI_DEVICE(s);
uint32_t events;
VMW_CBPRN("Clearing events: 0x%x", val);
- events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) & ~val;
- VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
+ events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) & ~val;
+ VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
}
static void
diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
index 70f897e3a9..03be05dc0d 100644
--- a/hw/pci-host/q35.c
+++ b/hw/pci-host/q35.c
@@ -127,6 +127,10 @@ static Property mch_props[] = {
DEFINE_PROP_SIZE(PCI_HOST_PROP_PCI_HOLE64_SIZE, Q35PCIHost,
mch.pci_hole64_size, DEFAULT_PCI_HOLE64_SIZE),
DEFINE_PROP_UINT32("short_root_bus", Q35PCIHost, mch.short_root_bus, 0),
+ DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MEM_SIZE, Q35PCIHost,
+ mch.below_4g_mem_size, 0),
+ DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MEM_SIZE, Q35PCIHost,
+ mch.above_4g_mem_size, 0),
DEFINE_PROP_END_OF_LIST(),
};
@@ -177,6 +181,22 @@ static void q35_host_initfn(Object *obj)
q35_host_get_mmcfg_size,
NULL, NULL, NULL, NULL);
+ object_property_add_link(obj, MCH_HOST_PROP_RAM_MEM, TYPE_MEMORY_REGION,
+ (Object **) &s->mch.ram_memory,
+ qdev_prop_allow_set_link_before_realize, 0, NULL);
+
+ object_property_add_link(obj, MCH_HOST_PROP_PCI_MEM, TYPE_MEMORY_REGION,
+ (Object **) &s->mch.pci_address_space,
+ qdev_prop_allow_set_link_before_realize, 0, NULL);
+
+ object_property_add_link(obj, MCH_HOST_PROP_SYSTEM_MEM, TYPE_MEMORY_REGION,
+ (Object **) &s->mch.system_memory,
+ qdev_prop_allow_set_link_before_realize, 0, NULL);
+
+ object_property_add_link(obj, MCH_HOST_PROP_IO_MEM, TYPE_MEMORY_REGION,
+ (Object **) &s->mch.address_space_io,
+ qdev_prop_allow_set_link_before_realize, 0, NULL);
+
/* Leave enough space for the biggest MCFG BAR */
/* TODO: this matches current bios behaviour, but
* it's not a power of two, which means an MTRR
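The new link properties let the machine hand the relevant memory regions to the host bridge before realize instead of poking MCH fields directly. A hypothetical machine-side caller (the local variable names are assumptions, and the API shown follows the value-before-name signature of this era):

/* Hypothetical machine-side code setting the new link properties before
 * the host bridge is realized; variable names are assumptions. */
object_property_set_link(OBJECT(q35_host), OBJECT(ram_memory),
                         MCH_HOST_PROP_RAM_MEM, &error_abort);
object_property_set_link(OBJECT(q35_host), OBJECT(pci_memory),
                         MCH_HOST_PROP_PCI_MEM, &error_abort);
object_property_set_link(OBJECT(q35_host), OBJECT(get_system_memory()),
                         MCH_HOST_PROP_SYSTEM_MEM, &error_abort);
object_property_set_link(OBJECT(q35_host), OBJECT(system_io),
                         MCH_HOST_PROP_IO_MEM, &error_abort);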
diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs
index 5cc6608e50..91a3420f47 100644
--- a/hw/ppc/Makefile.objs
+++ b/hw/ppc/Makefile.objs
@@ -8,6 +8,7 @@ obj-$(CONFIG_PSERIES) += spapr_cpu_core.o
ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy)
obj-y += spapr_pci_vfio.o
endif
+obj-$(CONFIG_PSERIES) += spapr_rtas_ddw.o
# PowerPC 4xx boards
obj-y += ppc405_boards.o ppc4xx_devs.o ppc405_uc.o ppc440_bamboo.o
obj-y += ppc4xx_pci.o
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index ee1c60b820..0cd534df55 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -601,7 +601,7 @@ static int ppce500_prep_device_tree(MachineState *machine,
}
/* Create -kernel TLB entries for BookE. */
-static inline hwaddr booke206_page_size_to_tlb(uint64_t size)
+hwaddr booke206_page_size_to_tlb(uint64_t size)
{
return 63 - clz64(size >> 10);
}
diff --git a/hw/ppc/e500.h b/hw/ppc/e500.h
index ef224ea5e6..70ba1d8f4f 100644
--- a/hw/ppc/e500.h
+++ b/hw/ppc/e500.h
@@ -26,4 +26,6 @@ typedef struct PPCE500Params {
void ppce500_init(MachineState *machine, PPCE500Params *params);
+hwaddr booke206_page_size_to_tlb(uint64_t size);
+
#endif
diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index 1bcf740f0e..e4252528a6 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -699,9 +699,18 @@ static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
{
+ CPUPPCState *env = &cpu->env;
+
/* Raise it */
- LOG_TB("raise decrementer exception\n");
- ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
+ LOG_TB("raise hv decrementer exception\n");
+
+ /* The architecture specifies that we don't deliver HDEC
+ * interrupts in a PM state. Not only do they not cause a
+ * wakeup, they also get effectively discarded.
+ */
+ if (!env->in_pm_state) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
+ }
}
static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
@@ -928,9 +937,7 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
}
/* Create new timer */
tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
- if (0) {
- /* XXX: find a suitable condition to enable the hypervisor decrementer
- */
+ if (env->has_hv_mode) {
tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
cpu);
} else {
diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c
index 76bd78bfd7..22c584eb8d 100644
--- a/hw/ppc/ppce500_spin.c
+++ b/hw/ppc/ppce500_spin.c
@@ -32,6 +32,7 @@
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "sysemu/kvm.h"
+#include "e500.h"
#define MAX_CPUS 32
@@ -72,12 +73,6 @@ static void spin_reset(void *opaque)
}
}
-/* Create -kernel TLB entries for BookE, linearly spanning 256MB. */
-static inline hwaddr booke206_page_size_to_tlb(uint64_t size)
-{
- return ctz32(size >> 10) >> 1;
-}
-
static void mmubooke_create_initial_mapping(CPUPPCState *env,
target_ulong va,
hwaddr pa,
@@ -104,7 +99,7 @@ static void spin_kick(void *data)
hwaddr map_start;
cpu_synchronize_state(cpu);
- stl_p(&curspin->pir, env->spr[SPR_PIR]);
+ stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]);
env->nip = ldq_p(&curspin->addr) & (map_size - 1);
env->gpr[3] = ldq_p(&curspin->r3);
env->gpr[4] = 0;
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 778fa255a9..7f33a1b2b5 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -116,15 +116,16 @@ static XICSState *try_create_xics(const char *type, int nr_servers,
static XICSState *xics_system_init(MachineState *machine,
int nr_servers, int nr_irqs, Error **errp)
{
- XICSState *icp = NULL;
+ XICSState *xics = NULL;
if (kvm_enabled()) {
Error *err = NULL;
if (machine_kernel_irqchip_allowed(machine)) {
- icp = try_create_xics(TYPE_KVM_XICS, nr_servers, nr_irqs, &err);
+ xics = try_create_xics(TYPE_XICS_SPAPR_KVM, nr_servers, nr_irqs,
+ &err);
}
- if (machine_kernel_irqchip_required(machine) && !icp) {
+ if (machine_kernel_irqchip_required(machine) && !xics) {
error_reportf_err(err,
"kernel_irqchip requested but unavailable: ");
} else {
@@ -132,11 +133,11 @@ static XICSState *xics_system_init(MachineState *machine,
}
}
- if (!icp) {
- icp = try_create_xics(TYPE_XICS, nr_servers, nr_irqs, errp);
+ if (!xics) {
+ xics = try_create_xics(TYPE_XICS_SPAPR, nr_servers, nr_irqs, errp);
}
- return icp;
+ return xics;
}
static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
@@ -339,6 +340,9 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
add_str(hypertas, "hcall-splpar");
add_str(hypertas, "hcall-bulk");
add_str(hypertas, "hcall-set-mode");
+ add_str(hypertas, "hcall-sprg0");
+ add_str(hypertas, "hcall-copy");
+ add_str(hypertas, "hcall-debug");
add_str(qemu_hypertas, "hcall-memop1");
fdt = g_malloc0(FDT_MAX_SIZE);
@@ -1767,6 +1771,13 @@ static void ppc_spapr_init(MachineState *machine)
spapr->vrma_adjust = 1;
spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
}
+
+ /* We no longer support an unbounded RMA since we added proper
+ * emulation of HV mode. The maximum we can get is 16G, which also
+ * happens to be what we configure for PAPR mode, so make sure we
+ * don't do anything bigger than that.
+ */
+ spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
}
if (spapr->rma_size > node0_size) {
@@ -1779,9 +1790,9 @@ static void ppc_spapr_init(MachineState *machine)
load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
/* Set up Interrupt Controller before we create the VCPUs */
- spapr->icp = xics_system_init(machine,
- DIV_ROUND_UP(max_cpus * smt, smp_threads),
- XICS_IRQS, &error_fatal);
+ spapr->xics = xics_system_init(machine,
+ DIV_ROUND_UP(max_cpus * smt, smp_threads),
+ XICS_IRQS_SPAPR, &error_fatal);
if (smc->dr_lmb_enabled) {
spapr_validate_node_memory(machine, &error_fatal);
@@ -2367,8 +2378,8 @@ static HotpluggableCPUList *spapr_query_hotpluggable_cpus(MachineState *machine)
cpu_item->type = spapr_get_cpu_core_type(machine->cpu_model);
cpu_item->vcpus_count = smp_threads;
- cpu_props->has_core = true;
- cpu_props->core = i * smt;
+ cpu_props->has_core_id = true;
+ cpu_props->core_id = i * smt;
/* TODO: add 'has_node/node' here to describe
to which node core belongs */
@@ -2485,7 +2496,12 @@ DEFINE_SPAPR_MACHINE(2_7, "2.7", true);
* pseries-2.6
*/
#define SPAPR_COMPAT_2_6 \
- HW_COMPAT_2_6
+ HW_COMPAT_2_6 \
+ { \
+ .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\
+ .property = "ddw",\
+ .value = stringify(off),\
+ },
static void spapr_machine_2_6_instance_options(MachineState *machine)
{
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index 3a5da09b99..70b6b0b5ee 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -42,7 +42,7 @@ static void spapr_cpu_destroy(PowerPCCPU *cpu)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
- xics_cpu_destroy(spapr->icp, cpu);
+ xics_cpu_destroy(spapr->xics, cpu);
qemu_unregister_reset(spapr_cpu_reset, cpu);
}
@@ -76,7 +76,7 @@ void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu, Error **errp)
}
}
- xics_cpu_setup(spapr->icp, cpu);
+ xics_cpu_setup(spapr->xics, cpu);
qemu_register_reset(spapr_cpu_reset, cpu);
spapr_cpu_reset(cpu);
@@ -102,7 +102,6 @@ static void spapr_core_release(DeviceState *dev, void *opaque)
const char *typename = object_class_get_name(sc->cpu_class);
size_t size = object_type_get_instance_size(typename);
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
- sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
CPUCore *cc = CPU_CORE(dev);
int smt = kvmppc_smt_threads();
int i;
@@ -120,7 +119,7 @@ static void spapr_core_release(DeviceState *dev, void *opaque)
spapr->cores[cc->core_id / smt] = NULL;
- g_free(core->threads);
+ g_free(sc->threads);
object_unparent(OBJECT(dev));
}
@@ -260,23 +259,24 @@ out:
error_propagate(errp, local_err);
}
-static int spapr_cpu_core_realize_child(Object *child, void *opaque)
+static void spapr_cpu_core_realize_child(Object *child, Error **errp)
{
- Error **errp = opaque;
+ Error *local_err = NULL;
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
CPUState *cs = CPU(child);
PowerPCCPU *cpu = POWERPC_CPU(cs);
- object_property_set_bool(child, true, "realized", errp);
- if (*errp) {
- return 1;
+ object_property_set_bool(child, true, "realized", &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
}
- spapr_cpu_init(spapr, cpu, errp);
- if (*errp) {
- return 1;
+ spapr_cpu_init(spapr, cpu, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
}
- return 0;
}
static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
@@ -286,13 +286,13 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
const char *typename = object_class_get_name(sc->cpu_class);
size_t size = object_type_get_instance_size(typename);
Error *local_err = NULL;
- Object *obj;
- int i;
+ void *obj;
+ int i, j;
sc->threads = g_malloc0(size * cc->nr_threads);
for (i = 0; i < cc->nr_threads; i++) {
char id[32];
- void *obj = sc->threads + i * size;
+ obj = sc->threads + i * size;
object_initialize(obj, size, typename);
snprintf(id, sizeof(id), "thread[%d]", i);
@@ -300,19 +300,23 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
if (local_err) {
goto err;
}
+ object_unref(obj);
}
- object_child_foreach(OBJECT(dev), spapr_cpu_core_realize_child, &local_err);
- if (local_err) {
- goto err;
- } else {
- return;
+
+ for (j = 0; j < cc->nr_threads; j++) {
+ obj = sc->threads + j * size;
+
+ spapr_cpu_core_realize_child(obj, &local_err);
+ if (local_err) {
+ goto err;
+ }
}
+ return;
err:
- while (i >= 0) {
+ while (--i >= 0) {
obj = sc->threads + i * size;
object_unparent(obj);
- i--;
}
g_free(sc->threads);
error_propagate(errp, local_err);
@@ -326,7 +330,6 @@ static void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
/*
* instance_init routines from different flavours of sPAPR CPU cores.
- * TODO: Add support for 'host' core type.
*/
#define SPAPR_CPU_CORE_INITFN(_type, _fname) \
static void glue(glue(spapr_cpu_core_, _fname), _initfn(Object *obj)) \
@@ -339,10 +342,15 @@ static void glue(glue(spapr_cpu_core_, _fname), _initfn(Object *obj)) \
core->cpu_class = oc; \
}
+SPAPR_CPU_CORE_INITFN(970mp_v1.0, 970MP_v10);
+SPAPR_CPU_CORE_INITFN(970mp_v1.1, 970MP_v11);
+SPAPR_CPU_CORE_INITFN(970_v2.2, 970);
+SPAPR_CPU_CORE_INITFN(POWER5+_v2.1, POWER5plus);
SPAPR_CPU_CORE_INITFN(POWER7_v2.3, POWER7);
SPAPR_CPU_CORE_INITFN(POWER7+_v2.1, POWER7plus);
SPAPR_CPU_CORE_INITFN(POWER8_v2.0, POWER8);
SPAPR_CPU_CORE_INITFN(POWER8E_v2.1, POWER8E);
+SPAPR_CPU_CORE_INITFN(POWER8NVL_v1.0, POWER8NVL);
typedef struct SPAPRCoreInfo {
const char *name;
@@ -350,6 +358,21 @@ typedef struct SPAPRCoreInfo {
} SPAPRCoreInfo;
static const SPAPRCoreInfo spapr_cores[] = {
+ /* 970 and aliases */
+ { .name = "970_v2.2", .initfn = spapr_cpu_core_970_initfn },
+ { .name = "970", .initfn = spapr_cpu_core_970_initfn },
+
+ /* 970MP variants and aliases */
+ { .name = "970MP_v1.0", .initfn = spapr_cpu_core_970MP_v10_initfn },
+ { .name = "970mp_v1.0", .initfn = spapr_cpu_core_970MP_v10_initfn },
+ { .name = "970MP_v1.1", .initfn = spapr_cpu_core_970MP_v11_initfn },
+ { .name = "970mp_v1.1", .initfn = spapr_cpu_core_970MP_v11_initfn },
+ { .name = "970mp", .initfn = spapr_cpu_core_970MP_v11_initfn },
+
+ /* POWER5 and aliases */
+ { .name = "POWER5+_v2.1", .initfn = spapr_cpu_core_POWER5plus_initfn },
+ { .name = "POWER5+", .initfn = spapr_cpu_core_POWER5plus_initfn },
+
/* POWER7 and aliases */
{ .name = "POWER7_v2.3", .initfn = spapr_cpu_core_POWER7_initfn },
{ .name = "POWER7", .initfn = spapr_cpu_core_POWER7_initfn },
@@ -367,6 +390,10 @@ static const SPAPRCoreInfo spapr_cores[] = {
{ .name = "POWER8E_v2.1", .initfn = spapr_cpu_core_POWER8E_initfn },
{ .name = "POWER8E", .initfn = spapr_cpu_core_POWER8E_initfn },
+ /* POWER8NVL and aliases */
+ { .name = "POWER8NVL_v1.0", .initfn = spapr_cpu_core_POWER8NVL_initfn },
+ { .name = "POWER8NVL", .initfn = spapr_cpu_core_POWER8NVL_initfn },
+
{ .name = NULL }
};
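
The realize error path in spapr_cpu_core_realize() above now unwinds with "while (--i >= 0)" so that only the threads that were actually initialized get unparented. A minimal, self-contained illustration of that unwind idiom; the init/teardown helpers here are stand-ins invented for the example:

    #include <stdio.h>

    /* stand-ins for the real per-thread init/teardown */
    static int init_elem(int idx)  { return idx == 3 ? -1 : 0; } /* fail at 3 */
    static void fini_elem(int idx) { printf("teardown %d\n", idx); }

    int main(void)
    {
        int i, n = 5;

        for (i = 0; i < n; i++) {
            if (init_elem(i) < 0) {
                goto err;
            }
        }
        return 0;

    err:
        /* unwind only the elements that were fully set up: 2, 1, 0 */
        while (--i >= 0) {
            fini_elem(i);
        }
        return 1;
    }
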
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index af8099220e..b0668b34a9 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -386,7 +386,7 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)
rtas_event_log_queue(RTAS_LOG_TYPE_EPOW, new_epow, true);
- qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+ qemu_irq_pulse(xics_get_qirq(spapr->xics, spapr->check_exception_irq));
}
static void spapr_hotplug_set_signalled(uint32_t drc_index)
@@ -468,7 +468,7 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
rtas_event_log_queue(RTAS_LOG_TYPE_HOTPLUG, new_hp, true);
- qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+ qemu_irq_pulse(xics_get_qirq(spapr->xics, spapr->check_exception_irq));
}
void spapr_hotplug_req_add_by_index(sPAPRDRConnector *drc)
@@ -551,7 +551,7 @@ static void check_exception(PowerPCCPU *cpu, sPAPRMachineState *spapr,
* interrupts.
*/
if (rtas_event_log_contains(mask, true)) {
- qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+ qemu_irq_pulse(xics_get_qirq(spapr->xics, spapr->check_exception_irq));
}
return;
@@ -603,7 +603,7 @@ out_no_events:
void spapr_events_init(sPAPRMachineState *spapr)
{
QTAILQ_INIT(&spapr->pending_events);
- spapr->check_exception_irq = xics_alloc(spapr->icp, 0, 0, false,
+ spapr->check_exception_irq = xics_spapr_alloc(spapr->xics, 0, 0, false,
&error_fatal);
spapr->epow_notifier.notify = spapr_powerdown_req;
qemu_register_powerdown_notifier(&spapr->epow_notifier);
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index e011ed4b66..73af112e1d 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -83,12 +83,12 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong pte_index = args[1];
target_ulong pteh = args[2];
target_ulong ptel = args[3];
- unsigned apshift, spshift;
+ unsigned apshift;
target_ulong raddr;
target_ulong index;
uint64_t token;
- apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
+ apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
if (!apshift) {
/* Bad page size encoding */
return H_PARAMETER;
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index e230bacae1..d57b05d5c0 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -156,6 +156,16 @@ static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu)
return 1ULL << tcet->page_shift;
}
+static void spapr_tce_notify_started(MemoryRegion *iommu)
+{
+ spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), true);
+}
+
+static void spapr_tce_notify_stopped(MemoryRegion *iommu)
+{
+ spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), false);
+}
+
static int spapr_tce_table_post_load(void *opaque, int version_id)
{
sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
@@ -236,6 +246,8 @@ static const VMStateDescription vmstate_spapr_tce_table = {
static MemoryRegionIOMMUOps spapr_iommu_ops = {
.translate = spapr_tce_translate_iommu,
.get_min_page_size = spapr_tce_get_min_page_size,
+ .notify_started = spapr_tce_notify_started,
+ .notify_stopped = spapr_tce_notify_stopped,
};
static int spapr_tce_table_realize(DeviceState *dev)
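
The new notify_started/notify_stopped hooks above recover the enclosing sPAPRTCETable from its embedded MemoryRegion with container_of(). A simplified, self-contained sketch of that pattern; the struct layout and the need_vfio flag are stand-ins for illustration, not the real QEMU types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* stand-ins for MemoryRegion / sPAPRTCETable, for illustration only */
    typedef struct MemoryRegion { int dummy; } MemoryRegion;

    typedef struct sPAPRTCETable {
        unsigned liobn;
        MemoryRegion iommu;   /* embedded region, as in the real structure */
        int need_vfio;
    } sPAPRTCETable;

    static void notify_started(MemoryRegion *iommu)
    {
        /* recover the enclosing table from the embedded region */
        sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);

        tcet->need_vfio = 1;  /* the real code calls spapr_tce_set_need_vfio() */
    }

    int main(void)
    {
        sPAPRTCETable t = { .liobn = 0x1234, .need_vfio = 0 };

        notify_started(&t.iommu);
        printf("need_vfio = %d\n", t.need_vfio);   /* prints 1 */
        return 0;
    }
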
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 9f28fb3829..949c44fec8 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -35,6 +35,7 @@
#include "hw/ppc/spapr.h"
#include "hw/pci-host/spapr.h"
#include "exec/address-spaces.h"
+#include "exec/ram_addr.h"
#include <libfdt.h>
#include "trace.h"
#include "qemu/error-report.h"
@@ -45,6 +46,7 @@
#include "hw/ppc/spapr_drc.h"
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
+#include "sysemu/hostmem.h"
#include "hw/vfio/vfio.h"
@@ -322,7 +324,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return;
}
- xics_free(spapr->icp, msi->first_irq, msi->num);
+ xics_spapr_free(spapr->xics, msi->first_irq, msi->num);
if (msi_present(pdev)) {
spapr_msi_setmsg(pdev, 0, false, 0, 0);
}
@@ -360,7 +362,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
}
/* Allocate MSIs */
- irq = xics_alloc_block(spapr->icp, 0, req_num, false,
+ irq = xics_spapr_alloc_block(spapr->xics, 0, req_num, false,
ret_intr_type == RTAS_TYPE_MSI, &err);
if (err) {
error_reportf_err(err, "Can't allocate MSIs for device %x: ",
@@ -371,7 +373,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
/* Release previous MSIs */
if (msi) {
- xics_free(spapr->icp, msi->first_irq, msi->num);
+ xics_spapr_free(spapr->xics, msi->first_irq, msi->num);
g_hash_table_remove(phb->msi, &config_addr);
}
@@ -733,7 +735,7 @@ static void spapr_msi_write(void *opaque, hwaddr addr,
trace_spapr_pci_msi_write(addr, data, irq);
- qemu_irq_pulse(xics_get_qirq(spapr->icp, irq));
+ qemu_irq_pulse(xics_get_qirq(spapr->xics, irq));
}
static const MemoryRegionOps spapr_msi_ops = {
@@ -1087,12 +1089,6 @@ static void spapr_phb_add_pci_device(sPAPRDRConnector *drc,
void *fdt = NULL;
int fdt_start_offset = 0, fdt_size;
- if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
- sPAPRTCETable *tcet = spapr_tce_find_by_liobn(phb->dma_liobn);
-
- spapr_tce_set_need_vfio(tcet, true);
- }
-
fdt = create_device_tree(&fdt_size);
fdt_start_offset = spapr_create_pci_child_dt(phb, pdev, fdt, 0);
if (!fdt_start_offset) {
@@ -1310,11 +1306,14 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
PCIBus *bus;
uint64_t msi_window_size = 4096;
sPAPRTCETable *tcet;
+ const unsigned windows_supported =
+ sphb->ddw_enabled ? SPAPR_PCI_DMA_MAX_WINDOWS : 1;
if (sphb->index != (uint32_t)-1) {
hwaddr windows_base;
- if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1)
+ if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn[0] != (uint32_t)-1)
+ || (sphb->dma_liobn[1] != (uint32_t)-1 && windows_supported == 2)
|| (sphb->mem_win_addr != (hwaddr)-1)
|| (sphb->io_win_addr != (hwaddr)-1)) {
error_setg(errp, "Either \"index\" or other parameters must"
@@ -1329,7 +1328,9 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
}
sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
- sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0);
+ for (i = 0; i < windows_supported; ++i) {
+ sphb->dma_liobn[i] = SPAPR_PCI_LIOBN(sphb->index, i);
+ }
windows_base = SPAPR_PCI_WINDOW_BASE
+ sphb->index * SPAPR_PCI_WINDOW_SPACING;
@@ -1342,8 +1343,9 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
return;
}
- if (sphb->dma_liobn == (uint32_t)-1) {
- error_setg(errp, "LIOBN not specified for PHB");
+ if ((sphb->dma_liobn[0] == (uint32_t)-1) ||
+ ((sphb->dma_liobn[1] == (uint32_t)-1) && (windows_supported > 1))) {
+ error_setg(errp, "LIOBN(s) not specified for PHB");
return;
}
@@ -1442,7 +1444,8 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
uint32_t irq;
Error *local_err = NULL;
- irq = xics_alloc_block(spapr->icp, 0, 1, true, false, &local_err);
+ irq = xics_spapr_alloc_block(spapr->xics, 0, 1, true, false,
+ &local_err);
if (local_err) {
error_propagate(errp, local_err);
error_prepend(errp, "can't allocate LSIs: ");
@@ -1461,16 +1464,18 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
}
}
- tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn);
- if (!tcet) {
- error_setg(errp, "Unable to create TCE table for %s",
- sphb->dtbusname);
- return;
+ /* DMA setup */
+ for (i = 0; i < windows_supported; ++i) {
+ tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn[i]);
+ if (!tcet) {
+ error_setg(errp, "Creating window#%d failed for %s",
+ i, sphb->dtbusname);
+ return;
+ }
+ memory_region_add_subregion_overlap(&sphb->iommu_root, 0,
+ spapr_tce_get_iommu(tcet), 0);
}
- memory_region_add_subregion_overlap(&sphb->iommu_root, 0,
- spapr_tce_get_iommu(tcet), 0);
-
sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free);
}
@@ -1487,13 +1492,19 @@ static int spapr_phb_children_reset(Object *child, void *opaque)
void spapr_phb_dma_reset(sPAPRPHBState *sphb)
{
- sPAPRTCETable *tcet = spapr_tce_find_by_liobn(sphb->dma_liobn);
+ int i;
+ sPAPRTCETable *tcet;
- if (tcet && tcet->nb_table) {
- spapr_tce_table_disable(tcet);
+ for (i = 0; i < SPAPR_PCI_DMA_MAX_WINDOWS; ++i) {
+ tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[i]);
+
+ if (tcet && tcet->nb_table) {
+ spapr_tce_table_disable(tcet);
+ }
}
/* Register default 32bit DMA window */
+ tcet = spapr_tce_find_by_liobn(sphb->dma_liobn[0]);
spapr_tce_table_enable(tcet, SPAPR_TCE_PAGE_SHIFT, sphb->dma_win_addr,
sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT);
}
@@ -1515,7 +1526,8 @@ static void spapr_phb_reset(DeviceState *qdev)
static Property spapr_phb_properties[] = {
DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1),
DEFINE_PROP_UINT64("buid", sPAPRPHBState, buid, -1),
- DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn, -1),
+ DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn[0], -1),
+ DEFINE_PROP_UINT32("liobn64", sPAPRPHBState, dma_liobn[1], -1),
DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1),
DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size,
SPAPR_PCI_MMIO_WIN_SIZE),
@@ -1527,6 +1539,11 @@ static Property spapr_phb_properties[] = {
/* Default DMA window is 0..1GB */
DEFINE_PROP_UINT64("dma_win_addr", sPAPRPHBState, dma_win_addr, 0),
DEFINE_PROP_UINT64("dma_win_size", sPAPRPHBState, dma_win_size, 0x40000000),
+ DEFINE_PROP_UINT64("dma64_win_addr", sPAPRPHBState, dma64_win_addr,
+ 0x800000000000000ULL),
+ DEFINE_PROP_BOOL("ddw", sPAPRPHBState, ddw_enabled, true),
+ DEFINE_PROP_UINT64("pgsz", sPAPRPHBState, page_size_mask,
+ (1ULL << 12) | (1ULL << 16)),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1603,7 +1620,7 @@ static const VMStateDescription vmstate_spapr_pci = {
.post_load = spapr_pci_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState),
- VMSTATE_UINT32_EQUAL(dma_liobn, sPAPRPHBState),
+ VMSTATE_UINT32_EQUAL(dma_liobn[0], sPAPRPHBState),
VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState),
VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
@@ -1779,6 +1796,15 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
uint32_t interrupt_map_mask[] = {
cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
+ uint32_t ddw_applicable[] = {
+ cpu_to_be32(RTAS_IBM_QUERY_PE_DMA_WINDOW),
+ cpu_to_be32(RTAS_IBM_CREATE_PE_DMA_WINDOW),
+ cpu_to_be32(RTAS_IBM_REMOVE_PE_DMA_WINDOW)
+ };
+ uint32_t ddw_extensions[] = {
+ cpu_to_be32(1),
+ cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW)
+ };
sPAPRTCETable *tcet;
PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
sPAPRFDT s_fdt;
@@ -1801,7 +1827,15 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
_FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
_FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
_FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
- _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS));
+ _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS_SPAPR));
+
+ /* Dynamic DMA window */
+ if (phb->ddw_enabled) {
+ _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-applicable", &ddw_applicable,
+ sizeof(ddw_applicable)));
+ _FDT(fdt_setprop(fdt, bus_off, "ibm,ddw-extensions",
+ &ddw_extensions, sizeof(ddw_extensions)));
+ }
/* Build the interrupt-map; this must match what is done
* in pci_spapr_map_irq
@@ -1826,7 +1860,7 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
_FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
sizeof(interrupt_map)));
- tcet = spapr_tce_find_by_liobn(phb->dma_liobn);
+ tcet = spapr_tce_find_by_liobn(phb->dma_liobn[0]);
if (!tcet) {
return -1;
}
diff --git a/hw/ppc/spapr_rtas_ddw.c b/hw/ppc/spapr_rtas_ddw.c
new file mode 100644
index 0000000000..177dcffc9b
--- /dev/null
+++ b/hw/ppc/spapr_rtas_ddw.c
@@ -0,0 +1,295 @@
+/*
+ * QEMU sPAPR Dynamic DMA windows support
+ *
+ * Copyright (c) 2015 Alexey Kardashevskiy, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/error-report.h"
+#include "hw/ppc/spapr.h"
+#include "hw/pci-host/spapr.h"
+#include "trace.h"
+
+static int spapr_phb_get_active_win_num_cb(Object *child, void *opaque)
+{
+ sPAPRTCETable *tcet;
+
+ tcet = (sPAPRTCETable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE);
+ if (tcet && tcet->nb_table) {
+ ++*(unsigned *)opaque;
+ }
+ return 0;
+}
+
+static unsigned spapr_phb_get_active_win_num(sPAPRPHBState *sphb)
+{
+ unsigned ret = 0;
+
+ object_child_foreach(OBJECT(sphb), spapr_phb_get_active_win_num_cb, &ret);
+
+ return ret;
+}
+
+static int spapr_phb_get_free_liobn_cb(Object *child, void *opaque)
+{
+ sPAPRTCETable *tcet;
+
+ tcet = (sPAPRTCETable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE);
+ if (tcet && !tcet->nb_table) {
+ *(uint32_t *)opaque = tcet->liobn;
+ return 1;
+ }
+ return 0;
+}
+
+static unsigned spapr_phb_get_free_liobn(sPAPRPHBState *sphb)
+{
+ uint32_t liobn = 0;
+
+ object_child_foreach(OBJECT(sphb), spapr_phb_get_free_liobn_cb, &liobn);
+
+ return liobn;
+}
+
+static uint32_t spapr_page_mask_to_query_mask(uint64_t page_mask)
+{
+ int i;
+ uint32_t mask = 0;
+ const struct { int shift; uint32_t mask; } masks[] = {
+ { 12, RTAS_DDW_PGSIZE_4K },
+ { 16, RTAS_DDW_PGSIZE_64K },
+ { 24, RTAS_DDW_PGSIZE_16M },
+ { 25, RTAS_DDW_PGSIZE_32M },
+ { 26, RTAS_DDW_PGSIZE_64M },
+ { 27, RTAS_DDW_PGSIZE_128M },
+ { 28, RTAS_DDW_PGSIZE_256M },
+ { 34, RTAS_DDW_PGSIZE_16G },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(masks); ++i) {
+ if (page_mask & (1ULL << masks[i].shift)) {
+ mask |= masks[i].mask;
+ }
+ }
+
+ return mask;
+}
+
+static void rtas_ibm_query_pe_dma_window(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ sPAPRPHBState *sphb;
+ uint64_t buid, max_window_size;
+ uint32_t avail, addr, pgmask = 0;
+ MachineState *machine = MACHINE(spapr);
+
+ if ((nargs != 3) || (nret != 5)) {
+ goto param_error_exit;
+ }
+
+ buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
+ addr = rtas_ld(args, 0);
+ sphb = spapr_pci_find_phb(spapr, buid);
+ if (!sphb || !sphb->ddw_enabled) {
+ goto param_error_exit;
+ }
+
+ /* Translate page mask to LoPAPR format */
+ pgmask = spapr_page_mask_to_query_mask(sphb->page_size_mask);
+
+ /*
+ * This is "Largest contiguous block of TCEs allocated specifically
+ * for (that is, are reserved for) this PE".
+ * Return the maximum number, i.e. the maximum supported RAM size in 4K pages.
+ */
+ if (machine->ram_size == machine->maxram_size) {
+ max_window_size = machine->ram_size;
+ } else {
+ MemoryHotplugState *hpms = &spapr->hotplug_memory;
+
+ max_window_size = hpms->base + memory_region_size(&hpms->mr);
+ }
+
+ avail = SPAPR_PCI_DMA_MAX_WINDOWS - spapr_phb_get_active_win_num(sphb);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, avail);
+ rtas_st(rets, 2, max_window_size >> SPAPR_TCE_PAGE_SHIFT);
+ rtas_st(rets, 3, pgmask);
+ rtas_st(rets, 4, 0); /* DMA migration mask, not supported */
+
+ trace_spapr_iommu_ddw_query(buid, addr, avail, max_window_size, pgmask);
+ return;
+
+param_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void rtas_ibm_create_pe_dma_window(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ sPAPRPHBState *sphb;
+ sPAPRTCETable *tcet = NULL;
+ uint32_t addr, page_shift, window_shift, liobn;
+ uint64_t buid, win_addr;
+ int windows;
+
+ if ((nargs != 5) || (nret != 4)) {
+ goto param_error_exit;
+ }
+
+ buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
+ addr = rtas_ld(args, 0);
+ sphb = spapr_pci_find_phb(spapr, buid);
+ if (!sphb || !sphb->ddw_enabled) {
+ goto param_error_exit;
+ }
+
+ page_shift = rtas_ld(args, 3);
+ window_shift = rtas_ld(args, 4);
+ liobn = spapr_phb_get_free_liobn(sphb);
+ windows = spapr_phb_get_active_win_num(sphb);
+
+ if (!(sphb->page_size_mask & (1ULL << page_shift)) ||
+ (window_shift < page_shift)) {
+ goto param_error_exit;
+ }
+
+ if (!liobn || !sphb->ddw_enabled || windows == SPAPR_PCI_DMA_MAX_WINDOWS) {
+ goto hw_error_exit;
+ }
+
+ tcet = spapr_tce_find_by_liobn(liobn);
+ if (!tcet) {
+ goto hw_error_exit;
+ }
+
+ win_addr = (windows == 0) ? sphb->dma_win_addr : sphb->dma64_win_addr;
+ spapr_tce_table_enable(tcet, page_shift, win_addr,
+ 1ULL << (window_shift - page_shift));
+ if (!tcet->nb_table) {
+ goto hw_error_exit;
+ }
+
+ trace_spapr_iommu_ddw_create(buid, addr, 1ULL << page_shift,
+ 1ULL << window_shift, tcet->bus_offset, liobn);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, liobn);
+ rtas_st(rets, 2, tcet->bus_offset >> 32);
+ rtas_st(rets, 3, tcet->bus_offset & ((uint32_t) -1));
+
+ return;
+
+hw_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+
+param_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void rtas_ibm_remove_pe_dma_window(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ sPAPRPHBState *sphb;
+ sPAPRTCETable *tcet;
+ uint32_t liobn;
+
+ if ((nargs != 1) || (nret != 1)) {
+ goto param_error_exit;
+ }
+
+ liobn = rtas_ld(args, 0);
+ tcet = spapr_tce_find_by_liobn(liobn);
+ if (!tcet) {
+ goto param_error_exit;
+ }
+
+ sphb = SPAPR_PCI_HOST_BRIDGE(OBJECT(tcet)->parent);
+ if (!sphb || !sphb->ddw_enabled || !tcet->nb_table) {
+ goto param_error_exit;
+ }
+
+ spapr_tce_table_disable(tcet);
+ trace_spapr_iommu_ddw_remove(liobn);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ return;
+
+param_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void rtas_ibm_reset_pe_dma_window(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ sPAPRPHBState *sphb;
+ uint64_t buid;
+ uint32_t addr;
+
+ if ((nargs != 3) || (nret != 1)) {
+ goto param_error_exit;
+ }
+
+ buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
+ addr = rtas_ld(args, 0);
+ sphb = spapr_pci_find_phb(spapr, buid);
+ if (!sphb || !sphb->ddw_enabled) {
+ goto param_error_exit;
+ }
+
+ spapr_phb_dma_reset(sphb);
+ trace_spapr_iommu_ddw_reset(buid, addr);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+
+ return;
+
+param_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void spapr_rtas_ddw_init(void)
+{
+ spapr_rtas_register(RTAS_IBM_QUERY_PE_DMA_WINDOW,
+ "ibm,query-pe-dma-window",
+ rtas_ibm_query_pe_dma_window);
+ spapr_rtas_register(RTAS_IBM_CREATE_PE_DMA_WINDOW,
+ "ibm,create-pe-dma-window",
+ rtas_ibm_create_pe_dma_window);
+ spapr_rtas_register(RTAS_IBM_REMOVE_PE_DMA_WINDOW,
+ "ibm,remove-pe-dma-window",
+ rtas_ibm_remove_pe_dma_window);
+ spapr_rtas_register(RTAS_IBM_RESET_PE_DMA_WINDOW,
+ "ibm,reset-pe-dma-window",
+ rtas_ibm_reset_pe_dma_window);
+}
+
+type_init(spapr_rtas_ddw_init)
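
Two bits of arithmetic in the new RTAS handlers above are worth seeing in isolation: the translation of the PHB's "pgsz" bitmap of page shifts into the LoPAPR query mask, and the number of TCE entries created for a window of a given size. The sketch below mirrors that logic; the DDW_PGSIZE_* bit values are illustrative placeholders (the real constants are the RTAS_DDW_PGSIZE_* definitions), and the sample numbers are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    /* placeholder bit values, not the real RTAS_DDW_PGSIZE_* constants */
    #define DDW_PGSIZE_4K   0x01
    #define DDW_PGSIZE_64K  0x02
    #define DDW_PGSIZE_16M  0x04

    static uint32_t page_mask_to_query_mask(uint64_t page_mask)
    {
        static const struct { int shift; uint32_t mask; } masks[] = {
            { 12, DDW_PGSIZE_4K },
            { 16, DDW_PGSIZE_64K },
            { 24, DDW_PGSIZE_16M },
        };
        uint32_t mask = 0;
        size_t i;

        for (i = 0; i < ARRAY_SIZE(masks); i++) {
            if (page_mask & (1ULL << masks[i].shift)) {
                mask |= masks[i].mask;
            }
        }
        return mask;
    }

    int main(void)
    {
        /* default "pgsz" property: 4K and 64K pages supported */
        uint64_t pgsizes = (1ULL << 12) | (1ULL << 16);
        /* a 1GB window backed by 64K pages */
        unsigned window_shift = 30, page_shift = 16;

        printf("query mask = 0x%x\n", page_mask_to_query_mask(pgsizes)); /* 0x3 */
        printf("TCE entries = %llu\n",
               (unsigned long long)(1ULL << (window_shift - page_shift))); /* 16384 */
        return 0;
    }
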
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index ae40db8fd2..f93244d7c1 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -463,7 +463,7 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp)
dev->qdev.id = id;
}
- dev->irq = xics_alloc(spapr->icp, 0, dev->irq, false, &local_err);
+ dev->irq = xics_spapr_alloc(spapr->xics, 0, dev->irq, false, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events
index 6da713547f..900679bc9d 100644
--- a/hw/ppc/trace-events
+++ b/hw/ppc/trace-events
@@ -30,6 +30,10 @@ spapr_iommu_xlate(uint64_t liobn, uint64_t ioba, uint64_t tce, unsigned perm, un
spapr_iommu_new_table(uint64_t liobn, void *table, int fd) "liobn=%"PRIx64" table=%p fd=%d"
spapr_iommu_pre_save(uint64_t liobn, uint32_t nb, uint64_t offs, uint32_t ps) "liobn=%"PRIx64" %"PRIx32" bus_offset=%"PRIx64" ps=%"PRIu32
spapr_iommu_post_load(uint64_t liobn, uint32_t pre_nb, uint32_t post_nb, uint64_t offs, uint32_t ps) "liobn=%"PRIx64" %"PRIx32" => %"PRIx32" bus_offset=%"PRIx64" ps=%"PRIu32
+spapr_iommu_ddw_query(uint64_t buid, uint32_t cfgaddr, unsigned wa, uint64_t win_size, uint32_t pgmask) "buid=%"PRIx64" addr=%"PRIx32", %u windows available, max window size=%"PRIx64", mask=%"PRIx32
+spapr_iommu_ddw_create(uint64_t buid, uint32_t cfgaddr, uint64_t pg_size, uint64_t req_size, uint64_t start, uint32_t liobn) "buid=%"PRIx64" addr=%"PRIx32", page size=0x%"PRIx64", requested=0x%"PRIx64", start addr=%"PRIx64", liobn=%"PRIx32
+spapr_iommu_ddw_remove(uint32_t liobn) "liobn=%"PRIx32
+spapr_iommu_ddw_reset(uint64_t buid, uint32_t cfgaddr) "buid=%"PRIx64" addr=%"PRIx32
# hw/ppc/ppc.c
ppc_tb_adjust(uint64_t offs1, uint64_t offs2, int64_t diff, int64_t seconds) "adjusted from 0x%"PRIx64" to 0x%"PRIx64", diff %"PRId64" (%"PRId64"s)"
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 1625e6b38b..8b709e362e 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -69,92 +69,58 @@ VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
return vdev;
}
-static int virtio_ccw_set_guest2host_notifier(VirtioCcwDevice *dev, int n,
- bool assign, bool set_handler)
+static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
{
- VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
- VirtQueue *vq = virtio_get_queue(vdev, n);
- EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
- int r = 0;
- SubchDev *sch = dev->sch;
- uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
+ virtio_bus_start_ioeventfd(&dev->bus);
+}
- if (assign) {
- r = event_notifier_init(notifier, 1);
- if (r < 0) {
- error_report("%s: unable to init event notifier: %d", __func__, r);
- return r;
- }
- virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
- r = s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
- if (r < 0) {
- error_report("%s: unable to assign ioeventfd: %d", __func__, r);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
- return r;
- }
- } else {
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
- event_notifier_cleanup(notifier);
- }
- return r;
+static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
+{
+ virtio_bus_stop_ioeventfd(&dev->bus);
}
-static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
+static bool virtio_ccw_ioeventfd_started(DeviceState *d)
{
- VirtIODevice *vdev;
- int n, r;
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
- if (!(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) ||
- dev->ioeventfd_disabled ||
- dev->ioeventfd_started) {
- return;
- }
- vdev = virtio_bus_get_device(&dev->bus);
- for (n = 0; n < VIRTIO_CCW_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = virtio_ccw_set_guest2host_notifier(dev, n, true, true);
- if (r < 0) {
- goto assign_error;
- }
- }
- dev->ioeventfd_started = true;
- return;
+ return dev->ioeventfd_started;
+}
- assign_error:
- while (--n >= 0) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
- assert(r >= 0);
+static void virtio_ccw_ioeventfd_set_started(DeviceState *d, bool started,
+ bool err)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+ dev->ioeventfd_started = started;
+ if (err) {
+ /* Disable ioeventfd for this device. */
+ dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
}
- dev->ioeventfd_started = false;
- /* Disable ioeventfd for this device. */
- dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
- error_report("%s: failed. Fallback to userspace (slower).", __func__);
}
-static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
+static bool virtio_ccw_ioeventfd_disabled(DeviceState *d)
{
- VirtIODevice *vdev;
- int n, r;
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
- if (!dev->ioeventfd_started) {
- return;
- }
- vdev = virtio_bus_get_device(&dev->bus);
- for (n = 0; n < VIRTIO_CCW_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
- r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
- assert(r >= 0);
- }
- dev->ioeventfd_started = false;
+ return dev->ioeventfd_disabled ||
+ !(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD);
+}
+
+static void virtio_ccw_ioeventfd_set_disabled(DeviceState *d, bool disabled)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+ dev->ioeventfd_disabled = disabled;
+}
+
+static int virtio_ccw_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
+ int n, bool assign)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ SubchDev *sch = dev->sch;
+ uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
+
+ return s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
}
VirtualCssBus *virtual_css_bus_init(void)
@@ -1157,19 +1123,6 @@ static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
}
-static int virtio_ccw_set_host_notifier(DeviceState *d, int n, bool assign)
-{
- VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
-
- /* Stop using the generic ioeventfd, we are doing eventfd handling
- * ourselves below */
- dev->ioeventfd_disabled = assign;
- if (assign) {
- virtio_ccw_stop_ioeventfd(dev);
- }
- return virtio_ccw_set_guest2host_notifier(dev, n, assign, false);
-}
-
static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
{
int r;
@@ -1798,7 +1751,6 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
k->notify = virtio_ccw_notify;
k->vmstate_change = virtio_ccw_vmstate_change;
k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
- k->set_host_notifier = virtio_ccw_set_host_notifier;
k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
k->save_queue = virtio_ccw_save_queue;
k->load_queue = virtio_ccw_load_queue;
@@ -1807,6 +1759,11 @@ static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
k->device_plugged = virtio_ccw_device_plugged;
k->post_plugged = virtio_ccw_post_plugged;
k->device_unplugged = virtio_ccw_device_unplugged;
+ k->ioeventfd_started = virtio_ccw_ioeventfd_started;
+ k->ioeventfd_set_started = virtio_ccw_ioeventfd_set_started;
+ k->ioeventfd_disabled = virtio_ccw_ioeventfd_disabled;
+ k->ioeventfd_set_disabled = virtio_ccw_ioeventfd_set_disabled;
+ k->ioeventfd_assign = virtio_ccw_ioeventfd_assign;
}
static const TypeInfo virtio_ccw_bus_info = {
diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c
index baa0a2cfdf..1f2f2d33dd 100644
--- a/hw/scsi/esp.c
+++ b/hw/scsi/esp.c
@@ -574,7 +574,7 @@ static bool esp_mem_accepts(void *opaque, hwaddr addr,
const VMStateDescription vmstate_esp = {
.name ="esp",
- .version_id = 3,
+ .version_id = 4,
.minimum_version_id = 3,
.fields = (VMStateField[]) {
VMSTATE_BUFFER(rregs, ESPState),
@@ -585,7 +585,8 @@ const VMStateDescription vmstate_esp = {
VMSTATE_BUFFER(ti_buf, ESPState),
VMSTATE_UINT32(status, ESPState),
VMSTATE_UINT32(dma, ESPState),
- VMSTATE_BUFFER(cmdbuf, ESPState),
+ VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
+ VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
VMSTATE_UINT32(cmdlen, ESPState),
VMSTATE_UINT32(do_cmd, ESPState),
VMSTATE_UINT32(dma_left, ESPState),
diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c
index 1a49f1e4b7..18ced31493 100644
--- a/hw/scsi/virtio-scsi-dataplane.c
+++ b/hw/scsi/virtio-scsi-dataplane.c
@@ -31,7 +31,7 @@ void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread)
s->ctx = iothread_get_aio_context(vs->conf.iothread);
/* Don't try if transport does not support notifiers. */
- if (!k->set_guest_notifiers || !k->set_host_notifier) {
+ if (!k->set_guest_notifiers || !k->ioeventfd_started) {
fprintf(stderr, "virtio-scsi: Failed to set iothread "
"(transport does not support notifiers)");
exit(1);
@@ -69,11 +69,10 @@ static int virtio_scsi_vring_init(VirtIOSCSI *s, VirtQueue *vq, int n,
void (*fn)(VirtIODevice *vdev, VirtQueue *vq))
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s)));
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int rc;
/* Set up virtqueue notify */
- rc = k->set_host_notifier(qbus->parent, n, true);
+ rc = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), n, true);
if (rc != 0) {
fprintf(stderr, "virtio-scsi: Failed to set host notifier (%d)\n",
rc);
@@ -159,7 +158,7 @@ fail_vrings:
virtio_scsi_clear_aio(s);
aio_context_release(s->ctx);
for (i = 0; i < vs->conf.num_queues + 2; i++) {
- k->set_host_notifier(qbus->parent, i, false);
+ virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
}
k->set_guest_notifiers(qbus->parent, vs->conf.num_queues + 2, false);
fail_guest_notifiers:
@@ -198,7 +197,7 @@ void virtio_scsi_dataplane_stop(VirtIOSCSI *s)
aio_context_release(s->ctx);
for (i = 0; i < vs->conf.num_queues + 2; i++) {
- k->set_host_notifier(qbus->parent, i, false);
+ virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), i, false);
}
/* Clean up guest notifier (irq) */
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 71d09d3ef3..e8179d6616 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -666,11 +666,6 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
static void virtio_scsi_save(QEMUFile *f, void *opaque)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
- VirtIOSCSI *s = VIRTIO_SCSI(vdev);
-
- if (s->dataplane_started) {
- virtio_scsi_dataplane_stop(s);
- }
virtio_save(vdev, f);
}
diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c
index 075e4ed5df..3ff0886dd5 100644
--- a/hw/sd/ssi-sd.c
+++ b/hw/sd/ssi-sd.c
@@ -15,6 +15,7 @@
#include "sysemu/blockdev.h"
#include "hw/ssi/ssi.h"
#include "hw/sd/sd.h"
+#include "qapi/error.h"
//#define DEBUG_SSI_SD 1
@@ -249,7 +250,7 @@ static int ssi_sd_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
-static int ssi_sd_init(SSISlave *d)
+static void ssi_sd_realize(SSISlave *d, Error **errp)
{
DeviceState *dev = DEVICE(d);
ssi_sd_state *s = FROM_SSI_SLAVE(ssi_sd_state, d);
@@ -260,17 +261,17 @@ static int ssi_sd_init(SSISlave *d)
dinfo = drive_get_next(IF_SD);
s->sd = sd_init(dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, true);
if (s->sd == NULL) {
- return -1;
+ error_setg(errp, "Device initialization failed.");
+ return;
}
register_savevm(dev, "ssi_sd", -1, 1, ssi_sd_save, ssi_sd_load, s);
- return 0;
}
static void ssi_sd_class_init(ObjectClass *klass, void *data)
{
SSISlaveClass *k = SSI_SLAVE_CLASS(klass);
- k->init = ssi_sd_init;
+ k->realize = ssi_sd_realize;
k->transfer = ssi_sd_transfer;
k->cs_polarity = SSI_CS_LOW;
}
diff --git a/hw/sh4/sh_pci.c b/hw/sh4/sh_pci.c
index e820a32307..1747628f3d 100644
--- a/hw/sh4/sh_pci.c
+++ b/hw/sh4/sh_pci.c
@@ -55,7 +55,7 @@ static void sh_pci_reg_write (void *p, hwaddr addr, uint64_t val,
switch(addr) {
case 0 ... 0xfc:
- cpu_to_le32w((uint32_t*)(pcic->dev->config + addr), val);
+ stl_le_p(pcic->dev->config + addr, val);
break;
case 0x1c0:
pcic->par = val;
@@ -85,7 +85,7 @@ static uint64_t sh_pci_reg_read (void *p, hwaddr addr,
switch(addr) {
case 0 ... 0xfc:
- return le32_to_cpup((uint32_t*)(pcic->dev->config + addr));
+ return ldl_le_p(pcic->dev->config + addr);
case 0x1c0:
return pcic->par;
case 0x1c4:
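
The sh_pci change above swaps the deprecated cpu_to_le32w()/le32_to_cpup() helpers for stl_le_p()/ldl_le_p(), which store and load a 32-bit value in little-endian byte order at a possibly unaligned pointer, independent of host endianness. A self-contained approximation of what those helpers do (the my_* names are stand-ins, not the QEMU API):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* store a 32-bit value little-endian at an arbitrary pointer */
    static void my_stl_le_p(void *p, uint32_t v)
    {
        uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
                         (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
        memcpy(p, b, 4);
    }

    /* load a 32-bit little-endian value from an arbitrary pointer */
    static uint32_t my_ldl_le_p(const void *p)
    {
        uint8_t b[4];
        memcpy(b, p, 4);
        return b[0] | (b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
    }

    int main(void)
    {
        uint8_t config[256] = { 0 };

        my_stl_le_p(config + 0x10, 0xdeadbeef);
        printf("0x%08x\n", my_ldl_le_p(config + 0x10));  /* 0xdeadbeef */
        return 0;
    }
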
diff --git a/hw/smbios/Makefile.objs b/hw/smbios/Makefile.objs
index f69a92f967..c3d3753602 100644
--- a/hw/smbios/Makefile.objs
+++ b/hw/smbios/Makefile.objs
@@ -1 +1,2 @@
common-obj-$(CONFIG_SMBIOS) += smbios.o
+common-obj-$(call land,$(CONFIG_SMBIOS),$(CONFIG_IPMI)) += smbios_type_38.o
diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c
index cb8a111102..74c7102929 100644
--- a/hw/smbios/smbios.c
+++ b/hw/smbios/smbios.c
@@ -24,6 +24,8 @@
#include "hw/smbios/smbios.h"
#include "hw/loader.h"
#include "exec/cpu-common.h"
+#include "smbios_build.h"
+#include "hw/smbios/ipmi.h"
/* legacy structures and constants for <= 2.0 machines */
struct smbios_header {
@@ -53,10 +55,10 @@ static bool smbios_uuid_encoded = true;
/* end: legacy structures & constants for <= 2.0 machines */
-static uint8_t *smbios_tables;
-static size_t smbios_tables_len;
-static unsigned smbios_table_max;
-static unsigned smbios_table_cnt;
+uint8_t *smbios_tables;
+size_t smbios_tables_len;
+unsigned smbios_table_max;
+unsigned smbios_table_cnt;
static SmbiosEntryPointType smbios_ep_type = SMBIOS_ENTRY_POINT_21;
static SmbiosEntryPoint ep;
@@ -429,7 +431,7 @@ uint8_t *smbios_get_table_legacy(size_t *length)
/* end: legacy setup functions for <= 2.0 machines */
-static bool smbios_skip_table(uint8_t type, bool required_table)
+bool smbios_skip_table(uint8_t type, bool required_table)
{
if (test_bit(type, have_binfile_bitmap)) {
return true; /* user provided their own binary blob(s) */
@@ -443,65 +445,6 @@ static bool smbios_skip_table(uint8_t type, bool required_table)
return true;
}
-#define SMBIOS_BUILD_TABLE_PRE(tbl_type, tbl_handle, tbl_required) \
- struct smbios_type_##tbl_type *t; \
- size_t t_off; /* table offset into smbios_tables */ \
- int str_index = 0; \
- do { \
- /* should we skip building this table ? */ \
- if (smbios_skip_table(tbl_type, tbl_required)) { \
- return; \
- } \
- \
- /* use offset of table t within smbios_tables */ \
- /* (pointer must be updated after each realloc) */ \
- t_off = smbios_tables_len; \
- smbios_tables_len += sizeof(*t); \
- smbios_tables = g_realloc(smbios_tables, smbios_tables_len); \
- t = (struct smbios_type_##tbl_type *)(smbios_tables + t_off); \
- \
- t->header.type = tbl_type; \
- t->header.length = sizeof(*t); \
- t->header.handle = cpu_to_le16(tbl_handle); \
- } while (0)
-
-#define SMBIOS_TABLE_SET_STR(tbl_type, field, value) \
- do { \
- int len = (value != NULL) ? strlen(value) + 1 : 0; \
- if (len > 1) { \
- smbios_tables = g_realloc(smbios_tables, \
- smbios_tables_len + len); \
- memcpy(smbios_tables + smbios_tables_len, value, len); \
- smbios_tables_len += len; \
- /* update pointer post-realloc */ \
- t = (struct smbios_type_##tbl_type *)(smbios_tables + t_off); \
- t->field = ++str_index; \
- } else { \
- t->field = 0; \
- } \
- } while (0)
-
-#define SMBIOS_BUILD_TABLE_POST \
- do { \
- size_t term_cnt, t_size; \
- \
- /* add '\0' terminator (add two if no strings defined) */ \
- term_cnt = (str_index == 0) ? 2 : 1; \
- smbios_tables = g_realloc(smbios_tables, \
- smbios_tables_len + term_cnt); \
- memset(smbios_tables + smbios_tables_len, 0, term_cnt); \
- smbios_tables_len += term_cnt; \
- \
- /* update smbios max. element size */ \
- t_size = smbios_tables_len - t_off; \
- if (t_size > smbios_table_max) { \
- smbios_table_max = t_size; \
- } \
- \
- /* update smbios element count */ \
- smbios_table_cnt++; \
- } while (0)
-
static void smbios_build_type_0_table(void)
{
SMBIOS_BUILD_TABLE_PRE(0, 0x000, false); /* optional, leave up to BIOS */
@@ -906,6 +849,7 @@ void smbios_get_tables(const struct smbios_phys_mem_area *mem_array,
}
smbios_build_type_32_table();
+ smbios_build_type_38_table();
smbios_build_type_127_table();
smbios_validate_table();
diff --git a/hw/smbios/smbios_build.h b/hw/smbios/smbios_build.h
new file mode 100644
index 0000000000..68b8b72e09
--- /dev/null
+++ b/hw/smbios/smbios_build.h
@@ -0,0 +1,87 @@
+/*
+ * SMBIOS Support
+ *
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2013 Red Hat, Inc.
+ *
+ * Authors:
+ * Alex Williamson <alex.williamson@hp.com>
+ * Markus Armbruster <armbru@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#ifndef QEMU_SMBIOS_BUILD_H
+#define QEMU_SMBIOS_BUILD_H
+
+bool smbios_skip_table(uint8_t type, bool required_table);
+
+extern uint8_t *smbios_tables;
+extern size_t smbios_tables_len;
+extern unsigned smbios_table_max;
+extern unsigned smbios_table_cnt;
+
+#define SMBIOS_BUILD_TABLE_PRE(tbl_type, tbl_handle, tbl_required) \
+ struct smbios_type_##tbl_type *t; \
+ size_t t_off; /* table offset into smbios_tables */ \
+ int str_index = 0; \
+ do { \
+ /* should we skip building this table ? */ \
+ if (smbios_skip_table(tbl_type, tbl_required)) { \
+ return; \
+ } \
+ \
+ /* use offset of table t within smbios_tables */ \
+ /* (pointer must be updated after each realloc) */ \
+ t_off = smbios_tables_len; \
+ smbios_tables_len += sizeof(*t); \
+ smbios_tables = g_realloc(smbios_tables, smbios_tables_len); \
+ t = (struct smbios_type_##tbl_type *)(smbios_tables + t_off); \
+ \
+ t->header.type = tbl_type; \
+ t->header.length = sizeof(*t); \
+ t->header.handle = cpu_to_le16(tbl_handle); \
+ } while (0)
+
+#define SMBIOS_TABLE_SET_STR(tbl_type, field, value) \
+ do { \
+ int len = (value != NULL) ? strlen(value) + 1 : 0; \
+ if (len > 1) { \
+ smbios_tables = g_realloc(smbios_tables, \
+ smbios_tables_len + len); \
+ memcpy(smbios_tables + smbios_tables_len, value, len); \
+ smbios_tables_len += len; \
+ /* update pointer post-realloc */ \
+ t = (struct smbios_type_##tbl_type *)(smbios_tables + t_off); \
+ t->field = ++str_index; \
+ } else { \
+ t->field = 0; \
+ } \
+ } while (0)
+
+#define SMBIOS_BUILD_TABLE_POST \
+ do { \
+ size_t term_cnt, t_size; \
+ \
+ /* add '\0' terminator (add two if no strings defined) */ \
+ term_cnt = (str_index == 0) ? 2 : 1; \
+ smbios_tables = g_realloc(smbios_tables, \
+ smbios_tables_len + term_cnt); \
+ memset(smbios_tables + smbios_tables_len, 0, term_cnt); \
+ smbios_tables_len += term_cnt; \
+ \
+ /* update smbios max. element size */ \
+ t_size = smbios_tables_len - t_off; \
+ if (t_size > smbios_table_max) { \
+ smbios_table_max = t_size; \
+ } \
+ \
+ /* update smbios element count */ \
+ smbios_table_cnt++; \
+ } while (0)
+
+#endif /* QEMU_SMBIOS_BUILD_H */
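
The SMBIOS_TABLE_SET_STR macro above reacquires the table pointer from t_off after every g_realloc(), because the buffer may move. A stripped-down, self-contained illustration of that offsets-not-pointers pattern, using plain realloc() instead of g_realloc(); the helper is invented for the example:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Append 'len' bytes to a growing buffer and return the offset of the
     * new data; callers keep offsets, not pointers, because realloc() may
     * move the whole block. */
    static size_t buf_append(uint8_t **buf, size_t *buf_len,
                             const void *data, size_t len)
    {
        size_t off = *buf_len;

        *buf = realloc(*buf, *buf_len + len);
        if (!*buf) {
            abort();
        }
        memcpy(*buf + off, data, len);
        *buf_len += len;
        return off;
    }

    int main(void)
    {
        uint8_t *tables = NULL;
        size_t tables_len = 0;
        size_t hdr_off;

        hdr_off = buf_append(&tables, &tables_len, "HDR", 3);
        buf_append(&tables, &tables_len, "a string", 9);

        /* re-derive the pointer from the stable offset after every append */
        printf("%.3s, total %zu bytes\n", (char *)tables + hdr_off, tables_len);
        free(tables);
        return 0;
    }
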
diff --git a/hw/smbios/smbios_type_38.c b/hw/smbios/smbios_type_38.c
new file mode 100644
index 0000000000..56e8609c00
--- /dev/null
+++ b/hw/smbios/smbios_type_38.c
@@ -0,0 +1,117 @@
+/*
+ * IPMI SMBIOS firmware handling
+ *
+ * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/ipmi/ipmi.h"
+#include "hw/smbios/ipmi.h"
+#include "hw/smbios/smbios.h"
+#include "qemu/error-report.h"
+#include "smbios_build.h"
+
+/* SMBIOS type 38 - IPMI */
+struct smbios_type_38 {
+ struct smbios_structure_header header;
+ uint8_t interface_type;
+ uint8_t ipmi_spec_revision;
+ uint8_t i2c_slave_address;
+ uint8_t nv_storage_device_address;
+ uint64_t base_address;
+ uint8_t base_address_modifier;
+ uint8_t interrupt_number;
+} QEMU_PACKED;
+
+static void smbios_build_one_type_38(IPMIFwInfo *info)
+{
+ uint64_t baseaddr = info->base_address;
+ SMBIOS_BUILD_TABLE_PRE(38, 0x3000, true);
+
+ t->interface_type = info->interface_type;
+ t->ipmi_spec_revision = ((info->ipmi_spec_major_revision << 4)
+ | info->ipmi_spec_minor_revision);
+ t->i2c_slave_address = info->i2c_slave_address;
+ t->nv_storage_device_address = 0;
+
+ assert(info->ipmi_spec_minor_revision <= 15);
+ assert(info->ipmi_spec_major_revision <= 15);
+
+ /* or 1 to set it to I/O space */
+ switch (info->memspace) {
+ case IPMI_MEMSPACE_IO:
+ baseaddr |= 1;
+ break;
+ case IPMI_MEMSPACE_MEM32:
+ case IPMI_MEMSPACE_MEM64:
+ break;
+ case IPMI_MEMSPACE_SMBUS:
+ baseaddr <<= 1;
+ break;
+ }
+
+ t->base_address = cpu_to_le64(baseaddr);
+
+ t->base_address_modifier = 0;
+ if (info->irq_type == IPMI_LEVEL_IRQ) {
+ t->base_address_modifier |= 1;
+ }
+ switch (info->register_spacing) {
+ case 1:
+ break;
+ case 4:
+ t->base_address_modifier |= 1 << 6;
+ break;
+ case 16:
+ t->base_address_modifier |= 2 << 6;
+ break;
+ default:
+ error_report("IPMI register spacing %d is not compatible with"
+ " SMBIOS, ignoring this entry.", info->register_spacing);
+ return;
+ }
+ t->interrupt_number = info->interrupt_number;
+
+ SMBIOS_BUILD_TABLE_POST;
+}
+
+static void smbios_add_ipmi_devices(BusState *bus)
+{
+ BusChild *kid;
+
+ QTAILQ_FOREACH(kid, &bus->children, sibling) {
+ DeviceState *dev = kid->child;
+ Object *obj = object_dynamic_cast(OBJECT(dev), TYPE_IPMI_INTERFACE);
+ BusState *childbus;
+
+ if (obj) {
+ IPMIInterface *ii;
+ IPMIInterfaceClass *iic;
+ IPMIFwInfo info;
+
+ ii = IPMI_INTERFACE(obj);
+ iic = IPMI_INTERFACE_GET_CLASS(obj);
+ memset(&info, 0, sizeof(info));
+ iic->get_fwinfo(ii, &info);
+ smbios_build_one_type_38(&info);
+ continue;
+ }
+
+ QLIST_FOREACH(childbus, &dev->child_bus, sibling) {
+ smbios_add_ipmi_devices(childbus);
+ }
+ }
+}
+
+void smbios_build_type_38_table(void)
+{
+ BusState *bus;
+
+ bus = sysbus_get_default();
+ if (bus) {
+ smbios_add_ipmi_devices(bus);
+ }
+}
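
smbios_build_one_type_38() above packs the IPMI interrupt type and register spacing into the type 38 base-address-modifier byte. The helper below reproduces just that packing so the encoding is easy to check in isolation; it is a sketch extracted from the code above, not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* bit 0 = level-triggered IRQ; bits 7:6 = register spacing encoding
     * (1-byte -> 0, 4-byte -> 1, 16-byte -> 2), as in the code above */
    static int base_address_modifier(int level_irq, int register_spacing,
                                     uint8_t *out)
    {
        uint8_t mod = 0;

        if (level_irq) {
            mod |= 1;
        }
        switch (register_spacing) {
        case 1:
            break;
        case 4:
            mod |= 1 << 6;
            break;
        case 16:
            mod |= 2 << 6;
            break;
        default:
            return -1;           /* not representable in SMBIOS type 38 */
        }
        *out = mod;
        return 0;
    }

    int main(void)
    {
        uint8_t mod;

        if (base_address_modifier(1, 4, &mod) == 0) {
            printf("modifier = 0x%02x\n", mod);   /* 0x41 */
        }
        return 0;
    }
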
diff --git a/hw/ssi/Makefile.objs b/hw/ssi/Makefile.objs
index fcbb79ef01..c79a8dcd86 100644
--- a/hw/ssi/Makefile.objs
+++ b/hw/ssi/Makefile.objs
@@ -2,6 +2,7 @@ common-obj-$(CONFIG_PL022) += pl022.o
common-obj-$(CONFIG_SSI) += ssi.o
common-obj-$(CONFIG_XILINX_SPI) += xilinx_spi.o
common-obj-$(CONFIG_XILINX_SPIPS) += xilinx_spips.o
+common-obj-$(CONFIG_ASPEED_SOC) += aspeed_smc.o
obj-$(CONFIG_OMAP) += omap_spi.o
obj-$(CONFIG_IMX) += imx_spi.o
diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c
new file mode 100644
index 0000000000..a371e302d4
--- /dev/null
+++ b/hw/ssi/aspeed_smc.c
@@ -0,0 +1,470 @@
+/*
+ * ASPEED AST2400 SMC Controller (SPI Flash Only)
+ *
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "sysemu/sysemu.h"
+#include "qemu/log.h"
+#include "include/qemu/error-report.h"
+#include "exec/address-spaces.h"
+
+#include "hw/ssi/aspeed_smc.h"
+
+/* CE Type Setting Register */
+#define R_CONF (0x00 / 4)
+#define CONF_LEGACY_DISABLE (1 << 31)
+#define CONF_ENABLE_W4 20
+#define CONF_ENABLE_W3 19
+#define CONF_ENABLE_W2 18
+#define CONF_ENABLE_W1 17
+#define CONF_ENABLE_W0 16
+#define CONF_FLASH_TYPE4 9
+#define CONF_FLASH_TYPE3 7
+#define CONF_FLASH_TYPE2 5
+#define CONF_FLASH_TYPE1 3
+#define CONF_FLASH_TYPE0 1
+
+/* CE Control Register */
+#define R_CE_CTRL (0x04 / 4)
+#define CTRL_EXTENDED4 4 /* 32 bit addressing for SPI */
+#define CTRL_EXTENDED3 3 /* 32 bit addressing for SPI */
+#define CTRL_EXTENDED2 2 /* 32 bit addressing for SPI */
+#define CTRL_EXTENDED1 1 /* 32 bit addressing for SPI */
+#define CTRL_EXTENDED0 0 /* 32 bit addressing for SPI */
+
+/* Interrupt Control and Status Register */
+#define R_INTR_CTRL (0x08 / 4)
+#define INTR_CTRL_DMA_STATUS (1 << 11)
+#define INTR_CTRL_CMD_ABORT_STATUS (1 << 10)
+#define INTR_CTRL_WRITE_PROTECT_STATUS (1 << 9)
+#define INTR_CTRL_DMA_EN (1 << 3)
+#define INTR_CTRL_CMD_ABORT_EN (1 << 2)
+#define INTR_CTRL_WRITE_PROTECT_EN (1 << 1)
+
+/* CEx Control Register */
+#define R_CTRL0 (0x10 / 4)
+#define CTRL_CMD_SHIFT 16
+#define CTRL_CMD_MASK 0xff
+#define CTRL_CE_STOP_ACTIVE (1 << 2)
+#define CTRL_CMD_MODE_MASK 0x3
+#define CTRL_READMODE 0x0
+#define CTRL_FREADMODE 0x1
+#define CTRL_WRITEMODE 0x2
+#define CTRL_USERMODE 0x3
+#define R_CTRL1 (0x14 / 4)
+#define R_CTRL2 (0x18 / 4)
+#define R_CTRL3 (0x1C / 4)
+#define R_CTRL4 (0x20 / 4)
+
+/* CEx Segment Address Register */
+#define R_SEG_ADDR0 (0x30 / 4)
+#define SEG_SIZE_SHIFT 24 /* 8MB units */
+#define SEG_SIZE_MASK 0x7f
+#define SEG_START_SHIFT 16 /* address bit [A29-A23] */
+#define SEG_START_MASK 0x7f
+#define R_SEG_ADDR1 (0x34 / 4)
+#define R_SEG_ADDR2 (0x38 / 4)
+#define R_SEG_ADDR3 (0x3C / 4)
+#define R_SEG_ADDR4 (0x40 / 4)
+
+/* Misc Control Register #1 */
+#define R_MISC_CTRL1 (0x50 / 4)
+
+/* Misc Control Register #2 */
+#define R_MISC_CTRL2 (0x54 / 4)
+
+/* DMA Control/Status Register */
+#define R_DMA_CTRL (0x80 / 4)
+#define DMA_CTRL_DELAY_MASK 0xf
+#define DMA_CTRL_DELAY_SHIFT 8
+#define DMA_CTRL_FREQ_MASK 0xf
+#define DMA_CTRL_FREQ_SHIFT 4
+#define DMA_CTRL_MODE (1 << 3)
+#define DMA_CTRL_CKSUM (1 << 2)
+#define DMA_CTRL_DIR (1 << 1)
+#define DMA_CTRL_EN (1 << 0)
+
+/* DMA Flash Side Address */
+#define R_DMA_FLASH_ADDR (0x84 / 4)
+
+/* DMA DRAM Side Address */
+#define R_DMA_DRAM_ADDR (0x88 / 4)
+
+/* DMA Length Register */
+#define R_DMA_LEN (0x8C / 4)
+
+/* Checksum Calculation Result */
+#define R_DMA_CHECKSUM (0x90 / 4)
+
+/* Timings Register */
+#define R_TIMINGS (0x94 / 4)
+
+/* SPI controller registers and bits */
+#define R_SPI_CONF (0x00 / 4)
+#define SPI_CONF_ENABLE_W0 0
+#define R_SPI_CTRL0 (0x4 / 4)
+#define R_SPI_MISC_CTRL (0x10 / 4)
+#define R_SPI_TIMINGS (0x14 / 4)
+
+/*
+ * Default segment mapping addresses and sizes for each slave per
+ * controller. These can be changed when the board is initialized with
+ * the Segment Address Registers, but they don't seem to be used in
+ * the field.
+ */
+static const AspeedSegments aspeed_segments_legacy[] = {
+ { 0x10000000, 32 * 1024 * 1024 },
+};
+
+static const AspeedSegments aspeed_segments_fmc[] = {
+ { 0x20000000, 64 * 1024 * 1024 },
+ { 0x24000000, 32 * 1024 * 1024 },
+ { 0x26000000, 32 * 1024 * 1024 },
+ { 0x28000000, 32 * 1024 * 1024 },
+ { 0x2A000000, 32 * 1024 * 1024 }
+};
+
+static const AspeedSegments aspeed_segments_spi[] = {
+ { 0x30000000, 64 * 1024 * 1024 },
+};
+
+static const AspeedSMCController controllers[] = {
+ { "aspeed.smc.smc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
+ CONF_ENABLE_W0, 5, aspeed_segments_legacy, 0x6000000 },
+ { "aspeed.smc.fmc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
+ CONF_ENABLE_W0, 5, aspeed_segments_fmc, 0x10000000 },
+ { "aspeed.smc.spi", R_SPI_CONF, 0xff, R_SPI_CTRL0, R_SPI_TIMINGS,
+ SPI_CONF_ENABLE_W0, 1, aspeed_segments_spi, 0x10000000 },
+};
+
+static uint64_t aspeed_smc_flash_default_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u"
+ PRIx64 "\n", __func__, addr, size);
+ return 0;
+}
+
+static void aspeed_smc_flash_default_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u: 0x%"
+ PRIx64 "\n", __func__, addr, size, data);
+}
+
+static const MemoryRegionOps aspeed_smc_flash_default_ops = {
+ .read = aspeed_smc_flash_default_read,
+ .write = aspeed_smc_flash_default_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+};
+
+static inline int aspeed_smc_flash_mode(const AspeedSMCState *s, int cs)
+{
+ return s->regs[s->r_ctrl0 + cs] & CTRL_CMD_MODE_MASK;
+}
+
+static inline bool aspeed_smc_is_usermode(const AspeedSMCState *s, int cs)
+{
+ return aspeed_smc_flash_mode(s, cs) == CTRL_USERMODE;
+}
+
+static inline bool aspeed_smc_is_writable(const AspeedSMCState *s, int cs)
+{
+ return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + cs));
+}
+
+static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size)
+{
+ AspeedSMCFlash *fl = opaque;
+ const AspeedSMCState *s = fl->controller;
+ uint64_t ret = 0;
+ int i;
+
+ if (aspeed_smc_is_usermode(s, fl->id)) {
+ for (i = 0; i < size; i++) {
+ ret |= ssi_transfer(s->spi, 0x0) << (8 * i);
+ }
+ } else {
+ qemu_log_mask(LOG_UNIMP, "%s: usermode not implemented\n",
+ __func__);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned size)
+{
+ AspeedSMCFlash *fl = opaque;
+ const AspeedSMCState *s = fl->controller;
+ int i;
+
+ if (!aspeed_smc_is_writable(s, fl->id)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: flash is not writable at 0x%"
+ HWADDR_PRIx "\n", __func__, addr);
+ return;
+ }
+
+ if (!aspeed_smc_is_usermode(s, fl->id)) {
+ qemu_log_mask(LOG_UNIMP, "%s: usermode not implemented\n",
+ __func__);
+ return;
+ }
+
+ for (i = 0; i < size; i++) {
+ ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
+ }
+}
+
+static const MemoryRegionOps aspeed_smc_flash_ops = {
+ .read = aspeed_smc_flash_read,
+ .write = aspeed_smc_flash_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+};
+
+static bool aspeed_smc_is_ce_stop_active(const AspeedSMCState *s, int cs)
+{
+ return s->regs[s->r_ctrl0 + cs] & CTRL_CE_STOP_ACTIVE;
+}
+
+static void aspeed_smc_update_cs(const AspeedSMCState *s)
+{
+ int i;
+
+ for (i = 0; i < s->num_cs; ++i) {
+ qemu_set_irq(s->cs_lines[i], aspeed_smc_is_ce_stop_active(s, i));
+ }
+}
+
+static void aspeed_smc_reset(DeviceState *d)
+{
+ AspeedSMCState *s = ASPEED_SMC(d);
+ int i;
+
+ memset(s->regs, 0, sizeof s->regs);
+
+ /* Unselect all slaves */
+ for (i = 0; i < s->num_cs; ++i) {
+ s->regs[s->r_ctrl0 + i] |= CTRL_CE_STOP_ACTIVE;
+ }
+
+ aspeed_smc_update_cs(s);
+}
+
+static bool aspeed_smc_is_implemented(AspeedSMCState *s, hwaddr addr)
+{
+ return (addr == s->r_conf || addr == s->r_timings || addr == s->r_ce_ctrl ||
+ (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs));
+}
+
+static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size)
+{
+ AspeedSMCState *s = ASPEED_SMC(opaque);
+
+ addr >>= 2;
+
+ if (addr >= ARRAY_SIZE(s->regs)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Out-of-bounds read at 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ return 0;
+ }
+
+ if (!aspeed_smc_is_implemented(s, addr)) {
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ return 0;
+ }
+
+ return s->regs[addr];
+}
+
+static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned int size)
+{
+ AspeedSMCState *s = ASPEED_SMC(opaque);
+ uint32_t value = data;
+
+ addr >>= 2;
+
+ if (addr >= ARRAY_SIZE(s->regs)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Out-of-bounds write at 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ return;
+ }
+
+ if (!aspeed_smc_is_implemented(s, addr)) {
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented: 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
+ return;
+ }
+
+ /*
+ * Not much to do apart from storing the value and setting the CS
+ * lines if the register is a controlling one.
+ */
+ s->regs[addr] = value;
+ if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) {
+ aspeed_smc_update_cs(s);
+ }
+}
+
+static const MemoryRegionOps aspeed_smc_ops = {
+ .read = aspeed_smc_read,
+ .write = aspeed_smc_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .valid.unaligned = true,
+};
+
+static void aspeed_smc_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ AspeedSMCState *s = ASPEED_SMC(dev);
+ AspeedSMCClass *mc = ASPEED_SMC_GET_CLASS(s);
+ int i;
+ char name[32];
+ hwaddr offset = 0;
+
+ s->ctrl = mc->ctrl;
+
+ /* keep a copy under AspeedSMCState to speed up accesses */
+ s->r_conf = s->ctrl->r_conf;
+ s->r_ce_ctrl = s->ctrl->r_ce_ctrl;
+ s->r_ctrl0 = s->ctrl->r_ctrl0;
+ s->r_timings = s->ctrl->r_timings;
+ s->conf_enable_w0 = s->ctrl->conf_enable_w0;
+
+ /* Enforce some real HW limits */
+ if (s->num_cs > s->ctrl->max_slaves) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: num_cs cannot exceed: %d\n",
+ __func__, s->ctrl->max_slaves);
+ s->num_cs = s->ctrl->max_slaves;
+ }
+
+ s->spi = ssi_create_bus(dev, "spi");
+
+ /* Setup cs_lines for slaves */
+ sysbus_init_irq(sbd, &s->irq);
+ s->cs_lines = g_new0(qemu_irq, s->num_cs);
+ ssi_auto_connect_slaves(dev, s->cs_lines, s->spi);
+
+ for (i = 0; i < s->num_cs; ++i) {
+ sysbus_init_irq(sbd, &s->cs_lines[i]);
+ }
+
+ aspeed_smc_reset(dev);
+
+ memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s,
+ s->ctrl->name, ASPEED_SMC_R_MAX * 4);
+ sysbus_init_mmio(sbd, &s->mmio);
+
+ /*
+ * Memory region where flash modules are remapped
+ */
+ snprintf(name, sizeof(name), "%s.flash", s->ctrl->name);
+
+ memory_region_init_io(&s->mmio_flash, OBJECT(s),
+ &aspeed_smc_flash_default_ops, s, name,
+ s->ctrl->mapping_window_size);
+ sysbus_init_mmio(sbd, &s->mmio_flash);
+
+ s->flashes = g_new0(AspeedSMCFlash, s->num_cs);
+
+ for (i = 0; i < s->num_cs; ++i) {
+ AspeedSMCFlash *fl = &s->flashes[i];
+
+ snprintf(name, sizeof(name), "%s.%d", s->ctrl->name, i);
+
+ fl->id = i;
+ fl->controller = s;
+ fl->size = s->ctrl->segments[i].size;
+ memory_region_init_io(&fl->mmio, OBJECT(s), &aspeed_smc_flash_ops,
+ fl, name, fl->size);
+ memory_region_add_subregion(&s->mmio_flash, offset, &fl->mmio);
+ offset += fl->size;
+ }
+}
+
+static const VMStateDescription vmstate_aspeed_smc = {
+ .name = "aspeed.smc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, AspeedSMCState, ASPEED_SMC_R_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static Property aspeed_smc_properties[] = {
+ DEFINE_PROP_UINT32("num-cs", AspeedSMCState, num_cs, 1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void aspeed_smc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ AspeedSMCClass *mc = ASPEED_SMC_CLASS(klass);
+
+ dc->realize = aspeed_smc_realize;
+ dc->reset = aspeed_smc_reset;
+ dc->props = aspeed_smc_properties;
+ dc->vmsd = &vmstate_aspeed_smc;
+ mc->ctrl = data;
+}
+
+static const TypeInfo aspeed_smc_info = {
+ .name = TYPE_ASPEED_SMC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AspeedSMCState),
+ .class_size = sizeof(AspeedSMCClass),
+ .abstract = true,
+};
+
+static void aspeed_smc_register_types(void)
+{
+ int i;
+
+ type_register_static(&aspeed_smc_info);
+ for (i = 0; i < ARRAY_SIZE(controllers); ++i) {
+ TypeInfo ti = {
+ .name = controllers[i].name,
+ .parent = TYPE_ASPEED_SMC,
+ .class_init = aspeed_smc_class_init,
+ .class_data = (void *)&controllers[i],
+ };
+ type_register(&ti);
+ }
+}
+
+type_init(aspeed_smc_register_types)
diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c
index 9791c0d947..7eaaf565fd 100644
--- a/hw/ssi/ssi.c
+++ b/hw/ssi/ssi.c
@@ -54,7 +54,7 @@ static uint32_t ssi_transfer_raw_default(SSISlave *dev, uint32_t val)
return 0;
}
-static int ssi_slave_init(DeviceState *dev)
+static void ssi_slave_realize(DeviceState *dev, Error **errp)
{
SSISlave *s = SSI_SLAVE(dev);
SSISlaveClass *ssc = SSI_SLAVE_GET_CLASS(s);
@@ -64,7 +64,7 @@ static int ssi_slave_init(DeviceState *dev)
qdev_init_gpio_in_named(dev, ssi_cs_default, SSI_GPIO_CS, 1);
}
- return ssc->init(s);
+ ssc->realize(s, errp);
}
static void ssi_slave_class_init(ObjectClass *klass, void *data)
@@ -72,7 +72,7 @@ static void ssi_slave_class_init(ObjectClass *klass, void *data)
SSISlaveClass *ssc = SSI_SLAVE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- dc->init = ssi_slave_init;
+ dc->realize = ssi_slave_realize;
dc->bus_type = TYPE_SSI_BUS;
if (!ssc->transfer_raw) {
ssc->transfer_raw = ssi_transfer_raw_default;
diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c
index f4e333eb8f..ea625f25ce 100644
--- a/hw/timer/mc146818rtc.c
+++ b/hw/timer/mc146818rtc.c
@@ -906,6 +906,8 @@ static void rtc_realizefn(DeviceState *dev, Error **errp)
object_property_add_alias(qdev_get_machine(), "rtc-time",
OBJECT(s), "date", NULL);
+
+ qdev_init_gpio_out(dev, &s->irq, 1);
}
ISADevice *rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq)
@@ -920,9 +922,9 @@ ISADevice *rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq)
qdev_prop_set_int32(dev, "base_year", base_year);
qdev_init_nofail(dev);
if (intercept_irq) {
- s->irq = intercept_irq;
+ qdev_connect_gpio_out(dev, 0, intercept_irq);
} else {
- isa_init_irq(isadev, &s->irq, RTC_ISA_IRQ);
+ isa_connect_gpio_out(isadev, 0, RTC_ISA_IRQ);
}
QLIST_INSERT_HEAD(&rtc_devices, s, link);
diff --git a/hw/vfio/Makefile.objs b/hw/vfio/Makefile.objs
index ceddbb8f99..c25e32b029 100644
--- a/hw/vfio/Makefile.objs
+++ b/hw/vfio/Makefile.objs
@@ -4,4 +4,5 @@ obj-$(CONFIG_PCI) += pci.o pci-quirks.o
obj-$(CONFIG_SOFTMMU) += platform.o
obj-$(CONFIG_SOFTMMU) += calxeda-xgmac.o
obj-$(CONFIG_SOFTMMU) += amd-xgbe.o
+obj-$(CONFIG_SOFTMMU) += spapr.o
endif
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 27cc1596f9..f3c0522e7e 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -28,6 +28,7 @@
#include "exec/memory.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
+#include "qemu/range.h"
#include "sysemu/kvm.h"
#ifdef CONFIG_KVM
#include "linux/kvm.h"
@@ -241,6 +242,44 @@ static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
return -errno;
}
+static void vfio_host_win_add(VFIOContainer *container,
+ hwaddr min_iova, hwaddr max_iova,
+ uint64_t iova_pgsizes)
+{
+ VFIOHostDMAWindow *hostwin;
+
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (ranges_overlap(hostwin->min_iova,
+ hostwin->max_iova - hostwin->min_iova + 1,
+ min_iova,
+ max_iova - min_iova + 1)) {
+ hw_error("%s: Overlapped IOMMU are not enabled", __func__);
+ }
+ }
+
+ hostwin = g_malloc0(sizeof(*hostwin));
+
+ hostwin->min_iova = min_iova;
+ hostwin->max_iova = max_iova;
+ hostwin->iova_pgsizes = iova_pgsizes;
+ QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
+}
+
+static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
+ hwaddr max_iova)
+{
+ VFIOHostDMAWindow *hostwin;
+
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
+ QLIST_REMOVE(hostwin, hostwin_next);
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
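The overlap test above rejects any new window whose IOVA range intersects an existing one; QEMU's ranges_overlap(first1, len1, first2, len2) is essentially the closed-interval check sketched here on toy numbers (not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool overlaps(uint64_t first1, uint64_t len1,
                     uint64_t first2, uint64_t len2)
{
    uint64_t last1 = first1 + len1 - 1;
    uint64_t last2 = first2 + len2 - 1;

    return !(last1 < first2 || last2 < first1);
}

int main(void)
{
    /* existing 2 GiB window at 0, candidate windows above and inside it */
    printf("%d\n", overlaps(0x0, 0x80000000ULL, 0x80000000ULL, 0x1000)); /* 0 */
    printf("%d\n", overlaps(0x0, 0x80000000ULL, 0x7ffff000ULL, 0x2000)); /* 1 */
    return 0;
}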
static bool vfio_listener_skipped_section(MemoryRegionSection *section)
{
return (!memory_region_is_ram(section->mr) &&
@@ -329,6 +368,8 @@ static void vfio_listener_region_add(MemoryListener *listener,
Int128 llend, llsize;
void *vaddr;
int ret;
+ VFIOHostDMAWindow *hostwin;
+ bool hostwin_found;
if (vfio_listener_skipped_section(section)) {
trace_vfio_listener_region_add_skip(
@@ -354,7 +395,40 @@ static void vfio_listener_region_add(MemoryListener *listener,
}
end = int128_get64(int128_sub(llend, int128_one()));
- if ((iova < container->min_iova) || (end > container->max_iova)) {
+ if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
+ VFIOHostDMAWindow *hostwin;
+ hwaddr pgsize = 0;
+
+ /* For now, intersections are not allowed; we may relax this later */
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (ranges_overlap(hostwin->min_iova,
+ hostwin->max_iova - hostwin->min_iova + 1,
+ section->offset_within_address_space,
+ int128_get64(section->size))) {
+ ret = -1;
+ goto fail;
+ }
+ }
+
+ ret = vfio_spapr_create_window(container, section, &pgsize);
+ if (ret) {
+ goto fail;
+ }
+
+ vfio_host_win_add(container, section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(section->size) - 1, pgsize);
+ }
+
+ hostwin_found = false;
+ QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
+ hostwin_found = true;
+ break;
+ }
+ }
+
+ if (!hostwin_found) {
error_report("vfio: IOMMU container %p can't map guest IOVA region"
" 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx,
container, iova, end);
@@ -369,10 +443,6 @@ static void vfio_listener_region_add(MemoryListener *listener,
trace_vfio_listener_region_add_iommu(iova, end);
/*
- * FIXME: We should do some checking to see if the
- * capabilities of the host VFIO IOMMU are adequate to model
- * the guest IOMMU
- *
* FIXME: For VFIO iommu types which have KVM acceleration to
* avoid bouncing all map/unmaps through qemu this way, this
* would be the right place to wire that up (tell the KVM
@@ -455,7 +525,8 @@ static void vfio_listener_region_del(MemoryListener *listener,
QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
if (giommu->iommu == section->mr) {
- memory_region_unregister_iommu_notifier(&giommu->n);
+ memory_region_unregister_iommu_notifier(giommu->iommu,
+ &giommu->n);
QLIST_REMOVE(giommu, giommu_next);
g_free(giommu);
break;
@@ -492,6 +563,18 @@ static void vfio_listener_region_del(MemoryListener *listener,
"0x%"HWADDR_PRIx") = %d (%m)",
container, iova, int128_get64(llsize), ret);
}
+
+ if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
+ vfio_spapr_remove_window(container,
+ section->offset_within_address_space);
+ if (vfio_host_win_del(container,
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(section->size) - 1) < 0) {
+ hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
+ __func__, section->offset_within_address_space);
+ }
+ }
}
static const MemoryListener vfio_memory_listener = {
@@ -502,6 +585,9 @@ static const MemoryListener vfio_memory_listener = {
static void vfio_listener_release(VFIOContainer *container)
{
memory_listener_unregister(&container->listener);
+ if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
+ memory_listener_unregister(&container->prereg_listener);
+ }
}
static struct vfio_info_cap_header *
@@ -860,8 +946,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
goto free_container_exit;
}
- ret = ioctl(fd, VFIO_SET_IOMMU,
- v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU);
+ container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
+ ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
if (ret) {
error_report("vfio: failed to set iommu for container: %m");
ret = -errno;
@@ -875,19 +961,18 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
* existing Type1 IOMMUs generally support any IOVA we're
* going to actually try in practice.
*/
- container->min_iova = 0;
- container->max_iova = (hwaddr)-1;
-
- /* Assume just 4K IOVA page size */
- container->iova_pgsizes = 0x1000;
info.argsz = sizeof(info);
ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
/* Ignore errors */
- if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
- container->iova_pgsizes = info.iova_pgsizes;
+ if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
+ /* Assume 4k IOVA page size */
+ info.iova_pgsizes = 4096;
}
- } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
+ vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
+ } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
+ ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
struct vfio_iommu_spapr_tce_info info;
+ bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);
ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
if (ret) {
@@ -895,7 +980,9 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
ret = -errno;
goto free_container_exit;
}
- ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
+ container->iommu_type =
+ v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
+ ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
if (ret) {
error_report("vfio: failed to set iommu for container: %m");
ret = -errno;
@@ -907,30 +994,54 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as)
* when container fd is closed so we do not call it explicitly
* in this file.
*/
- ret = ioctl(fd, VFIO_IOMMU_ENABLE);
- if (ret) {
- error_report("vfio: failed to enable container: %m");
- ret = -errno;
- goto free_container_exit;
+ if (!v2) {
+ ret = ioctl(fd, VFIO_IOMMU_ENABLE);
+ if (ret) {
+ error_report("vfio: failed to enable container: %m");
+ ret = -errno;
+ goto free_container_exit;
+ }
+ } else {
+ container->prereg_listener = vfio_prereg_listener;
+
+ memory_listener_register(&container->prereg_listener,
+ &address_space_memory);
+ if (container->error) {
+ memory_listener_unregister(&container->prereg_listener);
+ error_report("vfio: RAM memory listener initialization failed for container");
+ goto free_container_exit;
+ }
}
- /*
- * This only considers the host IOMMU's 32-bit window. At
- * some point we need to add support for the optional 64-bit
- * window and dynamic windows
- */
info.argsz = sizeof(info);
ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
if (ret) {
error_report("vfio: VFIO_IOMMU_SPAPR_TCE_GET_INFO failed: %m");
ret = -errno;
+ if (v2) {
+ memory_listener_unregister(&container->prereg_listener);
+ }
goto free_container_exit;
}
- container->min_iova = info.dma32_window_start;
- container->max_iova = container->min_iova + info.dma32_window_size - 1;
- /* Assume just 4K IOVA pages for now */
- container->iova_pgsizes = 0x1000;
+ if (v2) {
+ /*
+ * A newly created container comes with a default DMA window.
+ * To keep region_add/del simple, remove it now and let the
+ * IOMMU listener callbacks create/remove windows as needed.
+ */
+ ret = vfio_spapr_remove_window(container, info.dma32_window_start);
+ if (ret) {
+ goto free_container_exit;
+ }
+ } else {
+ /* The default table uses 4K pages */
+ vfio_host_win_add(container, info.dma32_window_start,
+ info.dma32_window_start +
+ info.dma32_window_size - 1,
+ 0x1000);
+ }
} else {
error_report("vfio: No available IOMMU models");
ret = -EINVAL;
@@ -991,7 +1102,7 @@ static void vfio_disconnect_container(VFIOGroup *group)
QLIST_REMOVE(container, next);
QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
- memory_region_unregister_iommu_notifier(&giommu->n);
+ memory_region_unregister_iommu_notifier(giommu->iommu, &giommu->n);
QLIST_REMOVE(giommu, giommu_next);
g_free(giommu);
}
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index 35d32b78f4..bec694c8d8 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -318,7 +318,7 @@ static void vfio_probe_ati_bar4_quirk(VFIOPCIDevice *vdev, int nr)
/* This windows doesn't seem to be used except by legacy VGA code */
if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
- !vdev->has_vga || nr != 4) {
+ !vdev->vga || nr != 4) {
return;
}
@@ -366,7 +366,7 @@ static void vfio_probe_ati_bar2_quirk(VFIOPCIDevice *vdev, int nr)
/* Only enable on newer devices where BAR2 is 64bit */
if (!vfio_pci_is(vdev, PCI_VENDOR_ID_ATI, PCI_ANY_ID) ||
- !vdev->has_vga || nr != 2 || !vdev->bars[2].mem64) {
+ !vdev->vga || nr != 2 || !vdev->bars[2].mem64) {
return;
}
@@ -660,7 +660,7 @@ static void vfio_probe_nvidia_bar5_quirk(VFIOPCIDevice *vdev, int nr)
VFIOConfigWindowQuirk *window;
if (!vfio_pci_is(vdev, PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID) ||
- !vdev->has_vga || nr != 5) {
+ !vdev->vga || nr != 5) {
return;
}
@@ -776,7 +776,7 @@ static void vfio_probe_nvidia_bar0_quirk(VFIOPCIDevice *vdev, int nr)
QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
/* The 0x1800 offset mirror only seems to get used by legacy VGA */
- if (vdev->has_vga) {
+ if (vdev->vga) {
quirk = g_malloc0(sizeof(*quirk));
mirror = quirk->data = g_malloc0(sizeof(*mirror));
mirror->mem = quirk->mem = g_new0(MemoryRegion, 1);
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 53b87b76ea..f2c679e47c 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -1502,6 +1502,21 @@ static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
return next - pos;
}
+
+static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
+{
+ uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;
+
+ for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
+ tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
+ if (tmp > pos && tmp < next) {
+ next = tmp;
+ }
+ }
+
+ return next - pos;
+}
+
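The size computed above is simply the distance from pos to the nearest capability that starts after it, or to the end of extended config space for the last one in the chain. The same idea on a toy offset list (assumed offsets, not from the patch):

#include <stdint.h>
#include <stdio.h>

#define PCIE_CFG_SIZE 0x1000

static uint16_t ext_cap_max_size(const uint16_t *offsets, int n, uint16_t pos)
{
    uint16_t next = PCIE_CFG_SIZE;

    for (int i = 0; i < n; i++) {
        if (offsets[i] > pos && offsets[i] < next) {
            next = offsets[i];
        }
    }
    return next - pos;
}

int main(void)
{
    uint16_t caps[] = { 0x100, 0x148, 0x180 };

    printf("0x%x\n", ext_cap_max_size(caps, 3, 0x148)); /* 0x38  */
    printf("0x%x\n", ext_cap_max_size(caps, 3, 0x180)); /* 0xe80 */
    return 0;
}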
static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
{
pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
@@ -1749,16 +1764,100 @@ static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos)
return 0;
}
+static int vfio_add_ext_cap(VFIOPCIDevice *vdev)
+{
+ PCIDevice *pdev = &vdev->pdev;
+ uint32_t header;
+ uint16_t cap_id, next, size;
+ uint8_t cap_ver;
+ uint8_t *config;
+
+ /* Only add extended caps if we have them and the guest can see them */
+ if (!pci_is_express(pdev) || !pci_bus_is_express(pdev->bus) ||
+ !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
+ return 0;
+ }
+
+ /*
+ * pcie_add_capability always inserts the new capability at the tail
+ * of the chain. Therefore to end up with a chain that matches the
+ * physical device, we cache the config space to avoid overwriting
+ * the original config space when we parse the extended capabilities.
+ */
+ config = g_memdup(pdev->config, vdev->config_size);
+
+ /*
+ * Extended capabilities are chained with each pointing to the next, so we
+ * can drop anything other than the head of the chain simply by modifying
+ * the previous next pointer. For the head of the chain, we can modify the
+ * capability ID to something that cannot match a valid capability. ID
+ * 0 is reserved for this since absence of capabilities is indicated by
+ * 0 for the ID, version, AND next pointer. However, pcie_add_capability()
+ * uses ID 0 as reserved for list management and will incorrectly match and
+ * assert if we attempt to pre-load the head of the chain with with this
+ * ID. Use ID 0xFFFF temporarily since it is also seems to be reserved in
+ * part for identifying absence of capabilities in a root complex register
+ * block. If the ID still exists after adding capabilities, switch back to
+ * zero. We'll mark this entire first dword as emulated for this purpose.
+ */
+ pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
+ PCI_EXT_CAP(0xFFFF, 0, 0));
+ pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
+ pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
+
+ for (next = PCI_CONFIG_SPACE_SIZE; next;
+ next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
+ header = pci_get_long(config + next);
+ cap_id = PCI_EXT_CAP_ID(header);
+ cap_ver = PCI_EXT_CAP_VER(header);
+
+ /*
+ * If it becomes important to configure extended capabilities to their
+ * actual size, use this as the default when it's something we don't
+ * recognize. Since QEMU doesn't actually handle many of the config
+ * accesses, exact size doesn't seem worthwhile.
+ */
+ size = vfio_ext_cap_max_size(config, next);
+
+ /* Use emulated next pointer to allow dropping extended caps */
+ pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
+ PCI_EXT_CAP_NEXT_MASK);
+
+ switch (cap_id) {
+ case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
+ trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
+ break;
+ default:
+ pcie_add_capability(pdev, cap_id, cap_ver, next, size);
+ }
+
+ }
+
+ /* Cleanup chain head ID if necessary */
+ if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
+ pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
+ }
+
+ g_free(config);
+ return 0;
+}
+
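The 0xFFFF trick relies on the PCIe extended capability header layout: capability ID in bits 15:0, version in bits 19:16 and next pointer in bits 31:20, so a header of (0xFFFF, 0, 0) encodes as the dword 0x0000ffff, with no next pointer and an ID no real capability uses. A sketch of that packing (the helper below is illustrative, not QEMU's PCI_EXT_CAP macro):

#include <stdint.h>
#include <stdio.h>

static uint32_t ext_cap_header(uint16_t id, uint8_t ver, uint16_t next)
{
    return (uint32_t)id | ((uint32_t)(ver & 0xf) << 16) |
           ((uint32_t)(next & 0xfff) << 20);
}

int main(void)
{
    printf("0x%08x\n", ext_cap_header(0xffff, 0, 0));      /* 0x0000ffff */
    printf("0x%08x\n", ext_cap_header(0x0001, 2, 0x148));  /* 0x14820001 */
    return 0;
}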
static int vfio_add_capabilities(VFIOPCIDevice *vdev)
{
PCIDevice *pdev = &vdev->pdev;
+ int ret;
if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
!pdev->config[PCI_CAPABILITY_LIST]) {
return 0; /* Nothing to add */
}
- return vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
+ ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]);
+ if (ret) {
+ return ret;
+ }
+
+ return vfio_add_ext_cap(vdev);
}
static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index b3eb0d838e..7d482d9d21 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -135,7 +135,6 @@ typedef struct VFIOPCIDevice {
int32_t bootindex;
uint32_t igd_gms;
uint8_t pm_cap;
- bool has_vga;
bool pci_aer;
bool req_enabled;
bool has_flr;
diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
new file mode 100644
index 0000000000..0af342332c
--- /dev/null
+++ b/hw/vfio/spapr.c
@@ -0,0 +1,210 @@
+/*
+ * DMA memory preregistration
+ *
+ * Authors:
+ * Alexey Kardashevskiy <aik@ozlabs.ru>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+
+#include "hw/vfio/vfio-common.h"
+#include "hw/hw.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+
+static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
+{
+ if (memory_region_is_iommu(section->mr)) {
+ hw_error("Cannot possibly preregister IOMMU memory");
+ }
+
+ return !memory_region_is_ram(section->mr) ||
+ memory_region_is_skip_dump(section->mr);
+}
+
+static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa)
+{
+ return memory_region_get_ram_ptr(section->mr) +
+ section->offset_within_region +
+ (gpa - section->offset_within_address_space);
+}
+
+static void vfio_prereg_listener_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer,
+ prereg_listener);
+ const hwaddr gpa = section->offset_within_address_space;
+ hwaddr end;
+ int ret;
+ hwaddr page_mask = qemu_real_host_page_mask;
+ struct vfio_iommu_spapr_register_memory reg = {
+ .argsz = sizeof(reg),
+ .flags = 0,
+ };
+
+ if (vfio_prereg_listener_skipped_section(section)) {
+ trace_vfio_prereg_listener_region_add_skip(
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(int128_sub(section->size, int128_one())));
+ return;
+ }
+
+ if (unlikely((section->offset_within_address_space & ~page_mask) ||
+ (section->offset_within_region & ~page_mask) ||
+ (int128_get64(section->size) & ~page_mask))) {
+ error_report("%s received unaligned region", __func__);
+ return;
+ }
+
+ end = section->offset_within_address_space + int128_get64(section->size);
+ if (gpa >= end) {
+ return;
+ }
+
+ memory_region_ref(section->mr);
+
+ reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
+ reg.size = end - gpa;
+
+ ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
+ trace_vfio_prereg_register(reg.vaddr, reg.size, ret ? -errno : 0);
+ if (ret) {
+ /*
+ * On the initfn path, store the first error in the container so we
+ * can gracefully fail. At runtime, there's not much we can do other
+ * than throw a hardware error.
+ */
+ if (!container->initialized) {
+ if (!container->error) {
+ container->error = ret;
+ }
+ } else {
+ hw_error("vfio: Memory registering failed, unable to continue");
+ }
+ }
+}
+
+static void vfio_prereg_listener_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container = container_of(listener, VFIOContainer,
+ prereg_listener);
+ const hwaddr gpa = section->offset_within_address_space;
+ hwaddr end;
+ int ret;
+ hwaddr page_mask = qemu_real_host_page_mask;
+ struct vfio_iommu_spapr_register_memory reg = {
+ .argsz = sizeof(reg),
+ .flags = 0,
+ };
+
+ if (vfio_prereg_listener_skipped_section(section)) {
+ trace_vfio_prereg_listener_region_del_skip(
+ section->offset_within_address_space,
+ section->offset_within_address_space +
+ int128_get64(int128_sub(section->size, int128_one())));
+ return;
+ }
+
+ if (unlikely((section->offset_within_address_space & ~page_mask) ||
+ (section->offset_within_region & ~page_mask) ||
+ (int128_get64(section->size) & ~page_mask))) {
+ error_report("%s received unaligned region", __func__);
+ return;
+ }
+
+ end = section->offset_within_address_space + int128_get64(section->size);
+ if (gpa >= end) {
+ return;
+ }
+
+ reg.vaddr = (uintptr_t) vfio_prereg_gpa_to_vaddr(section, gpa);
+ reg.size = end - gpa;
+
+ ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
+ trace_vfio_prereg_unregister(reg.vaddr, reg.size, ret ? -errno : 0);
+}
+
+const MemoryListener vfio_prereg_listener = {
+ .region_add = vfio_prereg_listener_region_add,
+ .region_del = vfio_prereg_listener_region_del,
+};
+
+int vfio_spapr_create_window(VFIOContainer *container,
+ MemoryRegionSection *section,
+ hwaddr *pgsize)
+{
+ int ret;
+ unsigned pagesize = memory_region_iommu_get_min_page_size(section->mr);
+ unsigned entries, pages;
+ struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
+
+ /*
+ * FIXME: For VFIO iommu types which have KVM acceleration to
+ * avoid bouncing all map/unmaps through qemu this way, this
+ * would be the right place to wire that up (tell the KVM
+ * device emulation the VFIO iommu handles to use).
+ */
+ create.window_size = int128_get64(section->size);
+ create.page_shift = ctz64(pagesize);
+ /*
+ * SPAPR host supports multilevel TCE tables, there is some
+ * heuristic to decide how many levels we want for our table:
+ * 0..64 = 1; 65..4096 = 2; 4097..262144 = 3; 262145.. = 4
+ */
+ entries = create.window_size >> create.page_shift;
+ pages = MAX((entries * sizeof(uint64_t)) / getpagesize(), 1);
+ pages = MAX(pow2ceil(pages) - 1, 1); /* Round up */
+ create.levels = ctz64(pages) / 6 + 1;
+
+ ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
+ if (ret) {
+ error_report("Failed to create a window, ret = %d (%m)", ret);
+ return -errno;
+ }
+
+ if (create.start_addr != section->offset_within_address_space) {
+ vfio_spapr_remove_window(container, create.start_addr);
+
+ error_report("Host doesn't support DMA window at %"HWADDR_PRIx", must be %"PRIx64,
+ section->offset_within_address_space,
+ (uint64_t)create.start_addr);
+ return -EINVAL;
+ }
+ trace_vfio_spapr_create_window(create.page_shift,
+ create.window_size,
+ create.start_addr);
+ *pgsize = pagesize;
+
+ return 0;
+}
+
+int vfio_spapr_remove_window(VFIOContainer *container,
+ hwaddr offset_within_address_space)
+{
+ struct vfio_iommu_spapr_tce_remove remove = {
+ .argsz = sizeof(remove),
+ .start_addr = offset_within_address_space,
+ };
+ int ret;
+
+ ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
+ if (ret) {
+ error_report("Failed to remove window at %"PRIx64,
+ (uint64_t)remove.start_addr);
+ return -errno;
+ }
+
+ trace_vfio_spapr_remove_window(offset_within_address_space);
+
+ return 0;
+}
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 9da0ff928b..4bb7690c46 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -37,6 +37,7 @@ vfio_pci_hot_reset_result(const char *name, const char *result) "%s hot reset: %
vfio_populate_device_config(const char *name, unsigned long size, unsigned long offset, unsigned long flags) "Device %s config:\n size: 0x%lx, offset: 0x%lx, flags: 0x%lx"
vfio_populate_device_get_irq_info_failure(void) "VFIO_DEVICE_GET_IRQ_INFO failure: %m"
vfio_initfn(const char *name, int group_id) " (%s) group %d"
+vfio_add_ext_cap_dropped(const char *name, uint16_t cap, uint16_t offset) "%s %x@%x"
vfio_pci_reset(const char *name) " (%s)"
vfio_pci_reset_flr(const char *name) "%s FLR/VFIO_DEVICE_RESET"
vfio_pci_reset_pm(const char *name) "%s PCI PM Reset"
@@ -114,3 +115,11 @@ vfio_platform_populate_interrupts(int pin, int count, int flags) "- IRQ index %d
vfio_intp_interrupt_set_pending(int index) "irq %d is set PENDING"
vfio_platform_start_level_irqfd_injection(int index, int fd, int resamplefd) "IRQ index=%d, fd = %d, resamplefd = %d"
vfio_platform_start_edge_irqfd_injection(int index, int fd) "IRQ index=%d, fd = %d"
+
+# hw/vfio/spapr.c
+vfio_prereg_listener_region_add_skip(uint64_t start, uint64_t end) "%"PRIx64" - %"PRIx64
+vfio_prereg_listener_region_del_skip(uint64_t start, uint64_t end) "%"PRIx64" - %"PRIx64
+vfio_prereg_register(uint64_t va, uint64_t size, int ret) "va=%"PRIx64" size=%"PRIx64" ret=%d"
+vfio_prereg_unregister(uint64_t va, uint64_t size, int ret) "va=%"PRIx64" size=%"PRIx64" ret=%d"
+vfio_spapr_create_window(int ps, uint64_t ws, uint64_t off) "pageshift=0x%x winsize=0x%"PRIx64" offset=0x%"PRIx64
+vfio_spapr_remove_window(uint64_t off) "offset=%"PRIx64
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 81cc5b0ae3..a01394d5ac 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1110,14 +1110,15 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
VirtioBusState *vbus = VIRTIO_BUS(qbus);
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int i, r, e;
- if (!k->set_host_notifier) {
+ if (!k->ioeventfd_started) {
fprintf(stderr, "binding does not support host notifiers\n");
r = -ENOSYS;
goto fail;
}
for (i = 0; i < hdev->nvqs; ++i) {
- r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
+ r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
+ true);
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
goto fail_vq;
@@ -1127,7 +1128,8 @@ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
return 0;
fail_vq:
while (--i >= 0) {
- e = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
+ e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
+ false);
if (e < 0) {
fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
fflush(stderr);
@@ -1146,12 +1148,11 @@ fail:
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
- VirtioBusState *vbus = VIRTIO_BUS(qbus);
- VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
int i, r;
for (i = 0; i < hdev->nvqs; ++i) {
- r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
+ r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
+ false);
if (r < 0) {
fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
fflush(stderr);
diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c
index 574f0e23f8..131376027b 100644
--- a/hw/virtio/virtio-bus.c
+++ b/hw/virtio/virtio-bus.c
@@ -146,6 +146,138 @@ void virtio_bus_set_vdev_config(VirtioBusState *bus, uint8_t *config)
}
}
+/*
+ * This function handles both assigning the ioeventfd handler and
+ * registering it with the kernel.
+ * assign: register/deregister ioeventfd with the kernel
+ * set_handler: use the generic ioeventfd handler
+ */
+static int set_host_notifier_internal(DeviceState *proxy, VirtioBusState *bus,
+ int n, bool assign, bool set_handler)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+ int r = 0;
+
+ if (assign) {
+ r = event_notifier_init(notifier, 1);
+ if (r < 0) {
+ error_report("%s: unable to init event notifier: %d", __func__, r);
+ return r;
+ }
+ virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
+ r = k->ioeventfd_assign(proxy, notifier, n, assign);
+ if (r < 0) {
+ error_report("%s: unable to assign ioeventfd: %d", __func__, r);
+ virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ event_notifier_cleanup(notifier);
+ return r;
+ }
+ } else {
+ virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ k->ioeventfd_assign(proxy, notifier, n, assign);
+ event_notifier_cleanup(notifier);
+ }
+ return r;
+}
+
+void virtio_bus_start_ioeventfd(VirtioBusState *bus)
+{
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+ DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtIODevice *vdev;
+ int n, r;
+
+ if (!k->ioeventfd_started || k->ioeventfd_started(proxy)) {
+ return;
+ }
+ if (k->ioeventfd_disabled(proxy)) {
+ return;
+ }
+ vdev = virtio_bus_get_device(bus);
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ r = set_host_notifier_internal(proxy, bus, n, true, true);
+ if (r < 0) {
+ goto assign_error;
+ }
+ }
+ k->ioeventfd_set_started(proxy, true, false);
+ return;
+
+assign_error:
+ while (--n >= 0) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+
+ r = set_host_notifier_internal(proxy, bus, n, false, false);
+ assert(r >= 0);
+ }
+ k->ioeventfd_set_started(proxy, false, true);
+ error_report("%s: failed. Fallback to userspace (slower).", __func__);
+}
+
+void virtio_bus_stop_ioeventfd(VirtioBusState *bus)
+{
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+ DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtIODevice *vdev;
+ int n, r;
+
+ if (!k->ioeventfd_started || !k->ioeventfd_started(proxy)) {
+ return;
+ }
+ vdev = virtio_bus_get_device(bus);
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ r = set_host_notifier_internal(proxy, bus, n, false, false);
+ assert(r >= 0);
+ }
+ k->ioeventfd_set_started(proxy, false, false);
+}
+
+/*
+ * This function switches from/to the generic ioeventfd handler.
+ * assign==false means 'use generic ioeventfd handler'.
+ */
+int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign)
+{
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(bus);
+ DeviceState *proxy = DEVICE(BUS(bus)->parent);
+ VirtIODevice *vdev = virtio_bus_get_device(bus);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+
+ if (!k->ioeventfd_started) {
+ return -ENOSYS;
+ }
+ if (assign) {
+ /*
+ * Stop using the generic ioeventfd, we are doing eventfd handling
+ * ourselves below
+ */
+ k->ioeventfd_set_disabled(proxy, true);
+ }
+ /*
+ * Just switch the handler, don't deassign the ioeventfd.
+ * Otherwise, there's a window where we don't have an
+ * ioeventfd and we may end up with a notification where
+ * we don't expect one.
+ */
+ virtio_queue_set_host_notifier_fd_handler(vq, assign, !assign);
+ if (!assign) {
+ /* Use generic ioeventfd handler again. */
+ k->ioeventfd_set_disabled(proxy, false);
+ }
+ return 0;
+}
+
static char *virtio_bus_get_dev_path(DeviceState *dev)
{
BusState *bus = qdev_get_parent_bus(dev);
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index d4cd91f8c4..eb84b74532 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -93,90 +93,59 @@ typedef struct {
bool ioeventfd_started;
} VirtIOMMIOProxy;
-static int virtio_mmio_set_host_notifier_internal(VirtIOMMIOProxy *proxy,
- int n, bool assign,
- bool set_handler)
+static bool virtio_mmio_ioeventfd_started(DeviceState *d)
{
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- VirtQueue *vq = virtio_get_queue(vdev, n);
- EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
- int r = 0;
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
- if (assign) {
- r = event_notifier_init(notifier, 1);
- if (r < 0) {
- error_report("%s: unable to init event notifier: %d",
- __func__, r);
- return r;
- }
- virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
- memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
- true, n, notifier);
- } else {
- memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
- true, n, notifier);
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
- }
- return r;
+ return proxy->ioeventfd_started;
}
-static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
+static void virtio_mmio_ioeventfd_set_started(DeviceState *d, bool started,
+ bool err)
{
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- int n, r;
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
- if (!kvm_eventfds_enabled() ||
- proxy->ioeventfd_disabled ||
- proxy->ioeventfd_started) {
- return;
- }
+ proxy->ioeventfd_started = started;
+}
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
+static bool virtio_mmio_ioeventfd_disabled(DeviceState *d)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
- r = virtio_mmio_set_host_notifier_internal(proxy, n, true, true);
- if (r < 0) {
- goto assign_error;
- }
- }
- proxy->ioeventfd_started = true;
- return;
+ return !kvm_eventfds_enabled() || proxy->ioeventfd_disabled;
+}
-assign_error:
- while (--n >= 0) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
+static void virtio_mmio_ioeventfd_set_disabled(DeviceState *d, bool disabled)
+{
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
- r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
- assert(r >= 0);
- }
- proxy->ioeventfd_started = false;
- error_report("%s: failed. Fallback to a userspace (slower).", __func__);
+ proxy->ioeventfd_disabled = disabled;
}
-static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
+static int virtio_mmio_ioeventfd_assign(DeviceState *d,
+ EventNotifier *notifier,
+ int n, bool assign)
{
- int r;
- int n;
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
- if (!proxy->ioeventfd_started) {
- return;
+ if (assign) {
+ memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
+ true, n, notifier);
+ } else {
+ memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
+ true, n, notifier);
}
+ return 0;
+}
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
+static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
+{
+ virtio_bus_start_ioeventfd(&proxy->bus);
+}
- r = virtio_mmio_set_host_notifier_internal(proxy, n, false, false);
- assert(r >= 0);
- }
- proxy->ioeventfd_started = false;
+static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
+{
+ virtio_bus_stop_ioeventfd(&proxy->bus);
}
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
@@ -498,25 +467,6 @@ assign_error:
return r;
}
-static int virtio_mmio_set_host_notifier(DeviceState *opaque, int n,
- bool assign)
-{
- VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
-
- /* Stop using ioeventfd for virtqueue kick if the device starts using host
- * notifiers. This makes it easy to avoid stepping on each others' toes.
- */
- proxy->ioeventfd_disabled = assign;
- if (assign) {
- virtio_mmio_stop_ioeventfd(proxy);
- }
- /* We don't need to start here: it's not needed because backend
- * currently only stops on status change away from ok,
- * reset, vmstop and such. If we do add code to start here,
- * need to check vmstate, device state etc. */
- return virtio_mmio_set_host_notifier_internal(proxy, n, assign, false);
-}
-
/* virtio-mmio device */
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
@@ -558,8 +508,12 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
k->notify = virtio_mmio_update_irq;
k->save_config = virtio_mmio_save_config;
k->load_config = virtio_mmio_load_config;
- k->set_host_notifier = virtio_mmio_set_host_notifier;
k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
+ k->ioeventfd_started = virtio_mmio_ioeventfd_started;
+ k->ioeventfd_set_started = virtio_mmio_ioeventfd_set_started;
+ k->ioeventfd_disabled = virtio_mmio_ioeventfd_disabled;
+ k->ioeventfd_set_disabled = virtio_mmio_ioeventfd_set_disabled;
+ k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
k->has_variable_vring_alignment = true;
bus_class->max_dev = 1;
}
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 1a0278304b..2b34b43060 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -262,14 +262,44 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
return 0;
}
+static bool virtio_pci_ioeventfd_started(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ return proxy->ioeventfd_started;
+}
+
+static void virtio_pci_ioeventfd_set_started(DeviceState *d, bool started,
+ bool err)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ proxy->ioeventfd_started = started;
+}
+
+static bool virtio_pci_ioeventfd_disabled(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ return proxy->ioeventfd_disabled ||
+ !(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD);
+}
+
+static void virtio_pci_ioeventfd_set_disabled(DeviceState *d, bool disabled)
+{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
+
+ proxy->ioeventfd_disabled = disabled;
+}
+
#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
-static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
- int n, bool assign, bool set_handler)
+static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
+ int n, bool assign)
{
+ VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtQueue *vq = virtio_get_queue(vdev, n);
- EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
bool fast_mmio = kvm_ioeventfd_any_length_enabled();
@@ -280,16 +310,8 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
virtio_get_queue_index(vq);
hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
- int r = 0;
if (assign) {
- r = event_notifier_init(notifier, 1);
- if (r < 0) {
- error_report("%s: unable to init event notifier: %d",
- __func__, r);
- return r;
- }
- virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
if (modern) {
if (fast_mmio) {
memory_region_add_eventfd(modern_mr, modern_addr, 0,
@@ -325,68 +347,18 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
true, n, notifier);
}
- virtio_queue_set_host_notifier_fd_handler(vq, false, false);
- event_notifier_cleanup(notifier);
}
- return r;
+ return 0;
}
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- int n, r;
-
- if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
- proxy->ioeventfd_disabled ||
- proxy->ioeventfd_started) {
- return;
- }
-
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
-
- r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
- if (r < 0) {
- goto assign_error;
- }
- }
- proxy->ioeventfd_started = true;
- return;
-
-assign_error:
- while (--n >= 0) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
-
- r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
- assert(r >= 0);
- }
- proxy->ioeventfd_started = false;
- error_report("%s: failed. Fallback to a userspace (slower).", __func__);
+ virtio_bus_start_ioeventfd(&proxy->bus);
}
static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
- VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
- int r;
- int n;
-
- if (!proxy->ioeventfd_started) {
- return;
- }
-
- for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
- if (!virtio_queue_get_num(vdev, n)) {
- continue;
- }
-
- r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
- assert(r >= 0);
- }
- proxy->ioeventfd_started = false;
+ virtio_bus_stop_ioeventfd(&proxy->bus);
}
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
@@ -1110,24 +1082,6 @@ assign_error:
return r;
}
-static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
-{
- VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
-
- /* Stop using ioeventfd for virtqueue kick if the device starts using host
- * notifiers. This makes it easy to avoid stepping on each others' toes.
- */
- proxy->ioeventfd_disabled = assign;
- if (assign) {
- virtio_pci_stop_ioeventfd(proxy);
- }
- /* We don't need to start here: it's not needed because backend
- * currently only stops on status change away from ok,
- * reset, vmstop and such. If we do add code to start here,
- * need to check vmstate, device state etc. */
- return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
-}
-
static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
@@ -2488,12 +2442,16 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
k->load_extra_state = virtio_pci_load_extra_state;
k->has_extra_state = virtio_pci_has_extra_state;
k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
- k->set_host_notifier = virtio_pci_set_host_notifier;
k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
k->vmstate_change = virtio_pci_vmstate_change;
k->device_plugged = virtio_pci_device_plugged;
k->device_unplugged = virtio_pci_device_unplugged;
k->query_nvectors = virtio_pci_query_nvectors;
+ k->ioeventfd_started = virtio_pci_ioeventfd_started;
+ k->ioeventfd_set_started = virtio_pci_ioeventfd_set_started;
+ k->ioeventfd_disabled = virtio_pci_ioeventfd_disabled;
+ k->ioeventfd_set_disabled = virtio_pci_ioeventfd_set_disabled;
+ k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
}
static const TypeInfo virtio_pci_bus_info = {
diff --git a/include/crypto/tlscreds.h b/include/crypto/tlscreds.h
index 8e2babd533..59e91875c1 100644
--- a/include/crypto/tlscreds.h
+++ b/include/crypto/tlscreds.h
@@ -54,6 +54,7 @@ struct QCryptoTLSCreds {
gnutls_dh_params_t dh_params;
#endif
bool verifyPeer;
+ char *priority;
};
diff --git a/include/elf.h b/include/elf.h
index 8533b2a8b0..745739ab8c 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -53,6 +53,8 @@ typedef int64_t Elf64_Sxword;
#define EF_MIPS_OPTIONS_FIRST 0x00000080
#define EF_MIPS_32BITMODE 0x00000100
#define EF_MIPS_ABI 0x0000f000
+#define EF_MIPS_FP64 0x00000200
+#define EF_MIPS_NAN2008 0x00000400
#define EF_MIPS_ARCH 0xf0000000
/* These constants define the different elf file types */
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 9f38edf419..6a6796d0c9 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -304,4 +304,6 @@ void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
uint8_t *buf, int len, int is_write);
+int cpu_exec(CPUState *cpu);
+
#endif /* CPU_ALL_H */
diff --git a/include/exec/memory.h b/include/exec/memory.h
index e3829f797a..3e4d4164cd 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -153,6 +153,10 @@ struct MemoryRegionIOMMUOps {
IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
/* Returns minimum supported page size */
uint64_t (*get_min_page_size)(MemoryRegion *iommu);
+ /* Called when the first notifier is set */
+ void (*notify_started)(MemoryRegion *iommu);
+ /* Called when the last notifier is removed */
+ void (*notify_stopped)(MemoryRegion *iommu);
};
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
@@ -441,15 +445,31 @@ void memory_region_init_alias(MemoryRegion *mr,
uint64_t size);
/**
+ * memory_region_init_rom: Initialize a ROM memory region.
+ *
+ * This has the same effect as calling memory_region_init_ram()
+ * and then marking the resulting region read-only with
+ * memory_region_set_readonly().
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @errp: pointer to Error*, to store an error if it happens.
+ */
+void memory_region_init_rom(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp);
+
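A plausible board-side use of the new helper, with the owner, name and size made up for illustration:

MemoryRegion *rom = g_new0(MemoryRegion, 1);

memory_region_init_rom(rom, OBJECT(dev), "board.boot-rom", 64 * 1024,
                       &error_fatal);
memory_region_add_subregion(get_system_memory(), 0x0, rom);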
+/**
* memory_region_init_rom_device: Initialize a ROM memory region. Writes are
* handled via callbacks.
*
- * If NULL callbacks pointer is given, then I/O space is not supposed to be
- * handled by QEMU itself. Any access via the memory API will cause an abort().
- *
* @mr: the #MemoryRegion to be initialized.
* @owner: the object that tracks the region's reference count
- * @ops: callbacks for write access handling.
+ * @ops: callbacks for write access handling (must not be NULL).
* @name: the name of the region.
* @size: size of the region.
* @errp: pointer to Error*, to store an error if it happens.
@@ -622,9 +642,11 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write);
* memory_region_unregister_iommu_notifier: unregister a notifier for
* changes to IOMMU translation entries.
*
+ * @mr: the memory region which was observed and for which notify_stopped()
+ * needs to be called
* @n: the notifier to be removed.
*/
-void memory_region_unregister_iommu_notifier(Notifier *n);
+void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n);
/**
* memory_region_name: get a memory region's name
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index c937062530..95a11032d1 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -205,6 +205,7 @@ typedef struct float_status {
/* should denormalised inputs go to zero and set the input_denormal flag? */
flag flush_inputs_to_zero;
flag default_nan_mode;
+ flag snan_bit_is_one;
} float_status;
static inline void set_float_detect_tininess(int val, float_status *status)
@@ -236,6 +237,10 @@ static inline void set_default_nan_mode(flag val, float_status *status)
{
status->default_nan_mode = val;
}
+static inline void set_snan_bit_is_one(flag val, float_status *status)
+{
+ status->snan_bit_is_one = val;
+}
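A target keeping the legacy signaling-NaN convention would now thread its float_status through these helpers, roughly as below (a fragment; in practice the status lives in the CPU state rather than on the stack, and f stands for some float32 value):

float_status st = { 0 };

set_snan_bit_is_one(1, &st);            /* legacy (pre-NaN2008) sNaN encoding */
if (float32_is_signaling_nan(f, &st)) {
    f = float32_maybe_silence_nan(f, &st);
}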
static inline int get_float_detect_tininess(float_status *status)
{
return status->float_detect_tininess;
@@ -342,9 +347,9 @@ float64 float16_to_float64(float16 a, flag ieee, float_status *status);
/*----------------------------------------------------------------------------
| Software half-precision operations.
*----------------------------------------------------------------------------*/
-int float16_is_quiet_nan( float16 );
-int float16_is_signaling_nan( float16 );
-float16 float16_maybe_silence_nan( float16 );
+int float16_is_quiet_nan(float16, float_status *status);
+int float16_is_signaling_nan(float16, float_status *status);
+float16 float16_maybe_silence_nan(float16, float_status *status);
static inline int float16_is_any_nan(float16 a)
{
@@ -354,7 +359,7 @@ static inline int float16_is_any_nan(float16 a)
/*----------------------------------------------------------------------------
| The pattern for a default generated half-precision NaN.
*----------------------------------------------------------------------------*/
-extern const float16 float16_default_nan;
+float16 float16_default_nan(float_status *status);
/*----------------------------------------------------------------------------
| Software IEC/IEEE single-precision conversion routines.
@@ -404,9 +409,9 @@ float32 float32_minnum(float32, float32, float_status *status);
float32 float32_maxnum(float32, float32, float_status *status);
float32 float32_minnummag(float32, float32, float_status *status);
float32 float32_maxnummag(float32, float32, float_status *status);
-int float32_is_quiet_nan( float32 );
-int float32_is_signaling_nan( float32 );
-float32 float32_maybe_silence_nan( float32 );
+int float32_is_quiet_nan(float32, float_status *status);
+int float32_is_signaling_nan(float32, float_status *status);
+float32 float32_maybe_silence_nan(float32, float_status *status);
float32 float32_scalbn(float32, int, float_status *status);
static inline float32 float32_abs(float32 a)
@@ -466,7 +471,7 @@ static inline float32 float32_set_sign(float32 a, int sign)
/*----------------------------------------------------------------------------
| The pattern for a default generated single-precision NaN.
*----------------------------------------------------------------------------*/
-extern const float32 float32_default_nan;
+float32 float32_default_nan(float_status *status);
/*----------------------------------------------------------------------------
| Software IEC/IEEE double-precision conversion routines.
@@ -516,9 +521,9 @@ float64 float64_minnum(float64, float64, float_status *status);
float64 float64_maxnum(float64, float64, float_status *status);
float64 float64_minnummag(float64, float64, float_status *status);
float64 float64_maxnummag(float64, float64, float_status *status);
-int float64_is_quiet_nan( float64 a );
-int float64_is_signaling_nan( float64 );
-float64 float64_maybe_silence_nan( float64 );
+int float64_is_quiet_nan(float64 a, float_status *status);
+int float64_is_signaling_nan(float64, float_status *status);
+float64 float64_maybe_silence_nan(float64, float_status *status);
float64 float64_scalbn(float64, int, float_status *status);
static inline float64 float64_abs(float64 a)
@@ -578,7 +583,7 @@ static inline float64 float64_set_sign(float64 a, int sign)
/*----------------------------------------------------------------------------
| The pattern for a default generated double-precision NaN.
*----------------------------------------------------------------------------*/
-extern const float64 float64_default_nan;
+float64 float64_default_nan(float_status *status);
/*----------------------------------------------------------------------------
| Software IEC/IEEE extended double-precision conversion routines.
@@ -611,9 +616,9 @@ int floatx80_lt_quiet(floatx80, floatx80, float_status *status);
int floatx80_unordered_quiet(floatx80, floatx80, float_status *status);
int floatx80_compare(floatx80, floatx80, float_status *status);
int floatx80_compare_quiet(floatx80, floatx80, float_status *status);
-int floatx80_is_quiet_nan( floatx80 );
-int floatx80_is_signaling_nan( floatx80 );
-floatx80 floatx80_maybe_silence_nan( floatx80 );
+int floatx80_is_quiet_nan(floatx80, float_status *status);
+int floatx80_is_signaling_nan(floatx80, float_status *status);
+floatx80 floatx80_maybe_silence_nan(floatx80, float_status *status);
floatx80 floatx80_scalbn(floatx80, int, float_status *status);
static inline floatx80 floatx80_abs(floatx80 a)
@@ -663,7 +668,7 @@ static inline int floatx80_is_any_nan(floatx80 a)
/*----------------------------------------------------------------------------
| The pattern for a default generated extended double-precision NaN.
*----------------------------------------------------------------------------*/
-extern const floatx80 floatx80_default_nan;
+floatx80 floatx80_default_nan(float_status *status);
/*----------------------------------------------------------------------------
| Software IEC/IEEE quadruple-precision conversion routines.
@@ -696,9 +701,9 @@ int float128_lt_quiet(float128, float128, float_status *status);
int float128_unordered_quiet(float128, float128, float_status *status);
int float128_compare(float128, float128, float_status *status);
int float128_compare_quiet(float128, float128, float_status *status);
-int float128_is_quiet_nan( float128 );
-int float128_is_signaling_nan( float128 );
-float128 float128_maybe_silence_nan( float128 );
+int float128_is_quiet_nan(float128, float_status *status);
+int float128_is_signaling_nan(float128, float_status *status);
+float128 float128_maybe_silence_nan(float128, float_status *status);
float128 float128_scalbn(float128, int, float_status *status);
static inline float128 float128_abs(float128 a)
@@ -744,6 +749,6 @@ static inline int float128_is_any_nan(float128 a)
/*----------------------------------------------------------------------------
| The pattern for a default generated quadruple-precision NaN.
*----------------------------------------------------------------------------*/
-extern const float128 float128_default_nan;
+float128 float128_default_nan(float_status *status);
#endif /* !SOFTFLOAT_H */
diff --git a/include/glib-compat.h b/include/glib-compat.h
index 03d8b12675..01aa7b37a5 100644
--- a/include/glib-compat.h
+++ b/include/glib-compat.h
@@ -149,6 +149,32 @@ static inline void (g_cond_signal)(CompatGCond *cond)
}
#undef g_cond_signal
+static inline gboolean (g_cond_timed_wait)(CompatGCond *cond,
+ CompatGMutex *mutex,
+ GTimeVal *time)
+{
+ g_assert(mutex->once.status != G_ONCE_STATUS_PROGRESS);
+ g_once(&cond->once, do_g_cond_new, NULL);
+ return g_cond_timed_wait((GCond *) cond->once.retval,
+ (GMutex *) mutex->once.retval, time);
+}
+#undef g_cond_timed_wait
+
+/* This is not a macro, because it didn't exist until 2.32. */
+static inline gboolean g_cond_wait_until(CompatGCond *cond, CompatGMutex *mutex,
+ gint64 end_time)
+{
+ GTimeVal time;
+
+ /* Convert from monotonic to CLOCK_REALTIME. */
+ end_time -= g_get_monotonic_time();
+ g_get_current_time(&time);
+ end_time += time.tv_sec * G_TIME_SPAN_SECOND + time.tv_usec;
+
+ time.tv_sec = end_time / G_TIME_SPAN_SECOND;
+ time.tv_usec = end_time % G_TIME_SPAN_SECOND;
+ return g_cond_timed_wait(cond, mutex, &time);
+}
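Use of the compat g_cond_wait_until() matches the GLib >= 2.32 API: callers pass an absolute deadline on the monotonic clock (sketch; lock, cond and ready are illustrative):

gint64 deadline = g_get_monotonic_time() + 10 * G_TIME_SPAN_MILLISECOND;

g_mutex_lock(&lock);
while (!ready) {
    if (!g_cond_wait_until(&cond, &lock, deadline)) {
        break;                          /* timed out, ready still false */
    }
}
g_mutex_unlock(&lock);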
/* before 2.31 there was no g_thread_new() */
static inline GThread *g_thread_new(const char *name,
diff --git a/include/hw/acpi/acpi_dev_interface.h b/include/hw/acpi/acpi_dev_interface.h
index a0c4a336f2..da4ef7fbd3 100644
--- a/include/hw/acpi/acpi_dev_interface.h
+++ b/include/hw/acpi/acpi_dev_interface.h
@@ -3,6 +3,7 @@
#include "qom/object.h"
#include "qapi-types.h"
+#include "hw/boards.h"
/* These values are part of guest ABI, and can not be changed */
typedef enum {
@@ -37,6 +38,10 @@ void acpi_send_event(DeviceState *dev, AcpiEventStatusBits event);
* ospm_status: returns status of ACPI device objects, reported
* via _OST method if device supports it.
* send_event: inject a specified event into guest
+ * madt_cpu: fills @entry with Interrupt Controller Structure
+ * for CPU indexed by @uid in @apic_ids array,
+ * returned structure types are:
+ * 0 - Local APIC, 9 - Local x2APIC, 0xB - GICC
*
* Interface is designed for providing unified interface
* to generic ACPI functionality that could be used without
@@ -50,5 +55,7 @@ typedef struct AcpiDeviceIfClass {
/* <public> */
void (*ospm_status)(AcpiDeviceIf *adev, ACPIOSTInfoList ***list);
void (*send_event)(AcpiDeviceIf *adev, AcpiEventStatusBits ev);
+ void (*madt_cpu)(AcpiDeviceIf *adev, int uid,
+ CPUArchIdList *apic_ids, GArray *entry);
} AcpiDeviceIfClass;
#endif
diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h
index 10c09ca29f..e7a1a4cefd 100644
--- a/include/hw/acpi/aml-build.h
+++ b/include/hw/acpi/aml-build.h
@@ -277,6 +277,8 @@ Aml *aml_call1(const char *method, Aml *arg1);
Aml *aml_call2(const char *method, Aml *arg1, Aml *arg2);
Aml *aml_call3(const char *method, Aml *arg1, Aml *arg2, Aml *arg3);
Aml *aml_call4(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4);
+Aml *aml_call5(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4,
+ Aml *arg5);
Aml *aml_gpio_int(AmlConsumerAndProducer con_and_pro,
AmlLevelAndEdge edge_level,
AmlActiveHighAndLow active_level, AmlShared shared,
@@ -363,6 +365,7 @@ Aml *aml_refof(Aml *arg);
Aml *aml_derefof(Aml *arg);
Aml *aml_sizeof(Aml *arg);
Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target);
+Aml *aml_object_type(Aml *object);
void
build_header(BIOSLinker *linker, GArray *table_data,
diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h
new file mode 100644
index 0000000000..89ce172941
--- /dev/null
+++ b/include/hw/acpi/cpu.h
@@ -0,0 +1,67 @@
+/*
+ * QEMU ACPI hotplug utilities
+ *
+ * Copyright (C) 2016 Red Hat Inc
+ *
+ * Authors:
+ * Igor Mammedov <imammedo@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef ACPI_CPU_H
+#define ACPI_CPU_H
+
+#include "hw/qdev-core.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/aml-build.h"
+#include "hw/hotplug.h"
+
+typedef struct AcpiCpuStatus {
+ struct CPUState *cpu;
+ uint64_t arch_id;
+ bool is_inserting;
+ bool is_removing;
+ uint32_t ost_event;
+ uint32_t ost_status;
+} AcpiCpuStatus;
+
+typedef struct CPUHotplugState {
+ MemoryRegion ctrl_reg;
+ uint32_t selector;
+ uint8_t command;
+ uint32_t dev_count;
+ AcpiCpuStatus *devs;
+} CPUHotplugState;
+
+void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
+ CPUHotplugState *cpu_st, DeviceState *dev, Error **errp);
+
+void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
+ CPUHotplugState *cpu_st,
+ DeviceState *dev, Error **errp);
+
+void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st,
+ DeviceState *dev, Error **errp);
+
+void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
+ CPUHotplugState *state, hwaddr base_addr);
+
+typedef struct CPUHotplugFeatures {
+ bool acpi_1_compatible;
+ bool has_legacy_cphp;
+} CPUHotplugFeatures;
+
+void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
+ hwaddr io_base,
+ const char *res_root,
+ const char *event_handler_method);
+
+void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list);
+
+extern const VMStateDescription vmstate_cpu_hotplug;
+#define VMSTATE_CPU_HOTPLUG(cpuhp, state) \
+ VMSTATE_STRUCT(cpuhp, state, 1, \
+ vmstate_cpu_hotplug, CPUHotplugState)
+
+#endif
diff --git a/include/hw/acpi/cpu_hotplug.h b/include/hw/acpi/cpu_hotplug.h
index 6fef67ec14..b995ef2ebd 100644
--- a/include/hw/acpi/cpu_hotplug.h
+++ b/include/hw/acpi/cpu_hotplug.h
@@ -16,8 +16,10 @@
#include "hw/acpi/pc-hotplug.h"
#include "hw/acpi/aml-build.h"
#include "hw/hotplug.h"
+#include "hw/acpi/cpu.h"
typedef struct AcpiCpuHotplug {
+ Object *device;
MemoryRegion io;
uint8_t sts[ACPI_GPE_PROC_LEN];
} AcpiCpuHotplug;
@@ -28,6 +30,10 @@ void legacy_acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner,
AcpiCpuHotplug *gpe_cpu, uint16_t base);
+void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu,
+ CPUHotplugState *cpuhp_state,
+ uint16_t io_port);
+
void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,
uint16_t io_base);
#endif
diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h
index bbd657c59b..a352c94fde 100644
--- a/include/hw/acpi/ich9.h
+++ b/include/hw/acpi/ich9.h
@@ -23,6 +23,7 @@
#include "hw/acpi/acpi.h"
#include "hw/acpi/cpu_hotplug.h"
+#include "hw/acpi/cpu.h"
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/acpi_dev_interface.h"
#include "hw/acpi/tco.h"
@@ -48,7 +49,9 @@ typedef struct ICH9LPCPMRegs {
uint32_t pm_io_base;
Notifier powerdown_notifier;
+ bool cpu_hotplug_legacy;
AcpiCpuHotplug gpe_cpu;
+ CPUHotplugState cpuhp_state;
MemHotplugState acpi_memory_hotplug;
diff --git a/include/hw/acpi/ipmi.h b/include/hw/acpi/ipmi.h
new file mode 100644
index 0000000000..ab2bb29048
--- /dev/null
+++ b/include/hw/acpi/ipmi.h
@@ -0,0 +1,22 @@
+/*
+ * QEMU IPMI ACPI handling
+ *
+ * Copyright (c) 2015,2016 Corey Minyard <cminyard@mvista.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef HW_ACPI_IPMI_H
+#define HW_ACPI_IPMI_H
+
+#include "qemu/osdep.h"
+#include "hw/acpi/aml-build.h"
+
+/*
+ * Add ACPI IPMI entries for all registered IPMI devices whose parent
+ * bus matches the given bus. The resource is the ACPI resource that
+ * contains the IPMI device, this is required for the I2C CRS.
+ */
+void build_acpi_ipmi_devices(Aml *table, BusState *bus);
+
+#endif /* HW_ACPI_IPMI_H */
diff --git a/include/hw/arm/ast2400.h b/include/hw/arm/ast2400.h
index c05ed53767..7833bc716c 100644
--- a/include/hw/arm/ast2400.h
+++ b/include/hw/arm/ast2400.h
@@ -14,8 +14,10 @@
#include "hw/arm/arm.h"
#include "hw/intc/aspeed_vic.h"
+#include "hw/misc/aspeed_scu.h"
#include "hw/timer/aspeed_timer.h"
#include "hw/i2c/aspeed_i2c.h"
+#include "hw/ssi/aspeed_smc.h"
typedef struct AST2400State {
/*< private >*/
@@ -27,6 +29,9 @@ typedef struct AST2400State {
AspeedVICState vic;
AspeedTimerCtrlState timerctrl;
AspeedI2CState i2c;
+ AspeedSCUState scu;
+ AspeedSMCState smc;
+ AspeedSMCState spi;
} AST2400State;
#define TYPE_AST2400 "ast2400"
diff --git a/include/hw/audio/pcspk.h b/include/hw/audio/pcspk.h
index ef95dd1360..33e46a53d0 100644
--- a/include/hw/audio/pcspk.h
+++ b/include/hw/audio/pcspk.h
@@ -38,7 +38,7 @@ static inline ISADevice *pcspk_init(ISABus *bus, ISADevice *pit)
isadev = isa_create(bus, TYPE_PC_SPEAKER);
dev = DEVICE(isadev);
qdev_prop_set_uint32(dev, "iobase", 0x61);
- qdev_prop_set_ptr(dev, "pit", pit);
+ object_property_set_link(OBJECT(dev), OBJECT(pit), "pit", NULL);
qdev_init_nofail(dev);
return isadev;
diff --git a/include/hw/block/flash.h b/include/hw/block/flash.h
index 50ccbbcf13..a088baa4e4 100644
--- a/include/hw/block/flash.h
+++ b/include/hw/block/flash.h
@@ -5,6 +5,9 @@
#include "exec/memory.h"
+#define TYPE_CFI_PFLASH01 "cfi.pflash01"
+#define TYPE_CFI_PFLASH02 "cfi.pflash02"
+
typedef struct pflash_t pflash_t;
/* pflash_cfi01.c */
diff --git a/include/hw/char/serial.h b/include/hw/char/serial.h
index 15beb6b45c..9feddc613c 100644
--- a/include/hw/char/serial.h
+++ b/include/hw/char/serial.h
@@ -55,7 +55,8 @@ struct SerialState {
int last_break_enable;
int it_shift;
int baudbase;
- int tsr_retry;
+ uint32_t tsr_retry;
+ guint watch_tag;
uint32_t wakeup;
/* Time when the last byte was successfully sent out of the tsr */
diff --git a/include/hw/cpu/core.h b/include/hw/cpu/core.h
index 4540a7d34f..79ac79c29c 100644
--- a/include/hw/cpu/core.h
+++ b/include/hw/cpu/core.h
@@ -26,6 +26,9 @@ typedef struct CPUCore {
int nr_threads;
} CPUCore;
+/* Note: topology field names need to be kept in sync with
+ * 'CpuInstanceProperties' */
+
#define CPU_CORE_PROP_CORE_ID "core-id"
#endif
diff --git a/include/hw/dma/xlnx-zynq-devcfg.h b/include/hw/dma/xlnx-zynq-devcfg.h
new file mode 100644
index 0000000000..d40e5c8df6
--- /dev/null
+++ b/include/hw/dma/xlnx-zynq-devcfg.h
@@ -0,0 +1,62 @@
+/*
+ * QEMU model of the Xilinx Devcfg Interface
+ *
+ * (C) 2011 PetaLogix Pty Ltd
+ * (C) 2014 Xilinx Inc.
+ * Written by Peter Crosthwaite <peter.crosthwaite@xilinx.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef XLNX_ZYNQ_DEVCFG_H
+
+#include "hw/register.h"
+#include "hw/sysbus.h"
+
+#define TYPE_XLNX_ZYNQ_DEVCFG "xlnx.ps7-dev-cfg"
+
+#define XLNX_ZYNQ_DEVCFG(obj) \
+ OBJECT_CHECK(XlnxZynqDevcfg, (obj), TYPE_XLNX_ZYNQ_DEVCFG)
+
+#define XLNX_ZYNQ_DEVCFG_R_MAX 0x118
+
+#define XLNX_ZYNQ_DEVCFG_DMA_CMD_FIFO_LEN 10
+
+typedef struct XlnxZynqDevcfgDMACmd {
+ uint32_t src_addr;
+ uint32_t dest_addr;
+ uint32_t src_len;
+ uint32_t dest_len;
+} XlnxZynqDevcfgDMACmd;
+
+typedef struct XlnxZynqDevcfg {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ qemu_irq irq;
+
+ XlnxZynqDevcfgDMACmd dma_cmd_fifo[XLNX_ZYNQ_DEVCFG_DMA_CMD_FIFO_LEN];
+ uint8_t dma_cmd_fifo_num;
+
+ uint32_t regs[XLNX_ZYNQ_DEVCFG_R_MAX];
+ RegisterInfo regs_info[XLNX_ZYNQ_DEVCFG_R_MAX];
+} XlnxZynqDevcfg;
+
+#define XLNX_ZYNQ_DEVCFG_H
+#endif
diff --git a/include/hw/i386/ich9.h b/include/hw/i386/ich9.h
index 88233c3077..5fd7e97d23 100644
--- a/include/hw/i386/ich9.h
+++ b/include/hw/i386/ich9.h
@@ -45,6 +45,7 @@ typedef struct ICH9LPCState {
APMState apm;
ICH9LPCPMRegs pm;
uint32_t sci_level; /* track sci level */
+ uint8_t sci_gsi;
/* 2.24 Pin Straps */
struct {
@@ -68,8 +69,7 @@ typedef struct ICH9LPCState {
MemoryRegion rcrb_mem; /* root complex register block */
Notifier machine_ready;
- qemu_irq *pic;
- qemu_irq *ioapic;
+ qemu_irq gsi[GSI_NUM_PINS];
} ICH9LPCState;
Object *ich9_lpc_find(void);
@@ -177,6 +177,8 @@ Object *ich9_lpc_find(void);
#define ICH9_LPC_PIC_NUM_PINS 16
#define ICH9_LPC_IOAPIC_NUM_PINS 24
+#define ICH9_GPIO_GSI "gsi"
+
/* D31:F2 SATA Controller #1 */
#define ICH9_SATA1_DEV 31
#define ICH9_SATA1_FUNC 2
@@ -208,6 +210,8 @@ Object *ich9_lpc_find(void);
/* D31:F3 SMBus controller */
+#define TYPE_ICH9_SMB_DEVICE "ICH9 SMB"
+
#define ICH9_A2_SMB_REVISION 0x02
#define ICH9_SMB_PI 0x00
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index a112efb95f..fe5444dfb6 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -17,6 +17,7 @@
#include "hw/compat.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
+#include "hw/acpi/acpi_dev_interface.h"
#define HPET_INTCAP "hpet-intcap"
@@ -71,7 +72,6 @@ struct PCMachineState {
/* NUMA information: */
uint64_t numa_nodes;
uint64_t *node_mem;
- uint64_t *node_cpu;
};
#define PC_MACHINE_ACPI_DEVICE_PROP "acpi-device"
@@ -136,6 +136,8 @@ struct PCMachineClass {
/* TSC rate migration: */
bool save_tsc_khz;
+ /* generate legacy CPU hotplug AML */
+ bool legacy_cpu_hotplug;
};
#define TYPE_PC_MACHINE "generic-pc-machine"
@@ -199,11 +201,12 @@ typedef struct GSIState {
void gsi_handler(void *opaque, int n, int level);
/* vmport.c */
+#define TYPE_VMPORT "vmport"
typedef uint32_t (VMPortReadFunc)(void *opaque, uint32_t address);
static inline void vmport_init(ISABus *bus)
{
- isa_create_simple(bus, "vmport");
+ isa_create_simple(bus, TYPE_VMPORT);
}
void vmport_register(unsigned char command, VMPortReadFunc *func, void *opaque);
@@ -211,6 +214,7 @@ void vmmouse_get_data(uint32_t *data);
void vmmouse_set_data(const uint32_t *data);
/* pckbd.c */
+#define I8042_A20_LINE "a20"
void i8042_init(qemu_irq kbd_irq, qemu_irq mouse_irq, uint32_t io_base);
void i8042_mm_init(qemu_irq kbd_irq, qemu_irq mouse_irq,
@@ -237,6 +241,8 @@ void pc_guest_info_init(PCMachineState *pcms);
#define PCI_HOST_PROP_PCI_HOLE64_START "pci-hole64-start"
#define PCI_HOST_PROP_PCI_HOLE64_END "pci-hole64-end"
#define PCI_HOST_PROP_PCI_HOLE64_SIZE "pci-hole64-size"
+#define PCI_HOST_BELOW_4G_MEM_SIZE "below-4g-mem-size"
+#define PCI_HOST_ABOVE_4G_MEM_SIZE "above-4g-mem-size"
#define DEFAULT_PCI_HOLE64_SIZE (~0x0ULL)
@@ -271,6 +277,8 @@ int cmos_get_fd_drive_type(FloppyDriveType fd0);
#define FW_CFG_IO_BASE 0x510
+#define PORT92_A20_LINE "a20"
+
/* acpi_piix.c */
I2CBus *piix4_pm_init(PCIBus *bus, int devfn, uint32_t smb_io_base,
@@ -345,6 +353,10 @@ void pc_system_firmware_init(MemoryRegion *rom_memory,
/* pvpanic.c */
uint16_t pvpanic_port(void);
+/* acpi-build.c */
+void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
+ CPUArchIdList *apic_ids, GArray *entry);
+
/* e820 types */
#define E820_RAM 1
#define E820_RESERVED 2
diff --git a/hw/ide/ahci.h b/include/hw/ide/ahci.h
index bc777ed5c2..bc777ed5c2 100644
--- a/hw/ide/ahci.h
+++ b/include/hw/ide/ahci.h
diff --git a/hw/ide/internal.h b/include/hw/ide/internal.h
index 773928af77..773928af77 100644
--- a/hw/ide/internal.h
+++ b/include/hw/ide/internal.h
diff --git a/hw/ide/pci.h b/include/hw/ide/pci.h
index 0f2d4b91a7..0f2d4b91a7 100644
--- a/hw/ide/pci.h
+++ b/include/hw/ide/pci.h
diff --git a/include/hw/isa/isa.h b/include/hw/isa/isa.h
index c87fbad47a..7693ac5454 100644
--- a/include/hw/isa/isa.h
+++ b/include/hw/isa/isa.h
@@ -102,6 +102,7 @@ ISABus *isa_bus_new(DeviceState *dev, MemoryRegion *address_space,
void isa_bus_irqs(ISABus *bus, qemu_irq *irqs);
qemu_irq isa_get_irq(ISADevice *dev, int isairq);
void isa_init_irq(ISADevice *dev, qemu_irq *p, int isairq);
+void isa_connect_gpio_out(ISADevice *isadev, int gpioirq, int isairq);
void isa_bus_dma(ISABus *bus, IsaDma *dma8, IsaDma *dma16);
IsaDma *isa_get_dma(ISABus *bus, int nchan);
MemoryRegion *isa_address_space(ISADevice *dev);
diff --git a/include/hw/mem/nvdimm.h b/include/hw/mem/nvdimm.h
index 60ee92b85a..1cfe9e01c4 100644
--- a/include/hw/mem/nvdimm.h
+++ b/include/hw/mem/nvdimm.h
@@ -34,7 +34,60 @@
} \
} while (0)
-#define TYPE_NVDIMM "nvdimm"
+/*
+ * The minimum label data size is required by NVDIMM Namespace
+ * specification, see the chapter 2 Namespaces:
+ * "NVDIMMs following the NVDIMM Block Mode Specification use an area
+ * at least 128KB in size, which holds around 1000 labels."
+ */
+#define MIN_NAMESPACE_LABEL_SIZE (128UL << 10)
+
+#define TYPE_NVDIMM "nvdimm"
+#define NVDIMM(obj) OBJECT_CHECK(NVDIMMDevice, (obj), TYPE_NVDIMM)
+#define NVDIMM_CLASS(oc) OBJECT_CLASS_CHECK(NVDIMMClass, (oc), TYPE_NVDIMM)
+#define NVDIMM_GET_CLASS(obj) OBJECT_GET_CLASS(NVDIMMClass, (obj), \
+ TYPE_NVDIMM)
+struct NVDIMMDevice {
+ /* private */
+ PCDIMMDevice parent_obj;
+
+ /* public */
+
+ /*
+ * the size of the label data in the NVDIMM device, which is presented
+ * to the guest via the __DSM "Get Namespace Label Size" function.
+ */
+ uint64_t label_size;
+
+ /*
+ * the address of the label data, which is read by the __DSM "Get
+ * Namespace Label Data" function and written by the __DSM "Set
+ * Namespace Label Data" function.
+ */
+ void *label_data;
+
+ /*
+ * the PMEM region in the NVDIMM device, which is presented to the
+ * guest via ACPI NFIT and the _FIT method if NVDIMM hotplug is supported.
+ */
+ MemoryRegion nvdimm_mr;
+};
+typedef struct NVDIMMDevice NVDIMMDevice;
+
+struct NVDIMMClass {
+ /* private */
+ PCDIMMDeviceClass parent_class;
+
+ /* public */
+
+ /* read @size bytes from NVDIMM label data at @offset into @buf. */
+ void (*read_label_data)(NVDIMMDevice *nvdimm, void *buf,
+ uint64_t size, uint64_t offset);
+ /* write @size bytes from @buf to NVDIMM label data at @offset. */
+ void (*write_label_data)(NVDIMMDevice *nvdimm, const void *buf,
+ uint64_t size, uint64_t offset);
+};
+typedef struct NVDIMMClass NVDIMMClass;
#define NVDIMM_DSM_MEM_FILE "etc/acpi/nvdimm-mem"
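The read_label_data/write_label_data class hooks give a backend a way to expose the namespace label area to the _DSM handlers. A minimal, purely RAM-backed sketch is shown below (the example_* names are hypothetical; a real backend also persists writes to its backing file):

/* Hypothetical backend: the label area lives entirely in nvdimm->label_data. */
static void example_nvdimm_read_label_data(NVDIMMDevice *nvdimm, void *buf,
                                           uint64_t size, uint64_t offset)
{
    assert(size <= nvdimm->label_size && offset <= nvdimm->label_size - size);
    memcpy(buf, (uint8_t *)nvdimm->label_data + offset, size);
}

static void example_nvdimm_write_label_data(NVDIMMDevice *nvdimm,
                                            const void *buf,
                                            uint64_t size, uint64_t offset)
{
    assert(size <= nvdimm->label_size && offset <= nvdimm->label_size - size);
    memcpy((uint8_t *)nvdimm->label_data + offset, buf, size);
    /* A real implementation would also flush this range to the backing file. */
}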
diff --git a/include/hw/mem/pc-dimm.h b/include/hw/mem/pc-dimm.h
index 67e92d8f7b..1e483f2670 100644
--- a/include/hw/mem/pc-dimm.h
+++ b/include/hw/mem/pc-dimm.h
@@ -61,7 +61,9 @@ typedef struct PCDIMMDevice {
* @realize: called after common dimm is realized so that the dimm based
* devices get the chance to do specified operations.
* @get_memory_region: returns #MemoryRegion associated with @dimm which
- * is directly mapped into the physical address space of guest
+ * is directly mapped into the physical address space of the guest.
+ * @get_vmstate_memory_region: returns the #MemoryRegion whose contents need
+ * to be migrated so that the memory of @dimm is preserved across live
+ * migration.
*/
typedef struct PCDIMMDeviceClass {
/* private */
@@ -70,6 +72,7 @@ typedef struct PCDIMMDeviceClass {
/* public */
void (*realize)(PCDIMMDevice *dimm, Error **errp);
MemoryRegion *(*get_memory_region)(PCDIMMDevice *dimm);
+ MemoryRegion *(*get_vmstate_memory_region)(PCDIMMDevice *dimm);
} PCDIMMDeviceClass;
/**
diff --git a/include/hw/misc/aspeed_scu.h b/include/hw/misc/aspeed_scu.h
new file mode 100644
index 0000000000..6b8e46f85f
--- /dev/null
+++ b/include/hw/misc/aspeed_scu.h
@@ -0,0 +1,34 @@
+/*
+ * ASPEED System Control Unit
+ *
+ * Andrew Jeffery <andrew@aj.id.au>
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * This code is licensed under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ */
+#ifndef ASPEED_SCU_H
+#define ASPEED_SCU_H
+
+#include "hw/sysbus.h"
+
+#define TYPE_ASPEED_SCU "aspeed.scu"
+#define ASPEED_SCU(obj) OBJECT_CHECK(AspeedSCUState, (obj), TYPE_ASPEED_SCU)
+
+#define ASPEED_SCU_NR_REGS (0x1A8 >> 2)
+
+typedef struct AspeedSCUState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+
+ /*< public >*/
+ MemoryRegion iomem;
+
+ uint32_t regs[ASPEED_SCU_NR_REGS];
+ uint32_t silicon_rev;
+ uint32_t hw_strap1;
+ uint32_t hw_strap2;
+} AspeedSCUState;
+
+#endif /* ASPEED_SCU_H */
diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h
index c5c073ddea..1075f3ea50 100644
--- a/include/hw/pci-host/q35.h
+++ b/include/hw/pci-host/q35.h
@@ -56,8 +56,8 @@ typedef struct MCHPCIState {
MemoryRegion smram, low_smram, high_smram;
MemoryRegion tseg_blackhole, tseg_window;
PcPciInfo pci_info;
- ram_addr_t below_4g_mem_size;
- ram_addr_t above_4g_mem_size;
+ uint64_t below_4g_mem_size;
+ uint64_t above_4g_mem_size;
uint64_t pci_hole64_size;
uint32_t short_root_bus;
IntelIOMMUState *iommu;
@@ -78,6 +78,11 @@ typedef struct Q35PCIHost {
* gmch part
*/
+#define MCH_HOST_PROP_RAM_MEM "ram-mem"
+#define MCH_HOST_PROP_PCI_MEM "pci-mem"
+#define MCH_HOST_PROP_SYSTEM_MEM "system-mem"
+#define MCH_HOST_PROP_IO_MEM "io-mem"
+
/* PCI configuration */
#define MCH_HOST_BRIDGE "MCH"
diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h
index 7848366b2a..193631d2dc 100644
--- a/include/hw/pci-host/spapr.h
+++ b/include/hw/pci-host/spapr.h
@@ -32,6 +32,8 @@
#define SPAPR_PCI_HOST_BRIDGE(obj) \
OBJECT_CHECK(sPAPRPHBState, (obj), TYPE_SPAPR_PCI_HOST_BRIDGE)
+#define SPAPR_PCI_DMA_MAX_WINDOWS 2
+
typedef struct sPAPRPHBState sPAPRPHBState;
typedef struct spapr_pci_msi {
@@ -56,7 +58,7 @@ struct sPAPRPHBState {
hwaddr mem_win_addr, mem_win_size, io_win_addr, io_win_size;
MemoryRegion memwindow, iowindow, msiwindow;
- uint32_t dma_liobn;
+ uint32_t dma_liobn[SPAPR_PCI_DMA_MAX_WINDOWS];
hwaddr dma_win_addr, dma_win_size;
AddressSpace iommu_as;
MemoryRegion iommu_root;
@@ -71,6 +73,10 @@ struct sPAPRPHBState {
spapr_pci_msi_mig *msi_devs;
QLIST_ENTRY(sPAPRPHBState) list;
+
+ bool ddw_enabled;
+ uint64_t page_size_mask;
+ uint64_t dma64_win_addr;
};
#define SPAPR_PCI_MAX_INDEX 255
@@ -93,7 +99,7 @@ static inline qemu_irq spapr_phb_lsi_qirq(struct sPAPRPHBState *phb, int pin)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
- return xics_get_qirq(spapr->icp, phb->lsi_table[pin].irq);
+ return xics_get_qirq(spapr->xics, phb->lsi_table[pin].irq);
}
PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index);
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index e1f8274cf4..2e2dd14c30 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -52,7 +52,7 @@ struct sPAPRMachineState {
struct VIOsPAPRBus *vio_bus;
QLIST_HEAD(, sPAPRPHBState) phbs;
struct sPAPRNVRAM *nvram;
- XICSState *icp;
+ XICSState *xics;
DeviceState *rtc;
void *htab;
@@ -416,6 +416,16 @@ int spapr_allocate_irq_block(int num, bool lsi, bool msi);
#define RTAS_OUT_NOT_AUTHORIZED -9002
#define RTAS_OUT_SYSPARM_PARAM_ERROR -9999
+/* DDW pagesize mask values from ibm,query-pe-dma-window */
+#define RTAS_DDW_PGSIZE_4K 0x01
+#define RTAS_DDW_PGSIZE_64K 0x02
+#define RTAS_DDW_PGSIZE_16M 0x04
+#define RTAS_DDW_PGSIZE_32M 0x08
+#define RTAS_DDW_PGSIZE_64M 0x10
+#define RTAS_DDW_PGSIZE_128M 0x20
+#define RTAS_DDW_PGSIZE_256M 0x40
+#define RTAS_DDW_PGSIZE_16G 0x80
+
/* RTAS tokens */
#define RTAS_TOKEN_BASE 0x2000
@@ -457,8 +467,12 @@ int spapr_allocate_irq_block(int num, bool lsi, bool msi);
#define RTAS_IBM_SET_SLOT_RESET (RTAS_TOKEN_BASE + 0x23)
#define RTAS_IBM_CONFIGURE_PE (RTAS_TOKEN_BASE + 0x24)
#define RTAS_IBM_SLOT_ERROR_DETAIL (RTAS_TOKEN_BASE + 0x25)
+#define RTAS_IBM_QUERY_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x26)
+#define RTAS_IBM_CREATE_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x27)
+#define RTAS_IBM_REMOVE_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x28)
+#define RTAS_IBM_RESET_PE_DMA_WINDOW (RTAS_TOKEN_BASE + 0x29)
-#define RTAS_TOKEN_MAX (RTAS_TOKEN_BASE + 0x26)
+#define RTAS_TOKEN_MAX (RTAS_TOKEN_BASE + 0x2A)
/* RTAS ibm,get-system-parameter token values */
#define RTAS_SYSPARM_SPLPAR_CHARACTERISTICS 20
diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
index 5f8b0422f1..bdb5d2f308 100644
--- a/include/hw/ppc/spapr_vio.h
+++ b/include/hw/ppc/spapr_vio.h
@@ -90,7 +90,7 @@ static inline qemu_irq spapr_vio_qirq(VIOsPAPRDevice *dev)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
- return xics_get_qirq(spapr->icp, dev->irq);
+ return xics_get_qirq(spapr->xics, dev->irq);
}
static inline bool spapr_vio_dma_valid(VIOsPAPRDevice *dev, uint64_t taddr,
diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h
index 9091054003..6189a3bff7 100644
--- a/include/hw/ppc/xics.h
+++ b/include/hw/ppc/xics.h
@@ -32,20 +32,25 @@
#define TYPE_XICS_COMMON "xics-common"
#define XICS_COMMON(obj) OBJECT_CHECK(XICSState, (obj), TYPE_XICS_COMMON)
-#define TYPE_XICS "xics"
-#define XICS(obj) OBJECT_CHECK(XICSState, (obj), TYPE_XICS)
+/*
+ * Retain "xics" as the type name so that migration stays compatible. All
+ * other functions, classes and variables are renamed to xics_spapr.
+ */
+#define TYPE_XICS_SPAPR "xics"
+#define XICS_SPAPR(obj) OBJECT_CHECK(XICSState, (obj), TYPE_XICS_SPAPR)
-#define TYPE_KVM_XICS "xics-kvm"
-#define KVM_XICS(obj) OBJECT_CHECK(KVMXICSState, (obj), TYPE_KVM_XICS)
+#define TYPE_XICS_SPAPR_KVM "xics-spapr-kvm"
+#define XICS_SPAPR_KVM(obj) \
+ OBJECT_CHECK(KVMXICSState, (obj), TYPE_XICS_SPAPR_KVM)
#define XICS_COMMON_CLASS(klass) \
OBJECT_CLASS_CHECK(XICSStateClass, (klass), TYPE_XICS_COMMON)
-#define XICS_CLASS(klass) \
- OBJECT_CLASS_CHECK(XICSStateClass, (klass), TYPE_XICS)
+#define XICS_SPAPR_CLASS(klass) \
+ OBJECT_CLASS_CHECK(XICSStateClass, (klass), TYPE_XICS_SPAPR)
#define XICS_COMMON_GET_CLASS(obj) \
OBJECT_GET_CLASS(XICSStateClass, (obj), TYPE_XICS_COMMON)
-#define XICS_GET_CLASS(obj) \
- OBJECT_GET_CLASS(XICSStateClass, (obj), TYPE_XICS)
+#define XICS_SPAPR_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(XICSStateClass, (obj), TYPE_XICS_SPAPR)
#define XICS_IPI 0x2
#define XICS_BUID 0x1
@@ -138,9 +143,15 @@ struct ICSState {
uint32_t offset;
qemu_irq *qirqs;
ICSIRQState *irqs;
- XICSState *icp;
+ XICSState *xics;
};
+static inline bool ics_valid_irq(ICSState *ics, uint32_t nr)
+{
+ return (nr >= ics->offset)
+ && (nr < (ics->offset + ics->nr_irqs));
+}
+
struct ICSIRQState {
uint32_t server;
uint8_t priority;
@@ -157,16 +168,32 @@ struct ICSIRQState {
uint8_t flags;
};
-#define XICS_IRQS 1024
+#define XICS_IRQS_SPAPR 1024
qemu_irq xics_get_qirq(XICSState *icp, int irq);
-void xics_set_irq_type(XICSState *icp, int irq, bool lsi);
-int xics_alloc(XICSState *icp, int src, int irq_hint, bool lsi, Error **errp);
-int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align,
+int xics_spapr_alloc(XICSState *icp, int src, int irq_hint, bool lsi,
Error **errp);
-void xics_free(XICSState *icp, int irq, int num);
+int xics_spapr_alloc_block(XICSState *icp, int src, int num, bool lsi,
+ bool align, Error **errp);
+void xics_spapr_free(XICSState *icp, int irq, int num);
void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu);
void xics_cpu_destroy(XICSState *icp, PowerPCCPU *cpu);
+/* Internal XICS interfaces */
+int xics_get_cpu_index_by_dt_id(int cpu_dt_id);
+
+void icp_set_cppr(XICSState *icp, int server, uint8_t cppr);
+void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr);
+uint32_t icp_accept(ICPState *ss);
+uint32_t icp_ipoll(ICPState *ss, uint32_t *mfrr);
+void icp_eoi(XICSState *icp, int server, uint32_t xirr);
+
+void ics_write_xive(ICSState *ics, int nr, int server,
+ uint8_t priority, uint8_t saved_priority);
+
+void ics_set_irq_type(ICSState *ics, int srcno, bool lsi);
+
+int xics_find_source(XICSState *icp, int irq);
+
#endif /* __XICS_H__ */
diff --git a/include/hw/register.h b/include/hw/register.h
new file mode 100644
index 0000000000..8c12233b75
--- /dev/null
+++ b/include/hw/register.h
@@ -0,0 +1,255 @@
+/*
+ * Register Definition API
+ *
+ * Copyright (c) 2016 Xilinx Inc.
+ * Copyright (c) 2013 Peter Crosthwaite <peter.crosthwaite@xilinx.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef REGISTER_H
+#define REGISTER_H
+
+#include "hw/qdev-core.h"
+#include "exec/memory.h"
+
+typedef struct RegisterInfo RegisterInfo;
+typedef struct RegisterAccessInfo RegisterAccessInfo;
+typedef struct RegisterInfoArray RegisterInfoArray;
+
+/**
+ * Access description for a register that is part of guest accessible device
+ * state.
+ *
+ * @name: String name of the register
+ * @ro: whether or not the bit is read-only
+ * @w1c: bits with the common write 1 to clear semantic.
+ * @reset: reset value.
+ * @cor: Bits that are clear on read
+ * @rsvd: Bits that are reserved and should not be changed
+ *
+ * @pre_write: Pre write callback. Passed the value that's to be written,
+ * immediately before the actual write. The returned value is what is written,
+ * giving the handler a chance to modify the written value.
+ * @post_write: Post write callback. Passed the written value. Most write side
+ * effects should be implemented here.
+ *
+ * @post_read: Post read callback. Passes the value that is about to be returned
+ * for a read. The return value from this function is what is ultimately read,
+ * allowing this function to modify the value before return to the client.
+ */
+
+struct RegisterAccessInfo {
+ const char *name;
+ uint64_t ro;
+ uint64_t w1c;
+ uint64_t reset;
+ uint64_t cor;
+ uint64_t rsvd;
+ uint64_t unimp;
+
+ uint64_t (*pre_write)(RegisterInfo *reg, uint64_t val);
+ void (*post_write)(RegisterInfo *reg, uint64_t val);
+
+ uint64_t (*post_read)(RegisterInfo *reg, uint64_t val);
+
+ hwaddr addr;
+};
+
+/**
+ * A register that is part of guest accessible state
+ * @data: pointer to the register data. Will be cast
+ * to the relevant uint type depending on data_size.
+ * @data_size: Size of the register in bytes. Must be
+ * 1, 2, 4 or 8
+ *
+ * @access: Access description of this register
+ *
+ * @debug: Whether or not verbose debug is enabled
+ * @prefix: String prefix for log and debug messages
+ *
+ * @opaque: Opaque data for the register
+ */
+
+struct RegisterInfo {
+ /* <private> */
+ DeviceState parent_obj;
+
+ /* <public> */
+ void *data;
+ int data_size;
+
+ const RegisterAccessInfo *access;
+
+ void *opaque;
+};
+
+#define TYPE_REGISTER "qemu,register"
+#define REGISTER(obj) OBJECT_CHECK(RegisterInfo, (obj), TYPE_REGISTER)
+
+/**
+ * This structure is used to group all of the individual registers which are
+ * modeled using the RegisterInfo structure.
+ *
+ * @r is an array of all the relevant RegisterInfo structures.
+ *
+ * @num_elements is the number of elements in the array r
+ *
+ * @mem: optional Memory region for the register
+ */
+
+struct RegisterInfoArray {
+ MemoryRegion mem;
+
+ int num_elements;
+ RegisterInfo **r;
+
+ bool debug;
+ const char *prefix;
+};
+
+/**
+ * write a value to a register, subject to its restrictions
+ * @reg: register to write to
+ * @val: value to write
+ * @we: write enable mask
+ * @prefix: The device prefix that should be printed before the register name
+ * @debug: Should the write operation debug information be printed?
+ */
+
+void register_write(RegisterInfo *reg, uint64_t val, uint64_t we,
+ const char *prefix, bool debug);
+
+/**
+ * read a value from a register, subject to its restrictions
+ * @reg: register to read from
+ * @re: read enable mask
+ * @prefix: The device prefix that should be printed before the register name
+ * @debug: Should the read operation debug information be printed?
+ * returns: value read
+ */
+
+uint64_t register_read(RegisterInfo *reg, uint64_t re, const char* prefix,
+ bool debug);
+
+/**
+ * reset a register
+ * @reg: register to reset
+ */
+
+void register_reset(RegisterInfo *reg);
+
+/**
+ * Initialize a register.
+ * @reg: Register to initialize
+ */
+
+void register_init(RegisterInfo *reg);
+
+/**
+ * Memory API MMIO write handler that will write to a Register API register.
+ * @opaque: RegisterInfo to write to
+ * @addr: Address to write
+ * @value: Value to write
+ * @size: Number of bytes to write
+ */
+
+void register_write_memory(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size);
+
+/**
+ * Memory API MMIO read handler that will read from a Register API register.
+ * @opaque: RegisterInfo to read from
+ * @addr: Address to read
+ * @size: Number of bytes to read
+ * returns: Value read from register
+ */
+
+uint64_t register_read_memory(void *opaque, hwaddr addr, unsigned size);
+
+/**
+ * Init a block of registers into a container MemoryRegion. A
+ * number of constant register definitions are parsed to create a corresponding
+ * array of RegisterInfo's.
+ *
+ * @owner: device owning the registers
+ * @rae: Register definitions to init
+ * @num: number of registers to init (length of @rae)
+ * @ri: Register array to init, must already be allocated
+ * @data: Array to use for register data, must already be allocated
+ * @ops: Memory region ops to access registers.
+ * @debug_enabled: turn on/off verbose debug information
+ * returns: A structure containing all of the registers and an initialized
+ * memory region (r_array->mem) which the caller should add to a container.
+ */
+
+RegisterInfoArray *register_init_block32(DeviceState *owner,
+ const RegisterAccessInfo *rae,
+ int num, RegisterInfo *ri,
+ uint32_t *data,
+ const MemoryRegionOps *ops,
+ bool debug_enabled,
+ uint64_t memory_size);
+
+/**
+ * Clean up the registers that were initialized by register_init_block32().
+ * This function should only be called from the device's instance_finalize
+ * function.
+ *
+ * Any memory operations that the device performed that require cleanup (such
+ * as creating subregions) need to be called before calling this function.
+ *
+ * @r_array: A structure containing all of the registers, as returned by
+ * register_init_block32()
+ */
+
+void register_finalize_block(RegisterInfoArray *r_array);
+
+/* Define constants for a 32 bit register */
+
+/* This macro will define A_FOO, the byte address of a register,
+ * as well as R_FOO, the uint32_t[] register number (A_FOO / 4).
+ */
+#define REG32(reg, addr) \
+ enum { A_ ## reg = (addr) }; \
+ enum { R_ ## reg = (addr) / 4 };
+
+/* Define SHIFT, LENGTH and MASK constants for a field within a register */
+
+/* This macro will define FOO_BAR_MASK, FOO_BAR_SHIFT and FOO_BAR_LENGTH
+ * constants for field BAR in register FOO.
+ */
+#define FIELD(reg, field, shift, length) \
+ enum { R_ ## reg ## _ ## field ## _SHIFT = (shift)}; \
+ enum { R_ ## reg ## _ ## field ## _LENGTH = (length)}; \
+ enum { R_ ## reg ## _ ## field ## _MASK = \
+ MAKE_64BIT_MASK(shift, length)};
+
+/* Extract a field from a register */
+#define FIELD_EX32(storage, reg, field) \
+ extract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
+
+/* Extract a field from an array of registers */
+#define ARRAY_FIELD_EX32(regs, reg, field) \
+ FIELD_EX32((regs)[R_ ## reg], reg, field)
+
+/* Deposit a register field.
+ * Assigning values larger than the target field will result in
+ * compilation warnings.
+ */
+#define FIELD_DP32(storage, reg, field, val) ({ \
+ struct { \
+ unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } v = { .v = val }; \
+ uint32_t d; \
+ d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, v.v); \
+ d; })
+
+/* Deposit a field to array of registers. */
+#define ARRAY_FIELD_DP32(regs, reg, field, val) \
+ (regs)[R_ ## reg] = FIELD_DP32((regs)[R_ ## reg], reg, field, val);
+
+#endif
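Putting the pieces of this header together, a device models its registers roughly as in the sketch below (FooState and the foo_* names are hypothetical; the in-tree user added by this series is hw/dma/xlnx-zynq-devcfg.c, which follows the same pattern):

#include "hw/sysbus.h"
#include "hw/register.h"

/* Register layout: two 32-bit registers at offsets 0x0 and 0x4. */
REG32(CTRL, 0x00)
    FIELD(CTRL, ENABLE, 0, 1)
    FIELD(CTRL, MODE,   1, 2)
REG32(STATUS, 0x04)
    FIELD(STATUS, MODE, 0, 2)

#define FOO_R_MAX (0x08 / 4)

typedef struct FooState {
    SysBusDevice parent_obj;
    MemoryRegion iomem;
    uint32_t regs[FOO_R_MAX];
    RegisterInfo regs_info[FOO_R_MAX];
} FooState;

static void foo_ctrl_postw(RegisterInfo *reg, uint64_t val)
{
    FooState *s = reg->opaque;  /* the owner passed to register_init_block32() */

    if (FIELD_EX32(val, CTRL, ENABLE)) {
        /* example write side effect: mirror CTRL.MODE into STATUS.MODE */
        ARRAY_FIELD_DP32(s->regs, STATUS, MODE, FIELD_EX32(val, CTRL, MODE));
    }
}

static const RegisterAccessInfo foo_regs_info[] = {
    { .name = "CTRL",   .addr = A_CTRL,   .post_write = foo_ctrl_postw },
    { .name = "STATUS", .addr = A_STATUS, .ro = 0xffffffff },
};

static const MemoryRegionOps foo_reg_ops = {
    .read = register_read_memory,
    .write = register_write_memory,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = { .min_access_size = 4, .max_access_size = 4 },
};

static void foo_init(Object *obj)
{
    FooState *s = (FooState *)obj;  /* stands in for a QOM type-check macro */
    RegisterInfoArray *reg_array;

    memory_region_init(&s->iomem, obj, "foo-regs", FOO_R_MAX * 4);
    reg_array = register_init_block32(DEVICE(obj), foo_regs_info,
                                      ARRAY_SIZE(foo_regs_info),
                                      s->regs_info, s->regs,
                                      &foo_reg_ops, false, FOO_R_MAX * 4);
    memory_region_add_subregion(&s->iomem, 0x0, &reg_array->mem);
    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->iomem);
}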
diff --git a/include/hw/smbios/ipmi.h b/include/hw/smbios/ipmi.h
new file mode 100644
index 0000000000..1c9aae38f2
--- /dev/null
+++ b/include/hw/smbios/ipmi.h
@@ -0,0 +1,15 @@
+/*
+ * IPMI SMBIOS firmware handling
+ *
+ * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_SMBIOS_IPMI_H
+#define QEMU_SMBIOS_IPMI_H
+
+void smbios_build_type_38_table(void);
+
+#endif /* QEMU_SMBIOS_IPMI_H */
diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h
new file mode 100644
index 0000000000..def3b4507e
--- /dev/null
+++ b/include/hw/ssi/aspeed_smc.h
@@ -0,0 +1,100 @@
+/*
+ * ASPEED AST2400 SMC Controller (SPI Flash Only)
+ *
+ * Copyright (C) 2016 IBM Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef ASPEED_SMC_H
+#define ASPEED_SMC_H
+
+#include "hw/ssi/ssi.h"
+
+typedef struct AspeedSegments {
+ hwaddr addr;
+ uint32_t size;
+} AspeedSegments;
+
+struct AspeedSMCState;
+typedef struct AspeedSMCController {
+ const char *name;
+ uint8_t r_conf;
+ uint8_t r_ce_ctrl;
+ uint8_t r_ctrl0;
+ uint8_t r_timings;
+ uint8_t conf_enable_w0;
+ uint8_t max_slaves;
+ const AspeedSegments *segments;
+ uint32_t mapping_window_size;
+} AspeedSMCController;
+
+typedef struct AspeedSMCFlash {
+ const struct AspeedSMCState *controller;
+
+ uint8_t id;
+ uint32_t size;
+
+ MemoryRegion mmio;
+ DeviceState *flash;
+} AspeedSMCFlash;
+
+#define TYPE_ASPEED_SMC "aspeed.smc"
+#define ASPEED_SMC(obj) OBJECT_CHECK(AspeedSMCState, (obj), TYPE_ASPEED_SMC)
+#define ASPEED_SMC_CLASS(klass) \
+ OBJECT_CLASS_CHECK(AspeedSMCClass, (klass), TYPE_ASPEED_SMC)
+#define ASPEED_SMC_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(AspeedSMCClass, (obj), TYPE_ASPEED_SMC)
+
+typedef struct AspeedSMCClass {
+ SysBusDevice parent_obj;
+ const AspeedSMCController *ctrl;
+} AspeedSMCClass;
+
+#define ASPEED_SMC_R_MAX (0x100 / 4)
+
+typedef struct AspeedSMCState {
+ SysBusDevice parent_obj;
+
+ const AspeedSMCController *ctrl;
+
+ MemoryRegion mmio;
+ MemoryRegion mmio_flash;
+
+ qemu_irq irq;
+ int irqline;
+
+ uint32_t num_cs;
+ qemu_irq *cs_lines;
+
+ SSIBus *spi;
+
+ uint32_t regs[ASPEED_SMC_R_MAX];
+
+ /* depends on the controller type */
+ uint8_t r_conf;
+ uint8_t r_ce_ctrl;
+ uint8_t r_ctrl0;
+ uint8_t r_timings;
+ uint8_t conf_enable_w0;
+
+ AspeedSMCFlash *flashes;
+} AspeedSMCState;
+
+#endif /* ASPEED_SMC_H */
diff --git a/include/hw/ssi/ssi.h b/include/hw/ssi/ssi.h
index 4a0a53903c..6a0c3c3cdb 100644
--- a/include/hw/ssi/ssi.h
+++ b/include/hw/ssi/ssi.h
@@ -37,7 +37,7 @@ enum SSICSMode {
struct SSISlaveClass {
DeviceClass parent_class;
- int (*init)(SSISlave *dev);
+ void (*realize)(SSISlave *dev, Error **errp);
/* if you have standard or no CS behaviour, just override transfer.
* This is called when the device cs is active (true by default).
diff --git a/include/hw/timer/i8254.h b/include/hw/timer/i8254.h
index 434903348c..27a0fb6c51 100644
--- a/include/hw/timer/i8254.h
+++ b/include/hw/timer/i8254.h
@@ -37,6 +37,14 @@ typedef struct PITChannelInfo {
int out;
} PITChannelInfo;
+#define TYPE_PIT_COMMON "pit-common"
+#define PIT_COMMON(obj) \
+ OBJECT_CHECK(PITCommonState, (obj), TYPE_PIT_COMMON)
+#define PIT_COMMON_CLASS(klass) \
+ OBJECT_CLASS_CHECK(PITCommonClass, (klass), TYPE_PIT_COMMON)
+#define PIT_COMMON_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(PITCommonClass, (obj), TYPE_PIT_COMMON)
+
#define TYPE_I8254 "isa-pit"
#define TYPE_KVM_I8254 "kvm-pit"
diff --git a/include/hw/timer/i8254_internal.h b/include/hw/timer/i8254_internal.h
index 61a1bfbc4e..e7cb7573ca 100644
--- a/include/hw/timer/i8254_internal.h
+++ b/include/hw/timer/i8254_internal.h
@@ -57,14 +57,6 @@ typedef struct PITCommonState {
PITChannelState channels[3];
} PITCommonState;
-#define TYPE_PIT_COMMON "pit-common"
-#define PIT_COMMON(obj) \
- OBJECT_CHECK(PITCommonState, (obj), TYPE_PIT_COMMON)
-#define PIT_COMMON_CLASS(klass) \
- OBJECT_CLASS_CHECK(PITCommonClass, (klass), TYPE_PIT_COMMON)
-#define PIT_COMMON_GET_CLASS(obj) \
- OBJECT_GET_CLASS(PITCommonClass, (obj), TYPE_PIT_COMMON)
-
typedef struct PITCommonClass {
ISADeviceClass parent_class;
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 0610377789..07f7188df4 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -73,6 +73,8 @@ typedef struct VFIOContainer {
VFIOAddressSpace *space;
int fd; /* /dev/vfio/vfio, empowered by the attached groups */
MemoryListener listener;
+ MemoryListener prereg_listener;
+ unsigned iommu_type;
int error;
bool initialized;
/*
@@ -80,9 +82,8 @@ typedef struct VFIOContainer {
* contiguous IOVA window. We may need to generalize that in
* future
*/
- hwaddr min_iova, max_iova;
- uint64_t iova_pgsizes;
QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
+ QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
QLIST_HEAD(, VFIOGroup) group_list;
QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;
@@ -95,6 +96,13 @@ typedef struct VFIOGuestIOMMU {
QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
} VFIOGuestIOMMU;
+typedef struct VFIOHostDMAWindow {
+ hwaddr min_iova;
+ hwaddr max_iova;
+ uint64_t iova_pgsizes;
+ QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next;
+} VFIOHostDMAWindow;
+
typedef struct VFIODeviceOps VFIODeviceOps;
typedef struct VFIODevice {
@@ -158,4 +166,12 @@ int vfio_get_region_info(VFIODevice *vbasedev, int index,
int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
uint32_t subtype, struct vfio_region_info **info);
#endif
+extern const MemoryListener vfio_prereg_listener;
+
+int vfio_spapr_create_window(VFIOContainer *container,
+ MemoryRegionSection *section,
+ hwaddr *pgsize);
+int vfio_spapr_remove_window(VFIOContainer *container,
+ hwaddr offset_within_address_space);
+
#endif /* !HW_VFIO_VFIO_COMMON_H */
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 8f2b056515..e9bf463f53 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -38,6 +38,7 @@ struct VirtIOBlkConf
uint32_t scsi;
uint32_t config_wce;
uint32_t request_merging;
+ uint16_t num_queues;
};
struct VirtIOBlockDataPlane;
@@ -46,7 +47,6 @@ struct VirtIOBlockReq;
typedef struct VirtIOBlock {
VirtIODevice parent_obj;
BlockBackend *blk;
- VirtQueue *vq;
void *rq;
QEMUBH *bh;
VirtIOBlkConf conf;
@@ -62,6 +62,7 @@ typedef struct VirtIOBlockReq {
VirtQueueElement elem;
int64_t sector_num;
VirtIOBlock *dev;
+ VirtQueue *vq;
struct virtio_blk_inhdr *in;
struct virtio_blk_outhdr out;
QEMUIOVector qiov;
@@ -79,7 +80,8 @@ typedef struct MultiReqBuffer {
bool is_write;
} MultiReqBuffer;
-void virtio_blk_init_request(VirtIOBlock *s, VirtIOBlockReq *req);
+void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
+ VirtIOBlockReq *req);
void virtio_blk_free_request(VirtIOBlockReq *req);
void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb);
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 3f2c1363d0..f3e5ef3f5b 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -52,7 +52,6 @@ typedef struct VirtioBusClass {
bool (*has_extra_state)(DeviceState *d);
bool (*query_guest_notifiers)(DeviceState *d);
int (*set_guest_notifiers)(DeviceState *d, int nvqs, bool assign);
- int (*set_host_notifier)(DeviceState *d, int n, bool assigned);
void (*vmstate_change)(DeviceState *d, bool running);
/*
* transport independent init function.
@@ -71,6 +70,29 @@ typedef struct VirtioBusClass {
void (*device_unplugged)(DeviceState *d);
int (*query_nvectors)(DeviceState *d);
/*
+ * ioeventfd handling: if the transport implements ioeventfd_started,
+ * it must implement the other ioeventfd callbacks as well
+ */
+ /* Returns true if the ioeventfd has been started for the device. */
+ bool (*ioeventfd_started)(DeviceState *d);
+ /*
+ * Sets the 'ioeventfd started' state after the ioeventfd has been
+ * started/stopped for the device. err signifies whether an error
+ * had occurred.
+ */
+ void (*ioeventfd_set_started)(DeviceState *d, bool started, bool err);
+ /* Returns true if the ioeventfd has been disabled for the device. */
+ bool (*ioeventfd_disabled)(DeviceState *d);
+ /* Sets the 'ioeventfd disabled' state for the device. */
+ void (*ioeventfd_set_disabled)(DeviceState *d, bool disabled);
+ /*
+ * Assigns/deassigns the ioeventfd backing for the transport on
+ * the device for queue number n. Returns an error value on
+ * failure.
+ */
+ int (*ioeventfd_assign)(DeviceState *d, EventNotifier *notifier,
+ int n, bool assign);
+ /*
* Does the transport have variable vring alignment?
* (ie can it ever call virtio_queue_set_align()?)
* Note that changing this will break migration for this transport.
@@ -111,4 +133,11 @@ static inline VirtIODevice *virtio_bus_get_device(VirtioBusState *bus)
return (VirtIODevice *)qdev;
}
+/* Start the ioeventfd. */
+void virtio_bus_start_ioeventfd(VirtioBusState *bus);
+/* Stop the ioeventfd. */
+void virtio_bus_stop_ioeventfd(VirtioBusState *bus);
+/* Switch from/to the generic ioeventfd handler */
+int virtio_bus_set_host_notifier(VirtioBusState *bus, int n, bool assign);
+
#endif /* VIRTIO_BUS_H */
diff --git a/include/io/channel.h b/include/io/channel.h
index d37acd29e0..e52f059310 100644
--- a/include/io/channel.h
+++ b/include/io/channel.h
@@ -42,6 +42,7 @@ typedef enum QIOChannelFeature QIOChannelFeature;
enum QIOChannelFeature {
QIO_CHANNEL_FEATURE_FD_PASS = (1 << 0),
QIO_CHANNEL_FEATURE_SHUTDOWN = (1 << 1),
+ QIO_CHANNEL_FEATURE_LISTEN = (1 << 2),
};
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 25ea58a77f..15ea7679bd 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -904,8 +904,11 @@ extern const VMStateInfo vmstate_info_bitmap;
#define VMSTATE_PARTIAL_BUFFER(_f, _s, _size) \
VMSTATE_STATIC_BUFFER(_f, _s, 0, NULL, 0, _size)
+#define VMSTATE_BUFFER_START_MIDDLE_V(_f, _s, _start, _v) \
+ VMSTATE_STATIC_BUFFER(_f, _s, _v, NULL, _start, sizeof(typeof_field(_s, _f)))
+
#define VMSTATE_BUFFER_START_MIDDLE(_f, _s, _start) \
- VMSTATE_STATIC_BUFFER(_f, _s, 0, NULL, _start, sizeof(typeof_field(_s, _f)))
+ VMSTATE_BUFFER_START_MIDDLE_V(_f, _s, _start, 0)
#define VMSTATE_PARTIAL_VBUFFER(_f, _s, _size) \
VMSTATE_VBUFFER(_f, _s, 0, NULL, 0, _size)
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index 15418a86df..98fb005aba 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -24,6 +24,9 @@
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define MAKE_64BIT_MASK(shift, length) \
+ (((~0ULL) >> (64 - (length))) << (shift))
+
/**
* set_bit - Set a bit in memory
* @nr: the bit to set
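The new MAKE_64BIT_MASK(shift, length) above expands to a contiguous run of set bits and is what the FIELD() macro in the new include/hw/register.h builds its masks from. A few sample expansions, written as build-time checks:

/* Illustrative only: these assertions all hold for the macro as defined. */
QEMU_BUILD_BUG_ON(MAKE_64BIT_MASK(0, 64) != ~0ULL);
QEMU_BUILD_BUG_ON(MAKE_64BIT_MASK(4, 8)  != 0x0000000000000ff0ULL);
QEMU_BUILD_BUG_ON(MAKE_64BIT_MASK(63, 1) != 0x8000000000000000ULL);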
diff --git a/include/qemu/range.h b/include/qemu/range.h
index c903eb574a..3970f00089 100644
--- a/include/qemu/range.h
+++ b/include/qemu/range.h
@@ -1,3 +1,23 @@
+/*
+ * QEMU 64-bit address ranges
+ *
+ * Copyright (c) 2015-2016 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
#ifndef QEMU_RANGE_H
#define QEMU_RANGE_H
@@ -59,75 +79,6 @@ static inline int ranges_overlap(uint64_t first1, uint64_t len1,
return !(last2 < first1 || last1 < first2);
}
-/* 0,1 can merge with 1,2 but don't overlap */
-static inline bool ranges_can_merge(Range *range1, Range *range2)
-{
- return !(range1->end < range2->begin || range2->end < range1->begin);
-}
-
-static inline int range_merge(Range *range1, Range *range2)
-{
- if (ranges_can_merge(range1, range2)) {
- if (range1->end < range2->end) {
- range1->end = range2->end;
- }
- if (range1->begin > range2->begin) {
- range1->begin = range2->begin;
- }
- return 0;
- }
-
- return -1;
-}
-
-static inline GList *g_list_insert_sorted_merged(GList *list,
- gpointer data,
- GCompareFunc func)
-{
- GList *l, *next = NULL;
- Range *r, *nextr;
-
- if (!list) {
- list = g_list_insert_sorted(list, data, func);
- return list;
- }
-
- nextr = data;
- l = list;
- while (l && l != next && nextr) {
- r = l->data;
- if (ranges_can_merge(r, nextr)) {
- range_merge(r, nextr);
- l = g_list_remove_link(l, next);
- next = g_list_next(l);
- if (next) {
- nextr = next->data;
- } else {
- nextr = NULL;
- }
- } else {
- l = g_list_next(l);
- }
- }
-
- if (!l) {
- list = g_list_insert_sorted(list, data, func);
- }
-
- return list;
-}
-
-static inline gint range_compare(gconstpointer a, gconstpointer b)
-{
- Range *ra = (Range *)a, *rb = (Range *)b;
- if (ra->begin == rb->begin && ra->end == rb->end) {
- return 0;
- } else if (range_get_last(ra->begin, ra->end) <
- range_get_last(rb->begin, rb->end)) {
- return -1;
- } else {
- return 1;
- }
-}
+GList *range_list_insert(GList *list, Range *data);
#endif
diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h
index 1bd92180f3..462033a4de 100644
--- a/include/qemu/sockets.h
+++ b/include/qemu/sockets.h
@@ -51,6 +51,7 @@ SocketAddress *socket_parse(const char *str, Error **errp);
int socket_connect(SocketAddress *addr, Error **errp,
NonBlockingConnectHandler *callback, void *opaque);
int socket_listen(SocketAddress *addr, Error **errp);
+void socket_listen_cleanup(int fd, Error **errp);
int socket_dgram(SocketAddress *remote, SocketAddress *local, Error **errp);
/* Old, ipv4 only bits. Don't use for new code. */
@@ -110,4 +111,18 @@ SocketAddress *socket_remote_address(int fd, Error **errp);
void qapi_copy_SocketAddress(SocketAddress **p_dest,
SocketAddress *src);
+/**
+ * socket_address_to_string:
+ * @addr: the socket address struct
+ * @errp: pointer to uninitialized error object
+ *
+ * Get the string representation of the socket
+ * address. The returned string must be released
+ * with g_free() by the caller when it is no longer
+ * needed.
+ *
+ * Returns: the socket address in string format, or NULL on error
+ */
+char *socket_address_to_string(struct SocketAddress *addr, Error **errp);
#endif /* QEMU_SOCKET_H */
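The new helper pairs naturally with socket_local_address()/socket_remote_address() from this header. A hedged usage sketch follows (report_local_address is hypothetical, and error_report() merely stands in for whatever actually consumes the string):

static void report_local_address(int fd)
{
    Error *err = NULL;
    SocketAddress *addr = socket_local_address(fd, &err);
    char *str = addr ? socket_address_to_string(addr, &err) : NULL;

    if (!str) {
        error_report_err(err);
    } else {
        /* e.g. "127.0.0.1:5900" for an inet socket; the caller owns the string */
        error_report("listening on %s", str);
        g_free(str);
    }
    qapi_free_SocketAddress(addr);
}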
diff --git a/include/sysemu/char.h b/include/sysemu/char.h
index 1eb2d0f309..57df10aa00 100644
--- a/include/sysemu/char.h
+++ b/include/sysemu/char.h
@@ -221,8 +221,20 @@ void qemu_chr_fe_event(CharDriverState *s, int event);
void qemu_chr_fe_printf(CharDriverState *s, const char *fmt, ...)
GCC_FMT_ATTR(2, 3);
-int qemu_chr_fe_add_watch(CharDriverState *s, GIOCondition cond,
- GIOFunc func, void *user_data);
+/**
+ * @qemu_chr_fe_add_watch:
+ *
+ * If the backend is connected, create and add a #GSource that fires
+ * when the given condition (typically G_IO_OUT|G_IO_HUP or G_IO_HUP)
+ * is active; return the #GSource's tag. If it is disconnected,
+ * return 0.
+ *
+ * @cond the condition to poll for
+ * @func the function to call when the condition happens
+ * @user_data the opaque pointer to pass to @func
+ */
+guint qemu_chr_fe_add_watch(CharDriverState *s, GIOCondition cond,
+ GIOFunc func, void *user_data);
/**
* @qemu_chr_fe_write:
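Returning a guint tag from qemu_chr_fe_add_watch() matches g_source_attach(); serial.c in this same series stores the tag in the new SerialState watch_tag field so the watch can later be cancelled with g_source_remove(). A minimal caller might look like the sketch below (MyFrontend and the my_frontend_* names are hypothetical):

typedef struct MyFrontend {
    CharDriverState *chr;
    guint watch_tag;
} MyFrontend;

static gboolean my_frontend_writable(GIOChannel *chan, GIOCondition cond,
                                     void *opaque)
{
    MyFrontend *fe = opaque;

    /* ... retry the write that previously could not complete ... */
    fe->watch_tag = 0;
    return FALSE;               /* one-shot: returning FALSE removes the source */
}

/* Arm the watch once, when a write would block. */
static void my_frontend_write_blocked(MyFrontend *fe)
{
    if (!fe->watch_tag) {
        fe->watch_tag = qemu_chr_fe_add_watch(fe->chr, G_IO_OUT | G_IO_HUP,
                                              my_frontend_writable, fe);
    }
}

/* Tear down, e.g. on unrealize, only if the watch is still armed. */
static void my_frontend_cleanup(MyFrontend *fe)
{
    if (fe->watch_tag) {
        g_source_remove(fe->watch_tag);
        fe->watch_tag = 0;
    }
}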
diff --git a/include/ui/console.h b/include/ui/console.h
index 52a5f65673..7c1fdbad6f 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -460,7 +460,6 @@ void vnc_display_add_client(const char *id, int csock, bool skipauth);
#ifdef CONFIG_VNC
int vnc_display_password(const char *id, const char *password);
int vnc_display_pw_expire(const char *id, time_t expires);
-char *vnc_display_local_addr(const char *id);
QemuOpts *vnc_parse(const char *str, Error **errp);
int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp);
#else
@@ -482,12 +481,6 @@ static inline int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp)
error_setg(errp, "VNC support is disabled");
return -1;
}
-static inline char *vnc_display_local_addr(const char *id)
-{
- /* This must never be called if CONFIG_VNC is disabled */
- error_report("VNC support is disabled");
- abort();
-}
#endif
/* curses.c */
diff --git a/io/channel-socket.c b/io/channel-socket.c
index ca8bc20b17..6ec87f8cdb 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -71,6 +71,9 @@ qio_channel_socket_set_fd(QIOChannelSocket *sioc,
int fd,
Error **errp)
{
+ int val;
+ socklen_t len = sizeof(val);
+
if (sioc->fd != -1) {
error_setg(errp, "Socket is already open");
return -1;
@@ -106,6 +109,10 @@ qio_channel_socket_set_fd(QIOChannelSocket *sioc,
ioc->features |= (1 << QIO_CHANNEL_FEATURE_FD_PASS);
}
#endif /* WIN32 */
+ if (getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &val, &len) == 0 && val) {
+ QIOChannel *ioc = QIO_CHANNEL(sioc);
+ ioc->features |= (1 << QIO_CHANNEL_FEATURE_LISTEN);
+ }
return 0;
@@ -393,7 +400,17 @@ static void qio_channel_socket_init(Object *obj)
static void qio_channel_socket_finalize(Object *obj)
{
QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(obj);
+
if (ioc->fd != -1) {
+ if (QIO_CHANNEL(ioc)->features & QIO_CHANNEL_FEATURE_LISTEN) {
+ Error *err = NULL;
+
+ socket_listen_cleanup(ioc->fd, &err);
+ if (err) {
+ error_report_err(err);
+ err = NULL;
+ }
+ }
#ifdef WIN32
WSAEventSelect(ioc->fd, NULL, 0);
#endif
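The getsockopt(SO_ACCEPTCONN) probe above lets qio_channel_socket_set_fd() tag an inherited fd as a listening socket, so that finalize can run socket_listen_cleanup() on it. The probe itself is plain POSIX and can be tried standalone, for example:

/* Standalone illustration of the SO_ACCEPTCONN probe (POSIX sockets API). */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    int val = 0;
    socklen_t len = sizeof(val);

    getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &val, &len);
    printf("before listen(): %d\n", val);    /* 0: not a listening socket */

    listen(fd, 1);                           /* kernel picks an ephemeral port */
    getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &val, &len);
    printf("after listen():  %d\n", val);    /* non-zero: listening */

    close(fd);
    return 0;
}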
diff --git a/linux-user/host/aarch64/hostdep.h b/linux-user/host/aarch64/hostdep.h
new file mode 100644
index 0000000000..b79eaf1811
--- /dev/null
+++ b/linux-user/host/aarch64/hostdep.h
@@ -0,0 +1,38 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+/* We have a safe-syscall.inc.S */
+#define HAVE_SAFE_SYSCALL
+
+#ifndef __ASSEMBLER__
+
+/* These are defined by the safe-syscall.inc.S file */
+extern char safe_syscall_start[];
+extern char safe_syscall_end[];
+
+/* Adjust the signal context to rewind out of safe-syscall if we're in it */
+static inline void rewind_if_in_safe_syscall(void *puc)
+{
+ struct ucontext *uc = puc;
+ __u64 *pcreg = &uc->uc_mcontext.pc;
+
+ if (*pcreg > (uintptr_t)safe_syscall_start
+ && *pcreg < (uintptr_t)safe_syscall_end) {
+ *pcreg = (uintptr_t)safe_syscall_start;
+ }
+}
+
+#endif /* __ASSEMBLER__ */
+
+#endif
diff --git a/linux-user/host/aarch64/safe-syscall.inc.S b/linux-user/host/aarch64/safe-syscall.inc.S
new file mode 100644
index 0000000000..58a2329b37
--- /dev/null
+++ b/linux-user/host/aarch64/safe-syscall.inc.S
@@ -0,0 +1,75 @@
+/*
+ * safe-syscall.inc.S : host-specific assembly fragment
+ * to handle signals occurring at the same time as system calls.
+ * This is intended to be included by linux-user/safe-syscall.S
+ *
+ * Written by Richard Henderson <rth@twiddle.net>
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+ .global safe_syscall_base
+ .global safe_syscall_start
+ .global safe_syscall_end
+ .type safe_syscall_base, #function
+ .type safe_syscall_start, #function
+ .type safe_syscall_end, #function
+
+ /* This is the entry point for making a system call. The calling
+ * convention here is that of a C varargs function with the
+ * first argument an 'int *' to the signal_pending flag, the
+ * second one the system call number (as a 'long'), and all further
+ * arguments being syscall arguments (also 'long').
+ * We return a long which is the syscall's return value, which
+ * may be negative-errno on failure. Conversion to the
+ * -1-and-errno-set convention is done by the calling wrapper.
+ */
+safe_syscall_base:
+ .cfi_startproc
+ /* The syscall calling convention isn't the same as the
+ * C one:
+ * we enter with x0 == *signal_pending
+ * x1 == syscall number
+ * x2 ... x7, (stack) == syscall arguments
+ * and return the result in x0
+ * and the syscall instruction needs
+ * x8 == syscall number
+ * x0 ... x7 == syscall arguments
+ * and returns the result in x0
+ * Shuffle everything around appropriately.
+ */
+ mov x9, x0 /* signal_pending pointer */
+ mov x8, x1 /* syscall number */
+ mov x0, x2 /* syscall arguments */
+ mov x1, x3
+ mov x2, x4
+ mov x3, x5
+ mov x4, x6
+ mov x6, x7
+ ldr x7, [sp]
+
+ /* This next sequence of code works in conjunction with the
+ * rewind_if_in_safe_syscall() helper in hostdep.h. If a signal is taken
+ * and the interrupted PC is anywhere between 'safe_syscall_start'
+ * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
+ * The code sequence must therefore be able to cope with this, and
+ * the syscall instruction must be the final one in the sequence.
+ */
+safe_syscall_start:
+ /* if signal_pending is non-zero, don't do the call */
+ ldr w10, [x9]
+ cbnz w10, 0f
+ svc 0x0
+safe_syscall_end:
+ /* code path for having successfully executed the syscall */
+ ret
+
+0:
+ /* code path when we didn't execute the syscall */
+ mov x0, #-TARGET_ERESTARTSYS
+ ret
+ .cfi_endproc
+
+ .size safe_syscall_base, .-safe_syscall_base
diff --git a/linux-user/host/arm/hostdep.h b/linux-user/host/arm/hostdep.h
new file mode 100644
index 0000000000..8e1ff2ffc5
--- /dev/null
+++ b/linux-user/host/arm/hostdep.h
@@ -0,0 +1,38 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+/* We have a safe-syscall.inc.S */
+#define HAVE_SAFE_SYSCALL
+
+#ifndef __ASSEMBLER__
+
+/* These are defined by the safe-syscall.inc.S file */
+extern char safe_syscall_start[];
+extern char safe_syscall_end[];
+
+/* Adjust the signal context to rewind out of safe-syscall if we're in it */
+static inline void rewind_if_in_safe_syscall(void *puc)
+{
+ struct ucontext *uc = puc;
+ unsigned long *pcreg = &uc->uc_mcontext.arm_pc;
+
+ if (*pcreg > (uintptr_t)safe_syscall_start
+ && *pcreg < (uintptr_t)safe_syscall_end) {
+ *pcreg = (uintptr_t)safe_syscall_start;
+ }
+}
+
+#endif /* __ASSEMBLER__ */
+
+#endif
diff --git a/linux-user/host/arm/safe-syscall.inc.S b/linux-user/host/arm/safe-syscall.inc.S
new file mode 100644
index 0000000000..88c4958504
--- /dev/null
+++ b/linux-user/host/arm/safe-syscall.inc.S
@@ -0,0 +1,90 @@
+/*
+ * safe-syscall.inc.S : host-specific assembly fragment
+ * to handle signals occurring at the same time as system calls.
+ * This is intended to be included by linux-user/safe-syscall.S
+ *
+ * Written by Richard Henderson <rth@twiddle.net>
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+ .global safe_syscall_base
+ .global safe_syscall_start
+ .global safe_syscall_end
+ .type safe_syscall_base, %function
+
+ .cfi_sections .debug_frame
+
+ .text
+ .syntax unified
+ .arm
+ .align 2
+
+ /* This is the entry point for making a system call. The calling
+ * convention here is that of a C varargs function with the
+ * first argument an 'int *' to the signal_pending flag, the
+ * second one the system call number (as a 'long'), and all further
+ * arguments being syscall arguments (also 'long').
+ * We return a long which is the syscall's return value, which
+ * may be negative-errno on failure. Conversion to the
+ * -1-and-errno-set convention is done by the calling wrapper.
+ */
+safe_syscall_base:
+ .fnstart
+ .cfi_startproc
+ mov r12, sp /* save entry stack */
+ push { r4, r5, r6, r7, r8, lr }
+ .save { r4, r5, r6, r7, r8, lr }
+ .cfi_adjust_cfa_offset 24
+ .cfi_rel_offset r4, 0
+ .cfi_rel_offset r5, 4
+ .cfi_rel_offset r6, 8
+ .cfi_rel_offset r7, 12
+ .cfi_rel_offset r8, 16
+ .cfi_rel_offset lr, 20
+
+ /* The syscall calling convention isn't the same as the C one:
+ * we enter with r0 == *signal_pending
+ * r1 == syscall number
+ * r2, r3, [sp+0] ... [sp+12] == syscall arguments
+ * and return the result in r0
+ * and the syscall instruction needs
+ * r7 == syscall number
+ * r0 ... r6 == syscall arguments
+ * and returns the result in r0
+ * Shuffle everything around appropriately.
+ * Note that the stack arguments are loaded via r12 (the entry sp), so the 24 bytes of registers we pushed do not shift their offsets.
+ */
+ mov r8, r0 /* copy signal_pending */
+ mov r7, r1 /* syscall number */
+ mov r0, r2 /* syscall args */
+ mov r1, r3
+ ldm r12, { r2, r3, r4, r5, r6 }
+
+ /* This next sequence of code works in conjunction with the
+ * rewind_if_in_safe_syscall(). If a signal is taken
+ * and the interrupted PC is anywhere between 'safe_syscall_start'
+ * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
+ * The code sequence must therefore be able to cope with this, and
+ * the syscall instruction must be the final one in the sequence.
+ */
+safe_syscall_start:
+ /* if signal_pending is non-zero, don't do the call */
+ ldr r12, [r8] /* signal_pending */
+ tst r12, r12
+ bne 1f
+ swi 0
+safe_syscall_end:
+ /* code path for having successfully executed the syscall */
+ pop { r4, r5, r6, r7, r8, pc }
+
+1:
+ /* code path when we didn't execute the syscall */
+ ldr r0, =-TARGET_ERESTARTSYS
+ pop { r4, r5, r6, r7, r8, pc }
+ .fnend
+ .cfi_endproc
+
+ .size safe_syscall_base, .-safe_syscall_base
diff --git a/linux-user/host/i386/hostdep.h b/linux-user/host/i386/hostdep.h
new file mode 100644
index 0000000000..5a12f4adce
--- /dev/null
+++ b/linux-user/host/i386/hostdep.h
@@ -0,0 +1,38 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+/* We have a safe-syscall.inc.S */
+#define HAVE_SAFE_SYSCALL
+
+#ifndef __ASSEMBLER__
+
+/* These are defined by the safe-syscall.inc.S file */
+extern char safe_syscall_start[];
+extern char safe_syscall_end[];
+
+/* Adjust the signal context to rewind out of safe-syscall if we're in it */
+static inline void rewind_if_in_safe_syscall(void *puc)
+{
+ struct ucontext *uc = puc;
+ greg_t *pcreg = &uc->uc_mcontext.gregs[REG_EIP];
+
+ if (*pcreg > (uintptr_t)safe_syscall_start
+ && *pcreg < (uintptr_t)safe_syscall_end) {
+ *pcreg = (uintptr_t)safe_syscall_start;
+ }
+}
+
+#endif /* __ASSEMBLER__ */
+
+#endif
diff --git a/linux-user/host/i386/safe-syscall.inc.S b/linux-user/host/i386/safe-syscall.inc.S
new file mode 100644
index 0000000000..766d0ded98
--- /dev/null
+++ b/linux-user/host/i386/safe-syscall.inc.S
@@ -0,0 +1,112 @@
+/*
+ * safe-syscall.inc.S : host-specific assembly fragment
+ * to handle signals occurring at the same time as system calls.
+ * This is intended to be included by linux-user/safe-syscall.S
+ *
+ * Written by Richard Henderson <rth@twiddle.net>
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+ .global safe_syscall_base
+ .global safe_syscall_start
+ .global safe_syscall_end
+ .type safe_syscall_base, @function
+
+ /* This is the entry point for making a system call. The calling
+ * convention here is that of a C varargs function with the
+ * first argument an 'int *' to the signal_pending flag, the
+ * second one the system call number (as a 'long'), and all further
+ * arguments being syscall arguments (also 'long').
+ * We return a long which is the syscall's return value, which
+ * may be negative-errno on failure. Conversion to the
+ * -1-and-errno-set convention is done by the calling wrapper.
+ */
+safe_syscall_base:
+ .cfi_startproc
+ push %ebp
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset ebp, 0
+ push %esi
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset esi, 0
+ push %edi
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset edi, 0
+ push %ebx
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset ebx, 0
+
+ /* The syscall calling convention isn't the same as the C one:
+ * we enter with 0(%esp) == return address
+ * 4(%esp) == *signal_pending
+ * 8(%esp) == syscall number
+ * 12(%esp) ... 32(%esp) == syscall arguments
+ * and return the result in eax
+ * and the syscall instruction needs
+ * eax == syscall number
+ * ebx, ecx, edx, esi, edi, ebp == syscall arguments
+ * and returns the result in eax
+ * Shuffle everything around appropriately.
+ * Note the 16 bytes that we pushed to save registers.
+ */
+ mov 12+16(%esp), %ebx /* the syscall arguments */
+ mov 16+16(%esp), %ecx
+ mov 20+16(%esp), %edx
+ mov 24+16(%esp), %esi
+ mov 28+16(%esp), %edi
+ mov 32+16(%esp), %ebp
+
+ /* This next sequence of code works in conjunction with the
+ * rewind_if_in_safe_syscall(). If a signal is taken
+ * and the interrupted PC is anywhere between 'safe_syscall_start'
+ * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
+ * The code sequence must therefore be able to cope with this, and
+ * the syscall instruction must be the final one in the sequence.
+ */
+safe_syscall_start:
+ /* if signal_pending is non-zero, don't do the call */
+ mov 4+16(%esp), %eax /* signal_pending */
+ cmp $0, (%eax)
+ jnz 1f
+ mov 8+16(%esp), %eax /* syscall number */
+ int $0x80
+safe_syscall_end:
+ /* code path for having successfully executed the syscall */
+ pop %ebx
+ .cfi_remember_state
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore ebx
+ pop %edi
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore edi
+ pop %esi
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore esi
+ pop %ebp
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore ebp
+ ret
+
+1:
+ /* code path when we didn't execute the syscall */
+ .cfi_restore_state
+ mov $-TARGET_ERESTARTSYS, %eax
+ pop %ebx
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore ebx
+ pop %edi
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore edi
+ pop %esi
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore esi
+ pop %ebp
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore ebp
+ ret
+ .cfi_endproc
+
+ .size safe_syscall_base, .-safe_syscall_base
diff --git a/linux-user/host/generic/hostdep.h b/linux-user/host/ia64/hostdep.h
index cfabc3590b..7609bf5cd7 100644
--- a/linux-user/host/generic/hostdep.h
+++ b/linux-user/host/ia64/hostdep.h
@@ -1,6 +1,5 @@
/*
- * hostdep.h : fallback generic version of header for things
- * which are dependent on the host architecture
+ * hostdep.h : things which are dependent on the host architecture
*
 * Written by Peter Maydell <peter.maydell@linaro.org>
*
@@ -13,8 +12,4 @@
#ifndef QEMU_HOSTDEP_H
#define QEMU_HOSTDEP_H
-/* This is the fallback header which is only used if the host
- * architecture doesn't provide one in linux-user/host/$ARCH.
- */
-
#endif
diff --git a/linux-user/host/mips/hostdep.h b/linux-user/host/mips/hostdep.h
new file mode 100644
index 0000000000..7609bf5cd7
--- /dev/null
+++ b/linux-user/host/mips/hostdep.h
@@ -0,0 +1,15 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+#endif
diff --git a/linux-user/host/ppc/hostdep.h b/linux-user/host/ppc/hostdep.h
new file mode 100644
index 0000000000..7609bf5cd7
--- /dev/null
+++ b/linux-user/host/ppc/hostdep.h
@@ -0,0 +1,15 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+#endif
diff --git a/linux-user/host/ppc64/hostdep.h b/linux-user/host/ppc64/hostdep.h
new file mode 100644
index 0000000000..310e7d1b73
--- /dev/null
+++ b/linux-user/host/ppc64/hostdep.h
@@ -0,0 +1,38 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+/* We have a safe-syscall.inc.S */
+#define HAVE_SAFE_SYSCALL
+
+#ifndef __ASSEMBLER__
+
+/* These are defined by the safe-syscall.inc.S file */
+extern char safe_syscall_start[];
+extern char safe_syscall_end[];
+
+/* Adjust the signal context to rewind out of safe-syscall if we're in it */
+static inline void rewind_if_in_safe_syscall(void *puc)
+{
+ struct ucontext *uc = puc;
+ unsigned long *pcreg = &uc->uc_mcontext.gp_regs[PT_NIP];
+
+ if (*pcreg > (uintptr_t)safe_syscall_start
+ && *pcreg < (uintptr_t)safe_syscall_end) {
+ *pcreg = (uintptr_t)safe_syscall_start;
+ }
+}
+
+#endif /* __ASSEMBLER__ */
+
+#endif
diff --git a/linux-user/host/ppc64/safe-syscall.inc.S b/linux-user/host/ppc64/safe-syscall.inc.S
new file mode 100644
index 0000000000..d30050a67c
--- /dev/null
+++ b/linux-user/host/ppc64/safe-syscall.inc.S
@@ -0,0 +1,92 @@
+/*
+ * safe-syscall.inc.S : host-specific assembly fragment
+ * to handle signals occurring at the same time as system calls.
+ * This is intended to be included by linux-user/safe-syscall.S
+ *
+ * Written by Richard Henderson <rth@twiddle.net>
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+ .global safe_syscall_base
+ .global safe_syscall_start
+ .global safe_syscall_end
+ .type safe_syscall_base, @function
+
+ .text
+
+ /* This is the entry point for making a system call. The calling
+ * convention here is that of a C varargs function with the
+ * first argument an 'int *' to the signal_pending flag, the
+ * second one the system call number (as a 'long'), and all further
+ * arguments being syscall arguments (also 'long').
+ * We return a long which is the syscall's return value, which
+ * may be negative-errno on failure. Conversion to the
+ * -1-and-errno-set convention is done by the calling wrapper.
+ */
+#if _CALL_ELF == 2
+safe_syscall_base:
+ .cfi_startproc
+ .localentry safe_syscall_base,0
+#else
+ .section ".opd","aw"
+ .align 3
+safe_syscall_base:
+ .quad .L.safe_syscall_base,.TOC.@tocbase,0
+ .previous
+.L.safe_syscall_base:
+ .cfi_startproc
+#endif
+ /* We enter with r3 == *signal_pending
+ * r4 == syscall number
+ * r5 ... r10 == syscall arguments
+ * and return the result in r3
+ * and the syscall instruction needs
+ * r0 == syscall number
+ * r3 ... r8 == syscall arguments
+ * and returns the result in r3
+ * Shuffle everything around appropriately.
+ */
+ mr 11, 3 /* signal_pending */
+ mr 0, 4 /* syscall number */
+ mr 3, 5 /* syscall arguments */
+ mr 4, 6
+ mr 5, 7
+ mr 6, 8
+ mr 7, 9
+ mr 8, 10
+
+ /* This next sequence of code works in conjunction with the
+ * rewind_if_in_safe_syscall(). If a signal is taken
+ * and the interrupted PC is anywhere between 'safe_syscall_start'
+ * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
+ * The code sequence must therefore be able to cope with this, and
+ * the syscall instruction must be the final one in the sequence.
+ */
+safe_syscall_start:
+ /* if signal_pending is non-zero, don't do the call */
+ lwz 12, 0(11)
+ cmpwi 0, 12, 0
+ bne- 0f
+ sc
+safe_syscall_end:
+ /* code path when we did execute the syscall */
+ bnslr+
+
+ /* syscall failed; return negative errno */
+ neg 3, 3
+ blr
+
+ /* code path when we didn't execute the syscall */
+0: addi 3, 0, -TARGET_ERESTARTSYS
+ blr
+ .cfi_endproc
+
+#if _CALL_ELF == 2
+ .size safe_syscall_base, .-safe_syscall_base
+#else
+ .size safe_syscall_base, .-.L.safe_syscall_base
+ .size .L.safe_syscall_base, .-.L.safe_syscall_base
+#endif
diff --git a/linux-user/host/s390/hostdep.h b/linux-user/host/s390/hostdep.h
new file mode 100644
index 0000000000..7609bf5cd7
--- /dev/null
+++ b/linux-user/host/s390/hostdep.h
@@ -0,0 +1,15 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+#endif
diff --git a/linux-user/host/s390x/hostdep.h b/linux-user/host/s390x/hostdep.h
new file mode 100644
index 0000000000..e95871c46a
--- /dev/null
+++ b/linux-user/host/s390x/hostdep.h
@@ -0,0 +1,38 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+/* We have a safe-syscall.inc.S */
+#define HAVE_SAFE_SYSCALL
+
+#ifndef __ASSEMBLER__
+
+/* These are defined by the safe-syscall.inc.S file */
+extern char safe_syscall_start[];
+extern char safe_syscall_end[];
+
+/* Adjust the signal context to rewind out of safe-syscall if we're in it */
+static inline void rewind_if_in_safe_syscall(void *puc)
+{
+ struct ucontext *uc = puc;
+ unsigned long *pcreg = &uc->uc_mcontext.psw.addr;
+
+ if (*pcreg > (uintptr_t)safe_syscall_start
+ && *pcreg < (uintptr_t)safe_syscall_end) {
+ *pcreg = (uintptr_t)safe_syscall_start;
+ }
+}
+
+#endif /* __ASSEMBLER__ */
+
+#endif
diff --git a/linux-user/host/s390x/safe-syscall.inc.S b/linux-user/host/s390x/safe-syscall.inc.S
new file mode 100644
index 0000000000..f1b446abf6
--- /dev/null
+++ b/linux-user/host/s390x/safe-syscall.inc.S
@@ -0,0 +1,90 @@
+/*
+ * safe-syscall.inc.S : host-specific assembly fragment
+ * to handle signals occurring at the same time as system calls.
+ * This is intended to be included by linux-user/safe-syscall.S
+ *
+ * Written by Richard Henderson <rth@twiddle.net>
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+ .global safe_syscall_base
+ .global safe_syscall_start
+ .global safe_syscall_end
+ .type safe_syscall_base, @function
+
+ /* This is the entry point for making a system call. The calling
+ * convention here is that of a C varargs function with the
+ * first argument an 'int *' to the signal_pending flag, the
+ * second one the system call number (as a 'long'), and all further
+ * arguments being syscall arguments (also 'long').
+ * We return a long which is the syscall's return value, which
+ * may be negative-errno on failure. Conversion to the
+ * -1-and-errno-set convention is done by the calling wrapper.
+ */
+safe_syscall_base:
+ .cfi_startproc
+ stmg %r6,%r15,48(%r15) /* save all call-saved registers */
+ .cfi_offset %r15,-40
+ .cfi_offset %r14,-48
+ .cfi_offset %r13,-56
+ .cfi_offset %r12,-64
+ .cfi_offset %r11,-72
+ .cfi_offset %r10,-80
+ .cfi_offset %r9,-88
+ .cfi_offset %r8,-96
+ .cfi_offset %r7,-104
+ .cfi_offset %r6,-112
+ lgr %r1,%r15
+ lg %r0,8(%r15) /* load eos */
+ aghi %r15,-160
+ .cfi_adjust_cfa_offset 160
+ stg %r1,0(%r15) /* store back chain */
+ stg %r0,8(%r15) /* store eos */
+
+ /* The syscall calling convention isn't the same as the
+ * C one:
+ * we enter with r2 == *signal_pending
+ * r3 == syscall number
+ * r4, r5, r6, (stack) == syscall arguments
+ * and return the result in r2
+ * and the syscall instruction needs
+ * r1 == syscall number
+ * r2 ... r7 == syscall arguments
+ * and returns the result in r2
+ * Shuffle everything around appropriately.
+ */
+ lgr %r8,%r2 /* signal_pending pointer */
+ lgr %r1,%r3 /* syscall number */
+ lgr %r2,%r4 /* syscall args */
+ lgr %r3,%r5
+ lgr %r4,%r6
+ lmg %r5,%r7,320(%r15)
+
+ /* This next sequence of code works in conjunction with the
+ * rewind_if_in_safe_syscall(). If a signal is taken
+ * and the interrupted PC is anywhere between 'safe_syscall_start'
+ * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
+ * The code sequence must therefore be able to cope with this, and
+ * the syscall instruction must be the final one in the sequence.
+ */
+safe_syscall_start:
+ /* if signal_pending is non-zero, don't do the call */
+ lt %r0,0(%r8)
+ jne 2f
+ svc 0
+safe_syscall_end:
+
+1: lg %r15,0(%r15) /* load back chain */
+ .cfi_remember_state
+ .cfi_adjust_cfa_offset -160
+ lmg %r6,%r15,48(%r15) /* load saved registers */
+ br %r14
+ .cfi_restore_state
+2: lghi %r2, -TARGET_ERESTARTSYS
+ j 1b
+ .cfi_endproc
+
+ .size safe_syscall_base, .-safe_syscall_base
diff --git a/linux-user/host/sparc/hostdep.h b/linux-user/host/sparc/hostdep.h
new file mode 100644
index 0000000000..7609bf5cd7
--- /dev/null
+++ b/linux-user/host/sparc/hostdep.h
@@ -0,0 +1,15 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+#endif
diff --git a/linux-user/host/sparc64/hostdep.h b/linux-user/host/sparc64/hostdep.h
new file mode 100644
index 0000000000..7609bf5cd7
--- /dev/null
+++ b/linux-user/host/sparc64/hostdep.h
@@ -0,0 +1,15 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+#endif
diff --git a/linux-user/host/x32/hostdep.h b/linux-user/host/x32/hostdep.h
new file mode 100644
index 0000000000..7609bf5cd7
--- /dev/null
+++ b/linux-user/host/x32/hostdep.h
@@ -0,0 +1,15 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Peter Maydell <peter.maydell@linaro.org>
+ *
+ * Copyright (C) 2016 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_HOSTDEP_H
+#define QEMU_HOSTDEP_H
+
+#endif
diff --git a/linux-user/host/x86_64/safe-syscall.inc.S b/linux-user/host/x86_64/safe-syscall.inc.S
index e09368d450..f36992daa3 100644
--- a/linux-user/host/x86_64/safe-syscall.inc.S
+++ b/linux-user/host/x86_64/safe-syscall.inc.S
@@ -67,8 +67,8 @@ safe_syscall_base:
*/
safe_syscall_start:
/* if signal_pending is non-zero, don't do the call */
- testl $1, (%rbp)
- jnz return_ERESTARTSYS
+ cmpl $0, (%rbp)
+ jnz 1f
syscall
safe_syscall_end:
/* code path for having successfully executed the syscall */
@@ -78,7 +78,7 @@ safe_syscall_end:
.cfi_restore rbp
ret
-return_ERESTARTSYS:
+1:
/* code path when we didn't execute the syscall */
.cfi_restore_state
mov $-TARGET_ERESTARTSYS, %rax
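
The one-instruction change above is easier to read in C: testl $1 only examined bit 0 of the flag, so an even non-zero signal_pending value would have been missed, while cmpl $0 checks the whole word. A small equivalent, for illustration only:

    /* C equivalent of the old and new x86-64 tests (illustration only). */
    static int old_check(int signal_pending)
    {
        return (signal_pending & 1) != 0;   /* testl $1: misses e.g. the value 2 */
    }

    static int new_check(int signal_pending)
    {
        return signal_pending != 0;         /* cmpl $0: any non-zero value */
    }
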
diff --git a/linux-user/main.c b/linux-user/main.c
index fd88e22fe3..617a179f14 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -157,7 +157,7 @@ static inline void exclusive_idle(void)
}
/* Start an exclusive operation.
- Must only be called from outside cpu_arm_exec. */
+ Must only be called from outside cpu_exec. */
static inline void start_exclusive(void)
{
CPUState *other_cpu;
@@ -291,7 +291,7 @@ void cpu_loop(CPUX86State *env)
for(;;) {
cpu_exec_start(cs);
- trapnr = cpu_x86_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch(trapnr) {
case 0x80:
@@ -732,7 +732,7 @@ void cpu_loop(CPUARMState *env)
for(;;) {
cpu_exec_start(cs);
- trapnr = cpu_arm_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch(trapnr) {
case EXCP_UDEF:
@@ -1068,7 +1068,7 @@ void cpu_loop(CPUARMState *env)
for (;;) {
cpu_exec_start(cs);
- trapnr = cpu_arm_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch (trapnr) {
@@ -1156,7 +1156,7 @@ void cpu_loop(CPUUniCore32State *env)
for (;;) {
cpu_exec_start(cs);
- trapnr = uc32_cpu_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch (trapnr) {
case UC32_EXCP_PRIV:
@@ -1361,7 +1361,7 @@ void cpu_loop (CPUSPARCState *env)
while (1) {
cpu_exec_start(cs);
- trapnr = cpu_sparc_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
/* Compute PSR before exposing state. */
@@ -1633,7 +1633,7 @@ void cpu_loop(CPUPPCState *env)
for(;;) {
cpu_exec_start(cs);
- trapnr = cpu_ppc_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch(trapnr) {
case POWERPC_EXCP_NONE:
@@ -2490,7 +2490,7 @@ void cpu_loop(CPUMIPSState *env)
for(;;) {
cpu_exec_start(cs);
- trapnr = cpu_mips_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch(trapnr) {
case EXCP_SYSCALL:
@@ -2730,7 +2730,7 @@ void cpu_loop(CPUOpenRISCState *env)
for (;;) {
cpu_exec_start(cs);
- trapnr = cpu_openrisc_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
gdbsig = 0;
@@ -2824,7 +2824,7 @@ void cpu_loop(CPUSH4State *env)
while (1) {
cpu_exec_start(cs);
- trapnr = cpu_sh4_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch (trapnr) {
@@ -2890,7 +2890,7 @@ void cpu_loop(CPUCRISState *env)
while (1) {
cpu_exec_start(cs);
- trapnr = cpu_cris_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch (trapnr) {
case 0xaa:
@@ -2955,7 +2955,7 @@ void cpu_loop(CPUMBState *env)
while (1) {
cpu_exec_start(cs);
- trapnr = cpu_mb_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch (trapnr) {
case 0xaa:
@@ -3072,7 +3072,7 @@ void cpu_loop(CPUM68KState *env)
for(;;) {
cpu_exec_start(cs);
- trapnr = cpu_m68k_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch(trapnr) {
case EXCP_ILLEGAL:
@@ -3215,7 +3215,7 @@ void cpu_loop(CPUAlphaState *env)
while (1) {
cpu_exec_start(cs);
- trapnr = cpu_alpha_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
/* All of the traps imply a transition through PALcode, which
@@ -3407,7 +3407,7 @@ void cpu_loop(CPUS390XState *env)
while (1) {
cpu_exec_start(cs);
- trapnr = cpu_s390x_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch (trapnr) {
case EXCP_INTERRUPT:
@@ -3716,7 +3716,7 @@ void cpu_loop(CPUTLGState *env)
while (1) {
cpu_exec_start(cs);
- trapnr = cpu_tilegx_exec(cs);
+ trapnr = cpu_exec(cs);
cpu_exec_end(cs);
switch (trapnr) {
case TILEGX_EXCP_SYSCALL:
@@ -4687,6 +4687,20 @@ int main(int argc, char **argv, char **envp)
if (regs->cp0_epc & 1) {
env->hflags |= MIPS_HFLAG_M16;
}
+ if (((info->elf_flags & EF_MIPS_NAN2008) != 0) !=
+ ((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) != 0)) {
+ if ((env->active_fpu.fcr31_rw_bitmask &
+ (1 << FCR31_NAN2008)) == 0) {
+ fprintf(stderr, "ELF binary's NaN mode not supported by CPU\n");
+ exit(1);
+ }
+ if ((info->elf_flags & EF_MIPS_NAN2008) != 0) {
+ env->active_fpu.fcr31 |= (1 << FCR31_NAN2008);
+ } else {
+ env->active_fpu.fcr31 &= ~(1 << FCR31_NAN2008);
+ }
+ restore_snan_bit_mode(env);
+ }
}
#elif defined(TARGET_OPENRISC)
{
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 56f29c35b5..cdf23a723a 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -20,6 +20,11 @@
#define THREAD __thread
+/* This is the size of the host kernel's sigset_t, needed where we make
+ * direct system calls that take a sigset_t pointer and a size.
+ */
+#define SIGSET_T_SIZE (_NSIG / 8)
+
/* This struct is used to hold certain information about the image.
* Basically, it replicates in user space what would be certain
* task_struct fields in the kernel
@@ -111,10 +116,10 @@ typedef struct TaskState {
#endif
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
/* Extra fields for semihosted binaries. */
- uint32_t heap_base;
- uint32_t heap_limit;
+ abi_ulong heap_base;
+ abi_ulong heap_limit;
#endif
- uint32_t stack_base;
+ abi_ulong stack_base;
int used; /* non zero if used */
struct image_info *info;
struct linux_binprm *bprm;
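
The SIGSET_T_SIZE comment matters because glibc's sigset_t is much larger than the kernel's (typically 128 bytes versus _NSIG / 8 = 8 on Linux). A hedged sketch of the kind of direct syscall the macro exists for; the helper name is invented and only the size argument is the point:

    /* Sketch: a raw rt_sigprocmask(2) call must pass the kernel sigset
     * size, not sizeof(sigset_t) (the libc type). */
    #include <signal.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define SIGSET_T_SIZE (_NSIG / 8)

    static int block_all_signals(sigset_t *oldset)
    {
        sigset_t all;
        sigfillset(&all);
        /* The kernel only reads the first SIGSET_T_SIZE bytes. */
        return syscall(SYS_rt_sigprocmask, SIG_BLOCK, &all, oldset,
                       SIGSET_T_SIZE);
    }
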
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 1dadddf2dd..9d980456ec 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -278,6 +278,14 @@ static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
tinfo->si_errno = 0;
tinfo->si_code = info->si_code;
+ /* This memset serves two purposes:
+ * (1) ensure we don't leak random junk to the guest later
+ * (2) placate false positives from gcc about fields
+ * being used uninitialized if it chooses to inline both this
+ * function and tswap_siginfo() into host_to_target_siginfo().
+ */
+ memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
+
/* This is awkward, because we have to use a combination of
* the si_code and si_signo to figure out which of the union's
* members are valid. (Within the host kernel it is always possible
@@ -397,8 +405,9 @@ static void tswap_siginfo(target_siginfo_t *tinfo,
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
- host_to_target_siginfo_noswap(tinfo, info);
- tswap_siginfo(tinfo, tinfo);
+ target_siginfo_t tgt_tmp;
+ host_to_target_siginfo_noswap(&tgt_tmp, info);
+ tswap_siginfo(tinfo, &tgt_tmp);
}
 /* XXX: we only support POSIX RT signals. */
@@ -627,8 +636,16 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
* code in case the guest code provokes one in the window between
* now and it getting out to the main loop. Signals will be
* unblocked again in process_pending_signals().
+ *
+ * WARNING: we cannot use sigfillset() here because the uc_sigmask
+ * field is a kernel sigset_t, which is much smaller than the
+ * libc sigset_t which sigfillset() operates on. Using sigfillset()
+ * would write 0xff bytes off the end of the structure and trash
+ * data on the struct.
+ * We can't use sizeof(uc->uc_sigmask) either, because the libc
+ * headers define the struct field with the wrong (too large) type.
*/
- sigfillset(&uc->uc_sigmask);
+ memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
sigdelset(&uc->uc_sigmask, SIGSEGV);
sigdelset(&uc->uc_sigmask, SIGBUS);
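
The warning above boils down to a size mismatch: sigfillset() writes sizeof(sigset_t) bytes (128 with glibc), while the kernel-defined uc_sigmask field only holds _NSIG / 8 = 8 bytes, which is why the memset is sized explicitly. A small illustration of the two numbers (typical x86-64 glibc values, not guaranteed):

    /* Prints the two sizes the comment above is contrasting. */
    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        printf("libc sigset_t:   %zu bytes\n", sizeof(sigset_t)); /* ~128 */
        printf("kernel sigset_t: %d bytes\n", _NSIG / 8);         /* 8 */
        return 0;
    }
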
diff --git a/linux-user/strace.c b/linux-user/strace.c
index 4046b81705..cc10dc4703 100644
--- a/linux-user/strace.c
+++ b/linux-user/strace.c
@@ -5,6 +5,9 @@
#include <sys/shm.h>
#include <sys/select.h>
#include <sys/mount.h>
+#include <arpa/inet.h>
+#include <netinet/tcp.h>
+#include <linux/if_packet.h>
#include <sched.h>
#include "qemu.h"
@@ -57,10 +60,15 @@ UNUSED static void print_open_flags(abi_long, int);
UNUSED static void print_syscall_prologue(const struct syscallname *);
UNUSED static void print_syscall_epilogue(const struct syscallname *);
UNUSED static void print_string(abi_long, int);
+UNUSED static void print_buf(abi_long addr, abi_long len, int last);
UNUSED static void print_raw_param(const char *, abi_long, int);
UNUSED static void print_timeval(abi_ulong, int);
UNUSED static void print_number(abi_long, int);
UNUSED static void print_signal(abi_ulong, int);
+UNUSED static void print_sockaddr(abi_ulong addr, abi_long addrlen);
+UNUSED static void print_socket_domain(int domain);
+UNUSED static void print_socket_type(int type);
+UNUSED static void print_socket_protocol(int domain, int type, int protocol);
/*
* Utility functions
@@ -146,6 +154,165 @@ print_signal(abi_ulong arg, int last)
gemu_log("%s%s", signal_name, get_comma(last));
}
+static void
+print_sockaddr(abi_ulong addr, abi_long addrlen)
+{
+ struct target_sockaddr *sa;
+ int i;
+ int sa_family;
+
+ sa = lock_user(VERIFY_READ, addr, addrlen, 1);
+ if (sa) {
+ sa_family = tswap16(sa->sa_family);
+ switch (sa_family) {
+ case AF_UNIX: {
+ struct target_sockaddr_un *un = (struct target_sockaddr_un *)sa;
+ int i;
+ gemu_log("{sun_family=AF_UNIX,sun_path=\"");
+ for (i = 0; i < addrlen -
+ offsetof(struct target_sockaddr_un, sun_path) &&
+ un->sun_path[i]; i++) {
+ gemu_log("%c", un->sun_path[i]);
+ }
+ gemu_log("\"}");
+ break;
+ }
+ case AF_INET: {
+ struct target_sockaddr_in *in = (struct target_sockaddr_in *)sa;
+ uint8_t *c = (uint8_t *)&in->sin_addr.s_addr;
+ gemu_log("{sin_family=AF_INET,sin_port=htons(%d),",
+ ntohs(in->sin_port));
+ gemu_log("sin_addr=inet_addr(\"%d.%d.%d.%d\")",
+ c[0], c[1], c[2], c[3]);
+ gemu_log("}");
+ break;
+ }
+ case AF_PACKET: {
+ struct target_sockaddr_ll *ll = (struct target_sockaddr_ll *)sa;
+ uint8_t *c = (uint8_t *)&ll->sll_addr;
+ gemu_log("{sll_family=AF_PACKET,"
+ "sll_protocol=htons(0x%04x),if%d,pkttype=",
+ ntohs(ll->sll_protocol), ll->sll_ifindex);
+ switch (ll->sll_pkttype) {
+ case PACKET_HOST:
+ gemu_log("PACKET_HOST");
+ break;
+ case PACKET_BROADCAST:
+ gemu_log("PACKET_BROADCAST");
+ break;
+ case PACKET_MULTICAST:
+ gemu_log("PACKET_MULTICAST");
+ break;
+ case PACKET_OTHERHOST:
+ gemu_log("PACKET_OTHERHOST");
+ break;
+ case PACKET_OUTGOING:
+ gemu_log("PACKET_OUTGOING");
+ break;
+ default:
+ gemu_log("%d", ll->sll_pkttype);
+ break;
+ }
+ gemu_log(",sll_addr=%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+ c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
+ gemu_log("}");
+ break;
+ }
+ default:
+ gemu_log("{sa_family=%d, sa_data={", sa->sa_family);
+ for (i = 0; i < 13; i++) {
+ gemu_log("%02x, ", sa->sa_data[i]);
+ }
+ gemu_log("%02x}", sa->sa_data[i]);
+ gemu_log("}");
+ break;
+ }
+ unlock_user(sa, addr, 0);
+ } else {
+ print_raw_param("0x"TARGET_ABI_FMT_lx, addr, 0);
+ }
+ gemu_log(", "TARGET_ABI_FMT_ld, addrlen);
+}
+
+static void
+print_socket_domain(int domain)
+{
+ switch (domain) {
+ case PF_UNIX:
+ gemu_log("PF_UNIX");
+ break;
+ case PF_INET:
+ gemu_log("PF_INET");
+ break;
+ case PF_PACKET:
+ gemu_log("PF_PACKET");
+ break;
+ default:
+ gemu_log("%d", domain);
+ break;
+ }
+}
+
+static void
+print_socket_type(int type)
+{
+ switch (type) {
+ case TARGET_SOCK_DGRAM:
+ gemu_log("SOCK_DGRAM");
+ break;
+ case TARGET_SOCK_STREAM:
+ gemu_log("SOCK_STREAM");
+ break;
+ case TARGET_SOCK_RAW:
+ gemu_log("SOCK_RAW");
+ break;
+ case TARGET_SOCK_RDM:
+ gemu_log("SOCK_RDM");
+ break;
+ case TARGET_SOCK_SEQPACKET:
+ gemu_log("SOCK_SEQPACKET");
+ break;
+ case TARGET_SOCK_PACKET:
+ gemu_log("SOCK_PACKET");
+ break;
+ }
+}
+
+static void
+print_socket_protocol(int domain, int type, int protocol)
+{
+ if (domain == AF_PACKET ||
+ (domain == AF_INET && type == TARGET_SOCK_PACKET)) {
+ switch (protocol) {
+ case 0x0003:
+ gemu_log("ETH_P_ALL");
+ break;
+ default:
+ gemu_log("%d", protocol);
+ }
+ return;
+ }
+
+ switch (protocol) {
+ case IPPROTO_IP:
+ gemu_log("IPPROTO_IP");
+ break;
+ case IPPROTO_TCP:
+ gemu_log("IPPROTO_TCP");
+ break;
+ case IPPROTO_UDP:
+ gemu_log("IPPROTO_UDP");
+ break;
+ case IPPROTO_RAW:
+ gemu_log("IPPROTO_RAW");
+ break;
+ default:
+ gemu_log("%d", protocol);
+ break;
+ }
+}
+
+
#ifdef TARGET_NR__newselect
static void
print_fdset(int n, abi_ulong target_fds_addr)
@@ -497,6 +664,26 @@ UNUSED static struct flags clone_flags[] = {
FLAG_END,
};
+UNUSED static struct flags msg_flags[] = {
+ /* send */
+ FLAG_GENERIC(MSG_CONFIRM),
+ FLAG_GENERIC(MSG_DONTROUTE),
+ FLAG_GENERIC(MSG_DONTWAIT),
+ FLAG_GENERIC(MSG_EOR),
+ FLAG_GENERIC(MSG_MORE),
+ FLAG_GENERIC(MSG_NOSIGNAL),
+ FLAG_GENERIC(MSG_OOB),
+ /* recv */
+ FLAG_GENERIC(MSG_CMSG_CLOEXEC),
+ FLAG_GENERIC(MSG_ERRQUEUE),
+ FLAG_GENERIC(MSG_PEEK),
+ FLAG_GENERIC(MSG_TRUNC),
+ FLAG_GENERIC(MSG_WAITALL),
+ /* recvmsg */
+ FLAG_GENERIC(MSG_CTRUNC),
+ FLAG_END,
+};
+
/*
* print_xxx utility functions. These are used to print syscall
* parameters in certain format. All of these have parameter
@@ -618,6 +805,36 @@ print_string(abi_long addr, int last)
}
}
+#define MAX_PRINT_BUF 40
+static void
+print_buf(abi_long addr, abi_long len, int last)
+{
+ uint8_t *s;
+ int i;
+
+ s = lock_user(VERIFY_READ, addr, len, 1);
+ if (s) {
+ gemu_log("\"");
+ for (i = 0; i < MAX_PRINT_BUF && i < len; i++) {
+ if (isprint(s[i])) {
+ gemu_log("%c", s[i]);
+ } else {
+ gemu_log("\\%o", s[i]);
+ }
+ }
+ gemu_log("\"");
+ if (i != len) {
+ gemu_log("...");
+ }
+ if (!last) {
+ gemu_log(",");
+ }
+ unlock_user(s, addr, 0);
+ } else {
+ print_pointer(addr, last);
+ }
+}
+
/*
* Prints out raw parameter using given format. Caller needs
* to do byte swapping if needed.
@@ -740,33 +957,31 @@ print_chmod(const struct syscallname *name,
#endif
#ifdef TARGET_NR_clone
+static void do_print_clone(unsigned int flags, abi_ulong newsp,
+ abi_ulong parent_tidptr, target_ulong newtls,
+ abi_ulong child_tidptr)
+{
+ print_flags(clone_flags, flags, 0);
+ print_raw_param("child_stack=0x" TARGET_ABI_FMT_lx, newsp, 0);
+ print_raw_param("parent_tidptr=0x" TARGET_ABI_FMT_lx, parent_tidptr, 0);
+ print_raw_param("tls=0x" TARGET_ABI_FMT_lx, newtls, 0);
+ print_raw_param("child_tidptr=0x" TARGET_ABI_FMT_lx, child_tidptr, 1);
+}
+
static void
print_clone(const struct syscallname *name,
- abi_long arg0, abi_long arg1, abi_long arg2,
- abi_long arg3, abi_long arg4, abi_long arg5)
+ abi_long arg1, abi_long arg2, abi_long arg3,
+ abi_long arg4, abi_long arg5, abi_long arg6)
{
print_syscall_prologue(name);
-#if defined(TARGET_M68K)
- print_flags(clone_flags, arg0, 0);
- print_raw_param("newsp=0x" TARGET_ABI_FMT_lx, arg1, 1);
-#elif defined(TARGET_SH4) || defined(TARGET_ALPHA)
- print_flags(clone_flags, arg0, 0);
- print_raw_param("child_stack=0x" TARGET_ABI_FMT_lx, arg1, 0);
- print_raw_param("parent_tidptr=0x" TARGET_ABI_FMT_lx, arg2, 0);
- print_raw_param("child_tidptr=0x" TARGET_ABI_FMT_lx, arg3, 0);
- print_raw_param("tls=0x" TARGET_ABI_FMT_lx, arg4, 1);
-#elif defined(TARGET_CRIS)
- print_raw_param("child_stack=0x" TARGET_ABI_FMT_lx, arg0, 0);
- print_flags(clone_flags, arg1, 0);
- print_raw_param("parent_tidptr=0x" TARGET_ABI_FMT_lx, arg2, 0);
- print_raw_param("tls=0x" TARGET_ABI_FMT_lx, arg3, 0);
- print_raw_param("child_tidptr=0x" TARGET_ABI_FMT_lx, arg4, 1);
+#if defined(TARGET_MICROBLAZE)
+ do_print_clone(arg1, arg2, arg4, arg6, arg5);
+#elif defined(TARGET_CLONE_BACKWARDS)
+ do_print_clone(arg1, arg2, arg3, arg4, arg5);
+#elif defined(TARGET_CLONE_BACKWARDS2)
+ do_print_clone(arg2, arg1, arg3, arg5, arg4);
#else
- print_flags(clone_flags, arg0, 0);
- print_raw_param("child_stack=0x" TARGET_ABI_FMT_lx, arg1, 0);
- print_raw_param("parent_tidptr=0x" TARGET_ABI_FMT_lx, arg2, 0);
- print_raw_param("tls=0x" TARGET_ABI_FMT_lx, arg3, 0);
- print_raw_param("child_tidptr=0x" TARGET_ABI_FMT_lx, arg4, 1);
+ do_print_clone(arg1, arg2, arg3, arg5, arg4);
#endif
print_syscall_epilogue(name);
}
@@ -918,6 +1133,13 @@ print_fcntl(const struct syscallname *name,
case TARGET_F_GETLEASE:
gemu_log("F_GETLEASE");
break;
+ case TARGET_F_SETPIPE_SZ:
+ gemu_log("F_SETPIPE_SZ,");
+ print_raw_param(TARGET_ABI_FMT_ld, arg2, 1);
+ break;
+ case TARGET_F_GETPIPE_SZ:
+ gemu_log("F_GETPIPE_SZ");
+ break;
case TARGET_F_DUPFD_CLOEXEC:
gemu_log("F_DUPFD_CLOEXEC,");
print_raw_param(TARGET_ABI_FMT_ld, arg2, 1);
@@ -1003,6 +1225,361 @@ print__llseek(const struct syscallname *name,
}
#endif
+#if defined(TARGET_NR_socket)
+static void
+print_socket(const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ abi_ulong domain = arg0, type = arg1, protocol = arg2;
+
+ print_syscall_prologue(name);
+ print_socket_domain(domain);
+ gemu_log(",");
+ print_socket_type(type);
+ gemu_log(",");
+ if (domain == AF_PACKET ||
+ (domain == AF_INET && type == TARGET_SOCK_PACKET)) {
+ protocol = tswap16(protocol);
+ }
+ print_socket_protocol(domain, type, protocol);
+ print_syscall_epilogue(name);
+}
+
+#endif
+
+#if defined(TARGET_NR_socketcall)
+
+#define get_user_ualx(x, gaddr, idx) \
+ get_user_ual(x, (gaddr) + (idx) * sizeof(abi_long))
+
+static void do_print_socket(const char *name, abi_long arg1)
+{
+ abi_ulong domain, type, protocol;
+
+ get_user_ualx(domain, arg1, 0);
+ get_user_ualx(type, arg1, 1);
+ get_user_ualx(protocol, arg1, 2);
+ gemu_log("%s(", name);
+ print_socket_domain(domain);
+ gemu_log(",");
+ print_socket_type(type);
+ gemu_log(",");
+ if (domain == AF_PACKET ||
+ (domain == AF_INET && type == TARGET_SOCK_PACKET)) {
+ protocol = tswap16(protocol);
+ }
+ print_socket_protocol(domain, type, protocol);
+ gemu_log(")");
+}
+
+static void do_print_sockaddr(const char *name, abi_long arg1)
+{
+ abi_ulong sockfd, addr, addrlen;
+
+ get_user_ualx(sockfd, arg1, 0);
+ get_user_ualx(addr, arg1, 1);
+ get_user_ualx(addrlen, arg1, 2);
+
+ gemu_log("%s(", name);
+ print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0);
+ print_sockaddr(addr, addrlen);
+ gemu_log(")");
+}
+
+static void do_print_listen(const char *name, abi_long arg1)
+{
+ abi_ulong sockfd, backlog;
+
+ get_user_ualx(sockfd, arg1, 0);
+ get_user_ualx(backlog, arg1, 1);
+
+ gemu_log("%s(", name);
+ print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0);
+ print_raw_param(TARGET_ABI_FMT_ld, backlog, 1);
+ gemu_log(")");
+}
+
+static void do_print_socketpair(const char *name, abi_long arg1)
+{
+ abi_ulong domain, type, protocol, tab;
+
+ get_user_ualx(domain, arg1, 0);
+ get_user_ualx(type, arg1, 1);
+ get_user_ualx(protocol, arg1, 2);
+ get_user_ualx(tab, arg1, 3);
+
+ gemu_log("%s(", name);
+ print_socket_domain(domain);
+ gemu_log(",");
+ print_socket_type(type);
+ gemu_log(",");
+ print_socket_protocol(domain, type, protocol);
+ gemu_log(",");
+ print_raw_param(TARGET_ABI_FMT_lx, tab, 1);
+ gemu_log(")");
+}
+
+static void do_print_sendrecv(const char *name, abi_long arg1)
+{
+ abi_ulong sockfd, msg, len, flags;
+
+ get_user_ualx(sockfd, arg1, 0);
+ get_user_ualx(msg, arg1, 1);
+ get_user_ualx(len, arg1, 2);
+ get_user_ualx(flags, arg1, 3);
+
+ gemu_log("%s(", name);
+ print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0);
+ print_buf(msg, len, 0);
+ print_raw_param(TARGET_ABI_FMT_ld, len, 0);
+ print_flags(msg_flags, flags, 1);
+ gemu_log(")");
+}
+
+static void do_print_msgaddr(const char *name, abi_long arg1)
+{
+ abi_ulong sockfd, msg, len, flags, addr, addrlen;
+
+ get_user_ualx(sockfd, arg1, 0);
+ get_user_ualx(msg, arg1, 1);
+ get_user_ualx(len, arg1, 2);
+ get_user_ualx(flags, arg1, 3);
+ get_user_ualx(addr, arg1, 4);
+ get_user_ualx(addrlen, arg1, 5);
+
+ gemu_log("%s(", name);
+ print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0);
+ print_buf(msg, len, 0);
+ print_raw_param(TARGET_ABI_FMT_ld, len, 0);
+ print_flags(msg_flags, flags, 0);
+ print_sockaddr(addr, addrlen);
+ gemu_log(")");
+}
+
+static void do_print_shutdown(const char *name, abi_long arg1)
+{
+ abi_ulong sockfd, how;
+
+ get_user_ualx(sockfd, arg1, 0);
+ get_user_ualx(how, arg1, 1);
+
+ gemu_log("shutdown(");
+ print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0);
+ switch (how) {
+ case SHUT_RD:
+ gemu_log("SHUT_RD");
+ break;
+ case SHUT_WR:
+ gemu_log("SHUT_WR");
+ break;
+ case SHUT_RDWR:
+ gemu_log("SHUT_RDWR");
+ break;
+ default:
+ print_raw_param(TARGET_ABI_FMT_ld, how, 1);
+ break;
+ }
+ gemu_log(")");
+}
+
+static void do_print_msg(const char *name, abi_long arg1)
+{
+ abi_ulong sockfd, msg, flags;
+
+ get_user_ualx(sockfd, arg1, 0);
+ get_user_ualx(msg, arg1, 1);
+ get_user_ualx(flags, arg1, 2);
+
+ gemu_log("%s(", name);
+ print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0);
+ print_pointer(msg, 0);
+ print_flags(msg_flags, flags, 1);
+ gemu_log(")");
+}
+
+static void do_print_sockopt(const char *name, abi_long arg1)
+{
+ abi_ulong sockfd, level, optname, optval, optlen;
+
+ get_user_ualx(sockfd, arg1, 0);
+ get_user_ualx(level, arg1, 1);
+ get_user_ualx(optname, arg1, 2);
+ get_user_ualx(optval, arg1, 3);
+ get_user_ualx(optlen, arg1, 4);
+
+ gemu_log("%s(", name);
+ print_raw_param(TARGET_ABI_FMT_ld, sockfd, 0);
+ switch (level) {
+ case SOL_TCP:
+ gemu_log("SOL_TCP,");
+ print_raw_param(TARGET_ABI_FMT_ld, optname, 0);
+ print_pointer(optval, 0);
+ break;
+ case SOL_IP:
+ gemu_log("SOL_IP,");
+ print_raw_param(TARGET_ABI_FMT_ld, optname, 0);
+ print_pointer(optval, 0);
+ break;
+ case SOL_RAW:
+ gemu_log("SOL_RAW,");
+ print_raw_param(TARGET_ABI_FMT_ld, optname, 0);
+ print_pointer(optval, 0);
+ break;
+ case TARGET_SOL_SOCKET:
+ gemu_log("SOL_SOCKET,");
+ switch (optname) {
+ case TARGET_SO_DEBUG:
+ gemu_log("SO_DEBUG,");
+print_optint:
+ print_number(optval, 0);
+ break;
+ case TARGET_SO_REUSEADDR:
+ gemu_log("SO_REUSEADDR,");
+ goto print_optint;
+ case TARGET_SO_TYPE:
+ gemu_log("SO_TYPE,");
+ goto print_optint;
+ case TARGET_SO_ERROR:
+ gemu_log("SO_ERROR,");
+ goto print_optint;
+ case TARGET_SO_DONTROUTE:
+ gemu_log("SO_DONTROUTE,");
+ goto print_optint;
+ case TARGET_SO_BROADCAST:
+ gemu_log("SO_BROADCAST,");
+ goto print_optint;
+ case TARGET_SO_SNDBUF:
+ gemu_log("SO_SNDBUF,");
+ goto print_optint;
+ case TARGET_SO_RCVBUF:
+ gemu_log("SO_RCVBUF,");
+ goto print_optint;
+ case TARGET_SO_KEEPALIVE:
+ gemu_log("SO_KEEPALIVE,");
+ goto print_optint;
+ case TARGET_SO_OOBINLINE:
+ gemu_log("SO_OOBINLINE,");
+ goto print_optint;
+ case TARGET_SO_NO_CHECK:
+ gemu_log("SO_NO_CHECK,");
+ goto print_optint;
+ case TARGET_SO_PRIORITY:
+ gemu_log("SO_PRIORITY,");
+ goto print_optint;
+ case TARGET_SO_BSDCOMPAT:
+ gemu_log("SO_BSDCOMPAT,");
+ goto print_optint;
+ case TARGET_SO_PASSCRED:
+ gemu_log("SO_PASSCRED,");
+ goto print_optint;
+ case TARGET_SO_TIMESTAMP:
+ gemu_log("SO_TIMESTAMP,");
+ goto print_optint;
+ case TARGET_SO_RCVLOWAT:
+ gemu_log("SO_RCVLOWAT,");
+ goto print_optint;
+ case TARGET_SO_RCVTIMEO:
+ gemu_log("SO_RCVTIMEO,");
+ print_timeval(optval, 0);
+ break;
+ case TARGET_SO_SNDTIMEO:
+ gemu_log("SO_SNDTIMEO,");
+ print_timeval(optval, 0);
+ break;
+ case TARGET_SO_ATTACH_FILTER: {
+ struct target_sock_fprog *fprog;
+
+ gemu_log("SO_ATTACH_FILTER,");
+
+ if (lock_user_struct(VERIFY_READ, fprog, optval, 0)) {
+ struct target_sock_filter *filter;
+ gemu_log("{");
+ if (lock_user_struct(VERIFY_READ, filter,
+ tswapal(fprog->filter), 0)) {
+ int i;
+ for (i = 0; i < tswap16(fprog->len) - 1; i++) {
+ gemu_log("[%d]{0x%x,%d,%d,0x%x},",
+ i, tswap16(filter[i].code),
+ filter[i].jt, filter[i].jf,
+ tswap32(filter[i].k));
+ }
+ gemu_log("[%d]{0x%x,%d,%d,0x%x}",
+ i, tswap16(filter[i].code),
+ filter[i].jt, filter[i].jf,
+ tswap32(filter[i].k));
+ } else {
+ gemu_log(TARGET_ABI_FMT_lx, tswapal(fprog->filter));
+ }
+ gemu_log(",%d},", tswap16(fprog->len));
+ unlock_user(fprog, optval, 0);
+ } else {
+ print_pointer(optval, 0);
+ }
+ break;
+ }
+ default:
+ print_raw_param(TARGET_ABI_FMT_ld, optname, 0);
+ print_pointer(optval, 0);
+ break;
+ }
+ break;
+ default:
+ print_raw_param(TARGET_ABI_FMT_ld, level, 0);
+ print_raw_param(TARGET_ABI_FMT_ld, optname, 0);
+ print_pointer(optval, 0);
+ break;
+ }
+ print_raw_param(TARGET_ABI_FMT_ld, optlen, 1);
+ gemu_log(")");
+}
+
+#define PRINT_SOCKOP(name, func) \
+ [SOCKOP_##name] = { #name, func }
+
+static struct {
+ const char *name;
+ void (*print)(const char *, abi_long);
+} scall[] = {
+ PRINT_SOCKOP(socket, do_print_socket),
+ PRINT_SOCKOP(bind, do_print_sockaddr),
+ PRINT_SOCKOP(connect, do_print_sockaddr),
+ PRINT_SOCKOP(listen, do_print_listen),
+ PRINT_SOCKOP(accept, do_print_sockaddr),
+ PRINT_SOCKOP(getsockname, do_print_sockaddr),
+ PRINT_SOCKOP(getpeername, do_print_sockaddr),
+ PRINT_SOCKOP(socketpair, do_print_socketpair),
+ PRINT_SOCKOP(send, do_print_sendrecv),
+ PRINT_SOCKOP(recv, do_print_sendrecv),
+ PRINT_SOCKOP(sendto, do_print_msgaddr),
+ PRINT_SOCKOP(recvfrom, do_print_msgaddr),
+ PRINT_SOCKOP(shutdown, do_print_shutdown),
+ PRINT_SOCKOP(sendmsg, do_print_msg),
+ PRINT_SOCKOP(recvmsg, do_print_msg),
+ PRINT_SOCKOP(setsockopt, do_print_sockopt),
+ PRINT_SOCKOP(getsockopt, do_print_sockopt),
+};
+
+static void
+print_socketcall(const struct syscallname *name,
+ abi_long arg0, abi_long arg1, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ if (arg0 >= 0 && arg0 < ARRAY_SIZE(scall) && scall[arg0].print) {
+ scall[arg0].print(scall[arg0].name, arg1);
+ return;
+ }
+ print_syscall_prologue(name);
+ print_raw_param(TARGET_ABI_FMT_ld, arg0, 0);
+ print_raw_param(TARGET_ABI_FMT_ld, arg1, 0);
+ print_raw_param(TARGET_ABI_FMT_ld, arg2, 0);
+ print_raw_param(TARGET_ABI_FMT_ld, arg3, 0);
+ print_raw_param(TARGET_ABI_FMT_ld, arg4, 0);
+ print_raw_param(TARGET_ABI_FMT_ld, arg5, 0);
+ print_syscall_epilogue(name);
+}
+#endif
+
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) || \
defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64)
static void
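
The new print_socketcall() decoder relies on the socketcall(2) convention that the guest passes an operation number plus a pointer to an array holding the real arguments; get_user_ualx() pulls the n-th abi_long out of that array, and the scall[] table maps each SOCKOP_* number to its printer. A hedged sketch of the guest-side pattern being decoded (only meaningful on hosts/targets that have socketcall; the wrapper is illustrative only):

    /* Sketch of the guest-side socketcall(2) pattern the printers decode. */
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifdef SYS_socketcall                /* i386 and a few other ABIs */
    static int my_socket(int domain, int type, int protocol)
    {
        long args[3] = { domain, type, protocol };
        /* 1 == SYS_SOCKET from <linux/net.h>; the strace code above reads
         * domain/type/protocol back out of this array via get_user_ualx(). */
        return syscall(SYS_socketcall, 1, args);
    }
    #endif
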
diff --git a/linux-user/strace.list b/linux-user/strace.list
index aa0cd735cc..aa967a2475 100644
--- a/linux-user/strace.list
+++ b/linux-user/strace.list
@@ -337,7 +337,8 @@
{ TARGET_NR_getsockopt, "getsockopt" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_get_thread_area
-{ TARGET_NR_get_thread_area, "get_thread_area" , NULL, NULL, NULL },
+{ TARGET_NR_get_thread_area, "get_thread_area", "%s(0x"TARGET_ABI_FMT_lx")",
+ NULL, NULL },
#endif
#ifdef TARGET_NR_gettid
{ TARGET_NR_gettid, "gettid" , NULL, NULL, NULL },
@@ -1234,7 +1235,8 @@
{ TARGET_NR_setsockopt, "setsockopt" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_set_thread_area
-{ TARGET_NR_set_thread_area, "set_thread_area" , NULL, NULL, NULL },
+{ TARGET_NR_set_thread_area, "set_thread_area", "%s(0x"TARGET_ABI_FMT_lx")",
+ NULL, NULL },
#endif
#ifdef TARGET_NR_set_tid_address
{ TARGET_NR_set_tid_address, "set_tid_address" , NULL, NULL, NULL },
@@ -1291,10 +1293,10 @@
{ TARGET_NR_sigsuspend, "sigsuspend" , NULL, NULL, NULL },
#endif
#ifdef TARGET_NR_socket
-{ TARGET_NR_socket, "socket" , NULL, NULL, NULL },
+{ TARGET_NR_socket, "socket" , NULL, print_socket, NULL },
#endif
#ifdef TARGET_NR_socketcall
-{ TARGET_NR_socketcall, "socketcall" , NULL, NULL, NULL },
+{ TARGET_NR_socketcall, "socketcall" , NULL, print_socketcall, NULL },
#endif
#ifdef TARGET_NR_socketpair
{ TARGET_NR_socketpair, "socketpair" , NULL, NULL, NULL },
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 1c17b741c2..8bf6205dc2 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -123,11 +123,6 @@ int __clone2(int (*fn)(void *), void *child_stack_base,
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
-/* This is the size of the host kernel's sigset_t, needed where we make
- * direct system calls that take a sigset_t pointer and a size.
- */
-#define SIGSET_T_SIZE (_NSIG / 8)
-
#undef _syscall0
#undef _syscall1
#undef _syscall2
@@ -783,6 +778,16 @@ safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
* the libc function.
*/
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
+/* Similarly for fcntl. Note that callers must always:
+ * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
+ * use the flock64 struct rather than unsuffixed flock
+ * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
+ */
+#ifdef __NR_fcntl64
+#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
+#else
+#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
+#endif
static inline int host_to_target_sock_type(int host_type)
{
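
The safe_fcntl() comment above implies a fixed calling pattern: always the 64-bit commands and struct flock64, so that 32-bit hosts get 64-bit file offsets as well. A hedged sketch of such a caller, assuming the surrounding syscall.c context (the safe_fcntl macro just defined and the usual fcntl headers); the helper name is invented:

    /* Sketch: a caller following the safe_fcntl() rules stated above
     * (error handling via get_errno() as in the surrounding code). */
    static int lock_whole_file(int fd)
    {
        struct flock64 fl = {
            .l_type   = F_WRLCK,
            .l_whence = SEEK_SET,
            .l_start  = 0,
            .l_len    = 0,            /* 0 == lock to end of file */
        };
        return safe_fcntl(fd, F_SETLKW64, &fl);
    }
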
@@ -1687,6 +1692,7 @@ static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
struct nlmsgerr *e = NLMSG_DATA(nlh);
e->error = tswap32(e->error);
tswap_nlmsghdr(&e->msg);
+ return 0;
}
default:
ret = target_to_host_nlmsg(nlh);
@@ -1942,29 +1948,35 @@ static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
case RTM_NEWLINK:
case RTM_DELLINK:
case RTM_GETLINK:
- ifi = NLMSG_DATA(nlh);
- ifi->ifi_type = tswap16(ifi->ifi_type);
- ifi->ifi_index = tswap32(ifi->ifi_index);
- ifi->ifi_flags = tswap32(ifi->ifi_flags);
- ifi->ifi_change = tswap32(ifi->ifi_change);
- host_to_target_link_rtattr(IFLA_RTA(ifi),
- nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
+ ifi = NLMSG_DATA(nlh);
+ ifi->ifi_type = tswap16(ifi->ifi_type);
+ ifi->ifi_index = tswap32(ifi->ifi_index);
+ ifi->ifi_flags = tswap32(ifi->ifi_flags);
+ ifi->ifi_change = tswap32(ifi->ifi_change);
+ host_to_target_link_rtattr(IFLA_RTA(ifi),
+ nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
+ }
break;
case RTM_NEWADDR:
case RTM_DELADDR:
case RTM_GETADDR:
- ifa = NLMSG_DATA(nlh);
- ifa->ifa_index = tswap32(ifa->ifa_index);
- host_to_target_addr_rtattr(IFA_RTA(ifa),
- nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
+ ifa = NLMSG_DATA(nlh);
+ ifa->ifa_index = tswap32(ifa->ifa_index);
+ host_to_target_addr_rtattr(IFA_RTA(ifa),
+ nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
+ }
break;
case RTM_NEWROUTE:
case RTM_DELROUTE:
case RTM_GETROUTE:
- rtm = NLMSG_DATA(nlh);
- rtm->rtm_flags = tswap32(rtm->rtm_flags);
- host_to_target_route_rtattr(RTM_RTA(rtm),
- nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
+ rtm = NLMSG_DATA(nlh);
+ rtm->rtm_flags = tswap32(rtm->rtm_flags);
+ host_to_target_route_rtattr(RTM_RTA(rtm),
+ nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
+ }
break;
default:
return -TARGET_EINVAL;
@@ -2080,30 +2092,36 @@ static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
break;
case RTM_NEWLINK:
case RTM_DELLINK:
- ifi = NLMSG_DATA(nlh);
- ifi->ifi_type = tswap16(ifi->ifi_type);
- ifi->ifi_index = tswap32(ifi->ifi_index);
- ifi->ifi_flags = tswap32(ifi->ifi_flags);
- ifi->ifi_change = tswap32(ifi->ifi_change);
- target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
- NLMSG_LENGTH(sizeof(*ifi)));
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
+ ifi = NLMSG_DATA(nlh);
+ ifi->ifi_type = tswap16(ifi->ifi_type);
+ ifi->ifi_index = tswap32(ifi->ifi_index);
+ ifi->ifi_flags = tswap32(ifi->ifi_flags);
+ ifi->ifi_change = tswap32(ifi->ifi_change);
+ target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
+ NLMSG_LENGTH(sizeof(*ifi)));
+ }
break;
case RTM_GETADDR:
case RTM_NEWADDR:
case RTM_DELADDR:
- ifa = NLMSG_DATA(nlh);
- ifa->ifa_index = tswap32(ifa->ifa_index);
- target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
- NLMSG_LENGTH(sizeof(*ifa)));
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
+ ifa = NLMSG_DATA(nlh);
+ ifa->ifa_index = tswap32(ifa->ifa_index);
+ target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
+ NLMSG_LENGTH(sizeof(*ifa)));
+ }
break;
case RTM_GETROUTE:
break;
case RTM_NEWROUTE:
case RTM_DELROUTE:
- rtm = NLMSG_DATA(nlh);
- rtm->rtm_flags = tswap32(rtm->rtm_flags);
- target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
- NLMSG_LENGTH(sizeof(*rtm)));
+ if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
+ rtm = NLMSG_DATA(nlh);
+ rtm->rtm_flags = tswap32(rtm->rtm_flags);
+ target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
+ NLMSG_LENGTH(sizeof(*rtm)));
+ }
break;
default:
return -TARGET_EOPNOTSUPP;
@@ -2985,7 +3003,7 @@ static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
len = ret;
if (fd_trans_host_to_target_data(fd)) {
ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
- msg.msg_iov->iov_len);
+ len);
} else {
ret = host_to_target_cmsg(msgp, &msg);
}
@@ -5541,11 +5559,11 @@ static int target_to_host_fcntl_cmd(int cmd)
case TARGET_F_SETFL:
return cmd;
case TARGET_F_GETLK:
- return F_GETLK;
- case TARGET_F_SETLK:
- return F_SETLK;
- case TARGET_F_SETLKW:
- return F_SETLKW;
+ return F_GETLK64;
+ case TARGET_F_SETLK:
+ return F_SETLK64;
+ case TARGET_F_SETLKW:
+ return F_SETLKW64;
case TARGET_F_GETOWN:
return F_GETOWN;
case TARGET_F_SETOWN:
@@ -5580,6 +5598,12 @@ static int target_to_host_fcntl_cmd(int cmd)
case TARGET_F_SETOWN_EX:
return F_SETOWN_EX;
#endif
+#ifdef F_SETPIPE_SZ
+ case TARGET_F_SETPIPE_SZ:
+ return F_SETPIPE_SZ;
+ case TARGET_F_GETPIPE_SZ:
+ return F_GETPIPE_SZ;
+#endif
default:
return -TARGET_EINVAL;
}
@@ -5596,12 +5620,134 @@ static const bitmask_transtbl flock_tbl[] = {
{ 0, 0, 0, 0 }
};
-static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
+static inline abi_long copy_from_user_flock(struct flock64 *fl,
+ abi_ulong target_flock_addr)
{
- struct flock fl;
struct target_flock *target_fl;
+ short l_type;
+
+ if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
+ return -TARGET_EFAULT;
+ }
+
+ __get_user(l_type, &target_fl->l_type);
+ fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
+ __get_user(fl->l_whence, &target_fl->l_whence);
+ __get_user(fl->l_start, &target_fl->l_start);
+ __get_user(fl->l_len, &target_fl->l_len);
+ __get_user(fl->l_pid, &target_fl->l_pid);
+ unlock_user_struct(target_fl, target_flock_addr, 0);
+ return 0;
+}
+
+static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
+ const struct flock64 *fl)
+{
+ struct target_flock *target_fl;
+ short l_type;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+
+ l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
+ __put_user(l_type, &target_fl->l_type);
+ __put_user(fl->l_whence, &target_fl->l_whence);
+ __put_user(fl->l_start, &target_fl->l_start);
+ __put_user(fl->l_len, &target_fl->l_len);
+ __put_user(fl->l_pid, &target_fl->l_pid);
+ unlock_user_struct(target_fl, target_flock_addr, 1);
+ return 0;
+}
+
+typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
+typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
+
+#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
+static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
+ abi_ulong target_flock_addr)
+{
+ struct target_eabi_flock64 *target_fl;
+ short l_type;
+
+ if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
+ return -TARGET_EFAULT;
+ }
+
+ __get_user(l_type, &target_fl->l_type);
+ fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
+ __get_user(fl->l_whence, &target_fl->l_whence);
+ __get_user(fl->l_start, &target_fl->l_start);
+ __get_user(fl->l_len, &target_fl->l_len);
+ __get_user(fl->l_pid, &target_fl->l_pid);
+ unlock_user_struct(target_fl, target_flock_addr, 0);
+ return 0;
+}
+
+static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
+ const struct flock64 *fl)
+{
+ struct target_eabi_flock64 *target_fl;
+ short l_type;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+
+ l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
+ __put_user(l_type, &target_fl->l_type);
+ __put_user(fl->l_whence, &target_fl->l_whence);
+ __put_user(fl->l_start, &target_fl->l_start);
+ __put_user(fl->l_len, &target_fl->l_len);
+ __put_user(fl->l_pid, &target_fl->l_pid);
+ unlock_user_struct(target_fl, target_flock_addr, 1);
+ return 0;
+}
+#endif
+
+static inline abi_long copy_from_user_flock64(struct flock64 *fl,
+ abi_ulong target_flock_addr)
+{
+ struct target_flock64 *target_fl;
+ short l_type;
+
+ if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
+ return -TARGET_EFAULT;
+ }
+
+ __get_user(l_type, &target_fl->l_type);
+ fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
+ __get_user(fl->l_whence, &target_fl->l_whence);
+ __get_user(fl->l_start, &target_fl->l_start);
+ __get_user(fl->l_len, &target_fl->l_len);
+ __get_user(fl->l_pid, &target_fl->l_pid);
+ unlock_user_struct(target_fl, target_flock_addr, 0);
+ return 0;
+}
+
+static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
+ const struct flock64 *fl)
+{
+ struct target_flock64 *target_fl;
+ short l_type;
+
+ if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
+ return -TARGET_EFAULT;
+ }
+
+ l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
+ __put_user(l_type, &target_fl->l_type);
+ __put_user(fl->l_whence, &target_fl->l_whence);
+ __put_user(fl->l_start, &target_fl->l_start);
+ __put_user(fl->l_len, &target_fl->l_len);
+ __put_user(fl->l_pid, &target_fl->l_pid);
+ unlock_user_struct(target_fl, target_flock_addr, 1);
+ return 0;
+}
+
+static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
+{
struct flock64 fl64;
- struct target_flock64 *target_fl64;
#ifdef F_GETOWN_EX
struct f_owner_ex fox;
struct target_f_owner_ex *target_fox;
@@ -5614,94 +5760,60 @@ static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
switch(cmd) {
case TARGET_F_GETLK:
- if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
- return -TARGET_EFAULT;
- fl.l_type =
- target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
- fl.l_whence = tswap16(target_fl->l_whence);
- fl.l_start = tswapal(target_fl->l_start);
- fl.l_len = tswapal(target_fl->l_len);
- fl.l_pid = tswap32(target_fl->l_pid);
- unlock_user_struct(target_fl, arg, 0);
- ret = get_errno(fcntl(fd, host_cmd, &fl));
+ ret = copy_from_user_flock(&fl64, arg);
+ if (ret) {
+ return ret;
+ }
+ ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
if (ret == 0) {
- if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
- return -TARGET_EFAULT;
- target_fl->l_type =
- host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
- target_fl->l_whence = tswap16(fl.l_whence);
- target_fl->l_start = tswapal(fl.l_start);
- target_fl->l_len = tswapal(fl.l_len);
- target_fl->l_pid = tswap32(fl.l_pid);
- unlock_user_struct(target_fl, arg, 1);
+ ret = copy_to_user_flock(arg, &fl64);
}
break;
case TARGET_F_SETLK:
case TARGET_F_SETLKW:
- if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
- return -TARGET_EFAULT;
- fl.l_type =
- target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
- fl.l_whence = tswap16(target_fl->l_whence);
- fl.l_start = tswapal(target_fl->l_start);
- fl.l_len = tswapal(target_fl->l_len);
- fl.l_pid = tswap32(target_fl->l_pid);
- unlock_user_struct(target_fl, arg, 0);
- ret = get_errno(fcntl(fd, host_cmd, &fl));
+ ret = copy_from_user_flock(&fl64, arg);
+ if (ret) {
+ return ret;
+ }
+ ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
break;
case TARGET_F_GETLK64:
- if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
- return -TARGET_EFAULT;
- fl64.l_type =
- target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
- fl64.l_whence = tswap16(target_fl64->l_whence);
- fl64.l_start = tswap64(target_fl64->l_start);
- fl64.l_len = tswap64(target_fl64->l_len);
- fl64.l_pid = tswap32(target_fl64->l_pid);
- unlock_user_struct(target_fl64, arg, 0);
- ret = get_errno(fcntl(fd, host_cmd, &fl64));
+ ret = copy_from_user_flock64(&fl64, arg);
+ if (ret) {
+ return ret;
+ }
+ ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
if (ret == 0) {
- if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
- return -TARGET_EFAULT;
- target_fl64->l_type =
- host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
- target_fl64->l_whence = tswap16(fl64.l_whence);
- target_fl64->l_start = tswap64(fl64.l_start);
- target_fl64->l_len = tswap64(fl64.l_len);
- target_fl64->l_pid = tswap32(fl64.l_pid);
- unlock_user_struct(target_fl64, arg, 1);
+ ret = copy_to_user_flock64(arg, &fl64);
}
break;
case TARGET_F_SETLK64:
case TARGET_F_SETLKW64:
- if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
- return -TARGET_EFAULT;
- fl64.l_type =
- target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
- fl64.l_whence = tswap16(target_fl64->l_whence);
- fl64.l_start = tswap64(target_fl64->l_start);
- fl64.l_len = tswap64(target_fl64->l_len);
- fl64.l_pid = tswap32(target_fl64->l_pid);
- unlock_user_struct(target_fl64, arg, 0);
- ret = get_errno(fcntl(fd, host_cmd, &fl64));
+ ret = copy_from_user_flock64(&fl64, arg);
+ if (ret) {
+ return ret;
+ }
+ ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
break;
case TARGET_F_GETFL:
- ret = get_errno(fcntl(fd, host_cmd, arg));
+ ret = get_errno(safe_fcntl(fd, host_cmd, arg));
if (ret >= 0) {
ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
}
break;
case TARGET_F_SETFL:
- ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
+ ret = get_errno(safe_fcntl(fd, host_cmd,
+ target_to_host_bitmask(arg,
+ fcntl_flags_tbl)));
break;
#ifdef F_GETOWN_EX
case TARGET_F_GETOWN_EX:
- ret = get_errno(fcntl(fd, host_cmd, &fox));
+ ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
if (ret >= 0) {
if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
return -TARGET_EFAULT;
@@ -5719,7 +5831,7 @@ static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
fox.type = tswap32(target_fox->type);
fox.pid = tswap32(target_fox->pid);
unlock_user_struct(target_fox, arg, 0);
- ret = get_errno(fcntl(fd, host_cmd, &fox));
+ ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
break;
#endif
@@ -5729,11 +5841,13 @@ static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
case TARGET_F_GETSIG:
case TARGET_F_SETLEASE:
case TARGET_F_GETLEASE:
- ret = get_errno(fcntl(fd, host_cmd, arg));
+ case TARGET_F_SETPIPE_SZ:
+ case TARGET_F_GETPIPE_SZ:
+ ret = get_errno(safe_fcntl(fd, host_cmd, arg));
break;
default:
- ret = get_errno(fcntl(fd, cmd, arg));
+ ret = get_errno(safe_fcntl(fd, cmd, arg));
break;
}
return ret;
@@ -6690,6 +6804,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
#ifdef DEBUG
gemu_log("syscall %d", num);
#endif
+ trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
if(do_strace)
print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
@@ -7783,8 +7898,11 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_rt_sigqueueinfo:
{
siginfo_t uinfo;
- if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
+
+ p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
+ if (!p) {
goto efault;
+ }
target_to_host_siginfo(&uinfo, p);
unlock_user(p, arg1, 0);
ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
@@ -10132,9 +10250,14 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
{
int cmd;
struct flock64 fl;
- struct target_flock64 *target_fl;
+ from_flock64_fn *copyfrom = copy_from_user_flock64;
+ to_flock64_fn *copyto = copy_to_user_flock64;
+
#ifdef TARGET_ARM
- struct target_eabi_flock64 *target_efl;
+ if (((CPUARMState *)cpu_env)->eabi) {
+ copyfrom = copy_from_user_eabi_flock64;
+ copyto = copy_to_user_eabi_flock64;
+ }
#endif
cmd = target_to_host_fcntl_cmd(arg2);
@@ -10145,80 +10268,23 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
switch(arg2) {
case TARGET_F_GETLK64:
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi) {
- if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
- goto efault;
- fl.l_type = tswap16(target_efl->l_type);
- fl.l_whence = tswap16(target_efl->l_whence);
- fl.l_start = tswap64(target_efl->l_start);
- fl.l_len = tswap64(target_efl->l_len);
- fl.l_pid = tswap32(target_efl->l_pid);
- unlock_user_struct(target_efl, arg3, 0);
- } else
-#endif
- {
- if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
- goto efault;
- fl.l_type = tswap16(target_fl->l_type);
- fl.l_whence = tswap16(target_fl->l_whence);
- fl.l_start = tswap64(target_fl->l_start);
- fl.l_len = tswap64(target_fl->l_len);
- fl.l_pid = tswap32(target_fl->l_pid);
- unlock_user_struct(target_fl, arg3, 0);
+ ret = copyfrom(&fl, arg3);
+ if (ret) {
+ break;
}
ret = get_errno(fcntl(arg1, cmd, &fl));
- if (ret == 0) {
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi) {
- if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
- goto efault;
- target_efl->l_type = tswap16(fl.l_type);
- target_efl->l_whence = tswap16(fl.l_whence);
- target_efl->l_start = tswap64(fl.l_start);
- target_efl->l_len = tswap64(fl.l_len);
- target_efl->l_pid = tswap32(fl.l_pid);
- unlock_user_struct(target_efl, arg3, 1);
- } else
-#endif
- {
- if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
- goto efault;
- target_fl->l_type = tswap16(fl.l_type);
- target_fl->l_whence = tswap16(fl.l_whence);
- target_fl->l_start = tswap64(fl.l_start);
- target_fl->l_len = tswap64(fl.l_len);
- target_fl->l_pid = tswap32(fl.l_pid);
- unlock_user_struct(target_fl, arg3, 1);
- }
- }
+ if (ret == 0) {
+ ret = copyto(arg3, &fl);
+ }
break;
case TARGET_F_SETLK64:
case TARGET_F_SETLKW64:
-#ifdef TARGET_ARM
- if (((CPUARMState *)cpu_env)->eabi) {
- if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
- goto efault;
- fl.l_type = tswap16(target_efl->l_type);
- fl.l_whence = tswap16(target_efl->l_whence);
- fl.l_start = tswap64(target_efl->l_start);
- fl.l_len = tswap64(target_efl->l_len);
- fl.l_pid = tswap32(target_efl->l_pid);
- unlock_user_struct(target_efl, arg3, 0);
- } else
-#endif
- {
- if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
- goto efault;
- fl.l_type = tswap16(target_fl->l_type);
- fl.l_whence = tswap16(target_fl->l_whence);
- fl.l_start = tswap64(target_fl->l_start);
- fl.l_len = tswap64(target_fl->l_len);
- fl.l_pid = tswap32(target_fl->l_pid);
- unlock_user_struct(target_fl, arg3, 0);
+ ret = copyfrom(&fl, arg3);
+ if (ret) {
+ break;
}
- ret = get_errno(fcntl(arg1, cmd, &fl));
+ ret = get_errno(safe_fcntl(arg1, cmd, &fl));
break;
default:
ret = do_fcntl(arg1, arg2, arg3);
@@ -11182,6 +11248,7 @@ fail:
#endif
if(do_strace)
print_syscall_ret(num, ret);
+ trace_guest_user_syscall_ret(cpu, num, ret);
return ret;
efault:
ret = -TARGET_EFAULT;
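
The do_fcntl() and F_GETLK64 rework above replaces the per-call-site #ifdef TARGET_ARM blocks with the from_flock64_fn/to_flock64_fn typedefs: the ABI-specific copy routine is chosen once per syscall and every case reuses it. A minimal standalone sketch of that dispatch pattern, using hypothetical stand-in types rather than the QEMU structures:

    #include <stdio.h>

    /* Hypothetical stand-in for the host-side lock structure. */
    struct host_lock { long start, len; };

    typedef int copy_in_fn(struct host_lock *dst, const void *guest_addr);

    /* Two ABI-specific unmarshalling routines sharing one signature. */
    static int copy_in_default(struct host_lock *dst, const void *guest_addr)
    {
        (void)guest_addr;
        dst->start = 0;     /* ...swap fields from the default guest layout... */
        dst->len = 0;
        return 0;
    }

    static int copy_in_eabi(struct host_lock *dst, const void *guest_addr)
    {
        (void)guest_addr;
        dst->start = 0;     /* ...same job for the 64-bit-aligned EABI layout... */
        dst->len = 0;
        return 0;
    }

    int main(void)
    {
        int guest_is_eabi = 1;                 /* would come from cpu_env */
        copy_in_fn *copyfrom = copy_in_default;
        struct host_lock hl;

        if (guest_is_eabi) {
            copyfrom = copy_in_eabi;           /* choose the variant once */
        }
        printf("copy returned %d\n", copyfrom(&hl, NULL));
        return 0;
    }
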
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 6ee9251c50..dce1bcc91d 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -135,6 +135,24 @@ struct target_sockaddr_ll {
uint8_t sll_addr[8]; /* Physical layer address */
};
+struct target_sockaddr_un {
+ uint16_t su_family;
+ uint8_t sun_path[108];
+};
+
+struct target_in_addr {
+ uint32_t s_addr; /* big endian */
+};
+
+struct target_sockaddr_in {
+ uint16_t sin_family;
+ int16_t sin_port; /* big endian */
+ struct target_in_addr sin_addr;
+ uint8_t __pad[sizeof(struct target_sockaddr) -
+ sizeof(uint16_t) - sizeof(int16_t) -
+ sizeof(struct target_in_addr)];
+};
+
struct target_sock_filter {
abi_ushort code;
uint8_t jt;
@@ -147,10 +165,6 @@ struct target_sock_fprog {
abi_ulong filter;
};
-struct target_in_addr {
- uint32_t s_addr; /* big endian */
-};
-
struct target_ip_mreq {
struct target_in_addr imr_multiaddr;
struct target_in_addr imr_address;
@@ -2166,6 +2180,8 @@ struct target_statfs64 {
#define TARGET_F_SETLEASE (TARGET_F_LINUX_SPECIFIC_BASE + 0)
#define TARGET_F_GETLEASE (TARGET_F_LINUX_SPECIFIC_BASE + 1)
#define TARGET_F_DUPFD_CLOEXEC (TARGET_F_LINUX_SPECIFIC_BASE + 6)
+#define TARGET_F_SETPIPE_SZ (TARGET_F_LINUX_SPECIFIC_BASE + 7)
+#define TARGET_F_GETPIPE_SZ (TARGET_F_LINUX_SPECIFIC_BASE + 8)
#define TARGET_F_NOTIFY (TARGET_F_LINUX_SPECIFIC_BASE+2)
#if defined(TARGET_ALPHA)
diff --git a/memory.c b/memory.c
index 8549c791d7..0eb6895fe6 100644
--- a/memory.c
+++ b/memory.c
@@ -1376,6 +1376,21 @@ void memory_region_init_alias(MemoryRegion *mr,
mr->alias_offset = offset;
}
+void memory_region_init_rom(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ Error **errp)
+{
+ memory_region_init(mr, owner, name, size);
+ mr->ram = true;
+ mr->readonly = true;
+ mr->terminates = true;
+ mr->destructor = memory_region_destructor_ram;
+ mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
+}
+
void memory_region_init_rom_device(MemoryRegion *mr,
Object *owner,
const MemoryRegionOps *ops,
@@ -1384,6 +1399,7 @@ void memory_region_init_rom_device(MemoryRegion *mr,
uint64_t size,
Error **errp)
{
+ assert(ops);
memory_region_init(mr, owner, name, size);
mr->ops = ops;
mr->opaque = opaque;
@@ -1499,6 +1515,10 @@ bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
+ if (mr->iommu_ops->notify_started &&
+ QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
+ mr->iommu_ops->notify_started(mr);
+ }
notifier_list_add(&mr->iommu_notify, n);
}
@@ -1532,9 +1552,13 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write)
}
}
-void memory_region_unregister_iommu_notifier(Notifier *n)
+void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n)
{
notifier_remove(n);
+ if (mr->iommu_ops->notify_stopped &&
+ QLIST_EMPTY(&mr->iommu_notify.notifiers)) {
+ mr->iommu_ops->notify_stopped(mr);
+ }
}
void memory_region_notify_iommu(MemoryRegion *mr,
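
The memory_region_init_rom() helper added above lets board code create a read-only, RAM-backed region without supplying a MemoryRegionOps. A minimal usage sketch, assuming the usual QEMU headers and &error_fatal handling; the board hook name and addresses are illustrative, not taken from this patch:

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "exec/memory.h"
    #include "exec/address-spaces.h"

    /* Hypothetical board hook: map a 1 MiB ROM at a fixed physical address. */
    static void example_board_init_rom(void)
    {
        MemoryRegion *rom = g_new(MemoryRegion, 1);

        /* Allocates the RAM block and marks the region read-only. */
        memory_region_init_rom(rom, NULL, "example.rom", 0x100000, &error_fatal);

        /* Expose it to the guest in the system address space. */
        memory_region_add_subregion(get_system_memory(), 0xfff00000, rom);

        /* The contents would then be filled by a loader helper such as
         * rom_add_file_fixed(). */
    }

Unlike memory_region_init_rom_device(), there is no write callback here: reads are served straight from RAM and the region is simply not writable by the guest.
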
diff --git a/net/net.c b/net/net.c
index 5f3e5a9ff5..75bb1770f9 100644
--- a/net/net.c
+++ b/net/net.c
@@ -722,7 +722,7 @@ ssize_t qemu_deliver_packet_iov(NetClientState *sender,
return 0;
}
- if (nc->info->receive_iov) {
+ if (nc->info->receive_iov && !(flags & QEMU_NET_PACKET_FLAG_RAW)) {
ret = nc->info->receive_iov(nc, iov, iovcnt);
} else {
ret = nc_sendv_compat(nc, iov, iovcnt, flags);
diff --git a/net/socket.c b/net/socket.c
index 333fb9ecfa..ae6f92101d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -489,41 +489,30 @@ static int net_socket_listen_init(NetClientState *peer,
{
NetClientState *nc;
NetSocketState *s;
- struct sockaddr_in saddr;
- int fd, ret;
+ SocketAddress *saddr;
+ int ret;
+ Error *local_error = NULL;
- if (parse_host_port(&saddr, host_str) < 0)
- return -1;
-
- fd = qemu_socket(PF_INET, SOCK_STREAM, 0);
- if (fd < 0) {
- perror("socket");
+ saddr = socket_parse(host_str, &local_error);
+ if (saddr == NULL) {
+ error_report_err(local_error);
return -1;
}
- qemu_set_nonblock(fd);
- socket_set_fast_reuse(fd);
-
- ret = bind(fd, (struct sockaddr *)&saddr, sizeof(saddr));
+ ret = socket_listen(saddr, &local_error);
if (ret < 0) {
- perror("bind");
- closesocket(fd);
- return -1;
- }
- ret = listen(fd, 0);
- if (ret < 0) {
- perror("listen");
- closesocket(fd);
+ error_report_err(local_error);
return -1;
}
nc = qemu_new_net_client(&net_socket_info, peer, model, name);
s = DO_UPCAST(NetSocketState, nc, nc);
s->fd = -1;
- s->listen_fd = fd;
+ s->listen_fd = ret;
s->nc.link_down = true;
qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s);
+ qapi_free_SocketAddress(saddr);
return 0;
}
@@ -534,10 +523,15 @@ static int net_socket_connect_init(NetClientState *peer,
{
NetSocketState *s;
int fd, connected, ret;
- struct sockaddr_in saddr;
+ char *addr_str;
+ SocketAddress *saddr;
+ Error *local_error = NULL;
- if (parse_host_port(&saddr, host_str) < 0)
+ saddr = socket_parse(host_str, &local_error);
+ if (saddr == NULL) {
+ error_report_err(local_error);
return -1;
+ }
fd = qemu_socket(PF_INET, SOCK_STREAM, 0);
if (fd < 0) {
@@ -545,10 +539,9 @@ static int net_socket_connect_init(NetClientState *peer,
return -1;
}
qemu_set_nonblock(fd);
-
connected = 0;
for(;;) {
- ret = connect(fd, (struct sockaddr *)&saddr, sizeof(saddr));
+ ret = socket_connect(saddr, &local_error, NULL, NULL);
if (ret < 0) {
if (errno == EINTR || errno == EWOULDBLOCK) {
/* continue */
@@ -557,7 +550,7 @@ static int net_socket_connect_init(NetClientState *peer,
errno == EINVAL) {
break;
} else {
- perror("connect");
+ error_report_err(local_error);
closesocket(fd);
return -1;
}
@@ -569,9 +562,15 @@ static int net_socket_connect_init(NetClientState *peer,
s = net_socket_fd_init(peer, model, name, fd, connected);
if (!s)
return -1;
+
+ addr_str = socket_address_to_string(saddr, &local_error);
+ if (addr_str == NULL) {
+ error_report_err(local_error);
+ return -1;
+ }
+
snprintf(s->nc.info_str, sizeof(s->nc.info_str),
- "socket: connect to %s:%d",
- inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port));
+ "socket: connect to %s", addr_str);
+ qapi_free_SocketAddress(saddr);
+ g_free(addr_str);
return 0;
}
diff --git a/net/vhost-user.c b/net/vhost-user.c
index d72ce9b490..92f4cfd1b1 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -22,7 +22,7 @@ typedef struct VhostUserState {
NetClientState nc;
CharDriverState *chr;
VHostNetState *vhost_net;
- int watch;
+ guint watch;
uint64_t acked_features;
} VhostUserState;
@@ -151,6 +151,11 @@ static void vhost_user_cleanup(NetClientState *nc)
vhost_net_cleanup(s->vhost_net);
s->vhost_net = NULL;
}
+ if (s->chr) {
+ qemu_chr_add_handlers(s->chr, NULL, NULL, NULL, NULL);
+ qemu_chr_fe_release(s->chr);
+ s->chr = NULL;
+ }
qemu_purge_queued_packets(nc);
}
diff --git a/pc-bios/bios-256k.bin b/pc-bios/bios-256k.bin
index e7a7e72e32..57fb4d88b1 100644
--- a/pc-bios/bios-256k.bin
+++ b/pc-bios/bios-256k.bin
Binary files differ
diff --git a/pc-bios/bios.bin b/pc-bios/bios.bin
index b0ae502f6e..8a6869ff1b 100644
--- a/pc-bios/bios.bin
+++ b/pc-bios/bios.bin
Binary files differ
diff --git a/po/bg.po b/po/bg.po
new file mode 100644
index 0000000000..50478616ee
--- /dev/null
+++ b/po/bg.po
@@ -0,0 +1,90 @@
+# Bulgarian translation of qemu po-file.
+# Copyright (C) 2016 Alexander Shopov <ash@kambanaria.org>
+# This file is distributed under the same license as the qemu package.
+# Alexander Shopov <ash@kambanaria.org>, 2016.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: QEMU 2.6.50\n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2016-06-26 10:16+0300\n"
+"PO-Revision-Date: 2016-06-09 15:54+0300\n"
+"Last-Translator: Alexander Shopov <ash@kambanaria.org>\n"
+"Language-Team: Bulgarian <dict@ludost.net>\n"
+"Language: bg\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: ui/gtk.c:274
+msgid " - Press Ctrl+Alt+G to release grab"
+msgstr " — натиснете Ctrl+Alt+G, за да освободите фокуса"
+
+#: ui/gtk.c:278
+msgid " [Paused]"
+msgstr " [пауза]"
+
+#: ui/gtk.c:1906
+msgid "_Pause"
+msgstr "_Пауза"
+
+#: ui/gtk.c:1912
+msgid "_Reset"
+msgstr "_Рестартиране"
+
+#: ui/gtk.c:1915
+msgid "Power _Down"
+msgstr "_Изключване"
+
+#: ui/gtk.c:1921
+msgid "_Quit"
+msgstr "_Спиране на програмата"
+
+#: ui/gtk.c:2013
+msgid "_Fullscreen"
+msgstr "На _цял екран"
+
+#: ui/gtk.c:2016
+msgid "_Copy"
+msgstr "_Копиране"
+
+#: ui/gtk.c:2032
+msgid "Zoom _In"
+msgstr "_Увеличаване"
+
+#: ui/gtk.c:2039
+msgid "Zoom _Out"
+msgstr "_Намаляване"
+
+#: ui/gtk.c:2046
+msgid "Best _Fit"
+msgstr "По_местване"
+
+#: ui/gtk.c:2053
+msgid "Zoom To _Fit"
+msgstr "Напас_ване"
+
+#: ui/gtk.c:2059
+msgid "Grab On _Hover"
+msgstr "Прихващане при посо_чване"
+
+#: ui/gtk.c:2062
+msgid "_Grab Input"
+msgstr "Прихващане на _фокуса"
+
+#: ui/gtk.c:2091
+msgid "Show _Tabs"
+msgstr "Подпро_зорци"
+
+#: ui/gtk.c:2094
+msgid "Detach Tab"
+msgstr "Към самостоятелен подпрозорец"
+
+#: ui/gtk.c:2106
+msgid "_Machine"
+msgstr "_Машина"
+
+#: ui/gtk.c:2111
+msgid "_View"
+msgstr "_Изглед"
diff --git a/qapi-schema.json b/qapi-schema.json
index 0964eece6d..ba3bf14749 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -2986,11 +2986,14 @@
# @cpu-max: maximum number of CPUs supported by the machine type
# (since 1.5.0)
#
+# @hotpluggable-cpus: cpu hotplug via -device is supported (since 2.7.0)
+#
# Since: 1.2.0
##
{ 'struct': 'MachineInfo',
'data': { 'name': 'str', '*alias': 'str',
- '*is-default': 'bool', 'cpu-max': 'int' } }
+ '*is-default': 'bool', 'cpu-max': 'int',
+ 'hotpluggable-cpus': 'bool'} }
##
# @query-machines:
@@ -4079,8 +4082,9 @@
## @ACPISlotType
#
# @DIMM: memory slot
+# @CPU: logical CPU slot (since 2.7)
#
-{ 'enum': 'ACPISlotType', 'data': [ 'DIMM' ] }
+{ 'enum': 'ACPISlotType', 'data': [ 'DIMM', 'CPU' ] }
## @ACPIOSTInfo
#
@@ -4264,20 +4268,21 @@
# Note: currently there are 4 properties that could be present
# but management should be prepared to pass through other
# properties with device_add command to allow for future
-# interface extension.
+# interface extension. This also requires the field names to be kept in
+# sync with the properties passed to -device/device_add.
#
-# @node: #optional NUMA node ID the CPU belongs to
-# @socket: #optional socket number within node/board the CPU belongs to
-# @core: #optional core number within socket the CPU belongs to
-# @thread: #optional thread number within core the CPU belongs to
+# @node-id: #optional NUMA node ID the CPU belongs to
+# @socket-id: #optional socket number within node/board the CPU belongs to
+# @core-id: #optional core number within socket the CPU belongs to
+# @thread-id: #optional thread number within core the CPU belongs to
#
# Since: 2.7
##
{ 'struct': 'CpuInstanceProperties',
- 'data': { '*node': 'int',
- '*socket': 'int',
- '*core': 'int',
- '*thread': 'int'
+ 'data': { '*node-id': 'int',
+ '*socket-id': 'int',
+ '*core-id': 'int',
+ '*thread-id': 'int'
}
}
diff --git a/qapi/crypto.json b/qapi/crypto.json
index 760d0c0577..4c4a3e07f4 100644
--- a/qapi/crypto.json
+++ b/qapi/crypto.json
@@ -42,12 +42,16 @@
#
# @md5: MD5. Should not be used in any new code, legacy compat only
# @sha1: SHA-1. Should not be used in any new code, legacy compat only
+# @sha224: SHA-224. (since 2.7)
# @sha256: SHA-256. Current recommended strong hash.
+# @sha384: SHA-384. (since 2.7)
+# @sha512: SHA-512. (since 2.7)
+# @ripemd160: RIPEMD-160. (since 2.7)
# Since: 2.6
##
{ 'enum': 'QCryptoHashAlgorithm',
'prefix': 'QCRYPTO_HASH_ALG',
- 'data': ['md5', 'sha1', 'sha256']}
+ 'data': ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512', 'ripemd160']}
##
diff --git a/qapi/string-input-visitor.c b/qapi/string-input-visitor.c
index 30b58791c9..b546e5f76a 100644
--- a/qapi/string-input-visitor.c
+++ b/qapi/string-input-visitor.c
@@ -61,8 +61,7 @@ static int parse_str(StringInputVisitor *siv, const char *name, Error **errp)
cur = g_malloc0(sizeof(*cur));
cur->begin = start;
cur->end = start + 1;
- siv->ranges = g_list_insert_sorted_merged(siv->ranges, cur,
- range_compare);
+ siv->ranges = range_list_insert(siv->ranges, cur);
cur = NULL;
str = NULL;
} else if (*endptr == '-') {
@@ -76,10 +75,7 @@ static int parse_str(StringInputVisitor *siv, const char *name, Error **errp)
cur = g_malloc0(sizeof(*cur));
cur->begin = start;
cur->end = end + 1;
- siv->ranges =
- g_list_insert_sorted_merged(siv->ranges,
- cur,
- range_compare);
+ siv->ranges = range_list_insert(siv->ranges, cur);
cur = NULL;
str = NULL;
} else if (*endptr == ',') {
@@ -87,10 +83,7 @@ static int parse_str(StringInputVisitor *siv, const char *name, Error **errp)
cur = g_malloc0(sizeof(*cur));
cur->begin = start;
cur->end = end + 1;
- siv->ranges =
- g_list_insert_sorted_merged(siv->ranges,
- cur,
- range_compare);
+ siv->ranges = range_list_insert(siv->ranges, cur);
cur = NULL;
} else {
goto error;
@@ -103,9 +96,7 @@ static int parse_str(StringInputVisitor *siv, const char *name, Error **errp)
cur = g_malloc0(sizeof(*cur));
cur->begin = start;
cur->end = start + 1;
- siv->ranges = g_list_insert_sorted_merged(siv->ranges,
- cur,
- range_compare);
+ siv->ranges = range_list_insert(siv->ranges, cur);
cur = NULL;
} else {
goto error;
diff --git a/qapi/string-output-visitor.c b/qapi/string-output-visitor.c
index d01319628b..5ea395ab98 100644
--- a/qapi/string-output-visitor.c
+++ b/qapi/string-output-visitor.c
@@ -85,7 +85,7 @@ static void string_output_append(StringOutputVisitor *sov, int64_t a)
Range *r = g_malloc0(sizeof(*r));
r->begin = a;
r->end = a + 1;
- sov->ranges = g_list_insert_sorted_merged(sov->ranges, r, range_compare);
+ sov->ranges = range_list_insert(sov->ranges, r);
}
static void string_output_append_range(StringOutputVisitor *sov,
@@ -94,7 +94,7 @@ static void string_output_append_range(StringOutputVisitor *sov,
Range *r = g_malloc0(sizeof(*r));
r->begin = s;
r->end = e + 1;
- sov->ranges = g_list_insert_sorted_merged(sov->ranges, r, range_compare);
+ sov->ranges = range_list_insert(sov->ranges, r);
}
static void format_string(StringOutputVisitor *sov, Range *r, bool next,
diff --git a/qemu-char.c b/qemu-char.c
index 84f49acbac..b73969ddbd 100644
--- a/qemu-char.c
+++ b/qemu-char.c
@@ -3966,19 +3966,19 @@ void qemu_chr_fe_event(struct CharDriverState *chr, int event)
}
}
-int qemu_chr_fe_add_watch(CharDriverState *s, GIOCondition cond,
- GIOFunc func, void *user_data)
+guint qemu_chr_fe_add_watch(CharDriverState *s, GIOCondition cond,
+ GIOFunc func, void *user_data)
{
GSource *src;
guint tag;
if (s->chr_add_watch == NULL) {
- return -ENOSYS;
+ return 0;
}
src = s->chr_add_watch(s, cond);
if (!src) {
- return -EINVAL;
+ return 0;
}
g_source_set_callback(src, (GSourceFunc)func, user_data, NULL);
@@ -4549,6 +4549,15 @@ void qmp_chardev_remove(const char *id, Error **errp)
qemu_chr_delete(chr);
}
+static void qemu_chr_cleanup(void)
+{
+ CharDriverState *chr, *tmp;
+
+ QTAILQ_FOREACH_SAFE(chr, &chardevs, next, tmp) {
+ qemu_chr_delete(chr);
+ }
+}
+
static void register_types(void)
{
register_char_driver("null", CHARDEV_BACKEND_KIND_NULL, NULL,
@@ -4595,6 +4604,8 @@ static void register_types(void)
* is specified
*/
qemu_add_machine_init_done_notifier(&muxes_realize_notify);
+
+ atexit(qemu_chr_cleanup);
}
type_init(register_types);
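
The qemu_chr_fe_add_watch() change above is not just cosmetic: GLib source IDs are guint and 0 is never a valid ID, so "no watch" is now reported as 0 instead of a negative errno (which is also why net/vhost-user.c switches its watch field to guint). A sketch of the caller-side pattern this implies; the struct, field and callback names are illustrative, not from this patch:

    /* Sketch of a front end arming and disarming a write watch. */
    typedef struct ExampleFE {
        CharDriverState *chr;
        guint watch;                      /* 0 means "no watch armed" */
    } ExampleFE;

    static gboolean example_chr_can_write(GIOChannel *chan, GIOCondition cond,
                                          void *opaque)
    {
        ExampleFE *fe = opaque;
        fe->watch = 0;                    /* returning FALSE removes the source */
        /* ... flush pending data ... */
        return FALSE;
    }

    static void example_fe_arm(ExampleFE *fe)
    {
        if (!fe->watch) {
            fe->watch = qemu_chr_fe_add_watch(fe->chr, G_IO_OUT | G_IO_HUP,
                                              example_chr_can_write, fe);
            /* A return of 0 now means the backend has no chr_add_watch hook. */
        }
    }

    static void example_fe_disarm(ExampleFE *fe)
    {
        if (fe->watch) {
            g_source_remove(fe->watch);   /* guint IDs pair with g_source_remove */
            fe->watch = 0;
        }
    }
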
diff --git a/qemu-img.c b/qemu-img.c
index 14e2661a5c..3322a1e5fc 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -32,6 +32,7 @@
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/error-report.h"
+#include "qemu/log.h"
#include "qom/object_interfaces.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
@@ -39,6 +40,7 @@
#include "block/blockjob.h"
#include "block/qapi.h"
#include "crypto/init.h"
+#include "trace/control.h"
#include <getopt.h>
#define QEMU_IMG_VERSION "qemu-img version " QEMU_VERSION QEMU_PKGVERSION \
@@ -91,9 +93,14 @@ static void QEMU_NORETURN help(void)
{
const char *help_msg =
QEMU_IMG_VERSION
- "usage: qemu-img command [command options]\n"
+ "usage: qemu-img [standard options] command [command options]\n"
"QEMU disk image utility\n"
"\n"
+ " '-h', '--help' display this help and exit\n"
+ " '-V', '--version' output version information and exit\n"
+ " '-T', '--trace' [[enable=]<pattern>][,events=<file>][,file=<file>]\n"
+ " specify tracing options\n"
+ "\n"
"Command syntax:\n"
#define DEF(option, callback, arg_string) \
" " arg_string "\n"
@@ -3803,10 +3810,12 @@ int main(int argc, char **argv)
const img_cmd_t *cmd;
const char *cmdname;
Error *local_error = NULL;
+ char *trace_file = NULL;
int c;
static const struct option long_options[] = {
{"help", no_argument, 0, 'h'},
- {"version", no_argument, 0, 'v'},
+ {"version", no_argument, 0, 'V'},
+ {"trace", required_argument, NULL, 'T'},
{0, 0, 0, 0}
};
@@ -3829,27 +3838,48 @@ int main(int argc, char **argv)
if (argc < 2) {
error_exit("Not enough arguments");
}
- cmdname = argv[1];
qemu_add_opts(&qemu_object_opts);
qemu_add_opts(&qemu_source_opts);
+ qemu_add_opts(&qemu_trace_opts);
- /* find the command */
- for (cmd = img_cmds; cmd->name != NULL; cmd++) {
- if (!strcmp(cmdname, cmd->name)) {
- return cmd->handler(argc - 1, argv + 1);
+ while ((c = getopt_long(argc, argv, "+hVT:", long_options, NULL)) != -1) {
+ switch (c) {
+ case 'h':
+ help();
+ return 0;
+ case 'V':
+ printf(QEMU_IMG_VERSION);
+ return 0;
+ case 'T':
+ g_free(trace_file);
+ trace_file = trace_opt_parse(optarg);
+ break;
}
}
- c = getopt_long(argc, argv, "h", long_options, NULL);
+ cmdname = argv[optind];
- if (c == 'h') {
- help();
- }
- if (c == 'v') {
- printf(QEMU_IMG_VERSION);
+ /* reset getopt_long scanning */
+ argc -= optind;
+ if (argc < 1) {
return 0;
}
+ argv += optind;
+ optind = 1;
+
+ if (!trace_init_backends()) {
+ exit(1);
+ }
+ trace_init_file(trace_file);
+ qemu_set_log(LOG_TRACE);
+
+ /* find the command */
+ for (cmd = img_cmds; cmd->name != NULL; cmd++) {
+ if (!strcmp(cmdname, cmd->name)) {
+ return cmd->handler(argc, argv);
+ }
+ }
/* not found */
error_exit("Command not found: %s", cmdname);
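
Note the option string is "+hVT:" rather than "hVT:": the leading '+' puts glibc's getopt_long() into POSIX mode, so scanning stops at the first non-option word (the qemu-img subcommand) and the subcommand's own options are left for its handler, which restarts parsing with optind = 1. A small standalone sketch of that behaviour, assuming glibc-style getopt:

    #include <stdio.h>
    #include <getopt.h>

    /* ./demo -V convert -O qcow2 in.img out.img
     * With "+V", parsing stops at "convert"; "-O qcow2" stays untouched for
     * the subcommand, which re-parses after argv/optind are shifted. */
    int main(int argc, char **argv)
    {
        static const struct option lopts[] = { {0, 0, 0, 0} };
        int c;

        while ((c = getopt_long(argc, argv, "+V", lopts, NULL)) != -1) {
            if (c == 'V') {
                printf("global -V seen\n");
            }
        }

        if (optind < argc) {
            printf("subcommand: %s\n", argv[optind]);
        }

        argc -= optind;      /* hand the remainder to the subcommand... */
        argv += optind;
        optind = 1;          /* ...and reset getopt's scanning state */
        return 0;
    }
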
diff --git a/qemu-img.texi b/qemu-img.texi
index cbe50e9b88..449a19c710 100644
--- a/qemu-img.texi
+++ b/qemu-img.texi
@@ -1,6 +1,6 @@
@example
@c man begin SYNOPSIS
-@command{qemu-img} @var{command} [@var{command} @var{options}]
+@command{qemu-img} [@var{standard} @var{options}] @var{command} [@var{command} @var{options}]
@c man end
@end example
@@ -16,6 +16,17 @@ inconsistent state.
@c man begin OPTIONS
+Standard options:
+@table @option
+@item -h, --help
+Display this help and exit
+@item -V, --version
+Display version information and exit
+@item -T, --trace [[enable=]@var{pattern}][,events=@var{file}][,file=@var{file}]
+@findex --trace
+@include qemu-option-trace.texi
+@end table
+
The following commands are supported:
@include qemu-img-cmds.texi
diff --git a/qemu-io.c b/qemu-io.c
index d977a6e553..db129eac5f 100644
--- a/qemu-io.c
+++ b/qemu-io.c
@@ -18,6 +18,7 @@
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/readline.h"
+#include "qemu/log.h"
#include "qapi/qmp/qstring.h"
#include "qom/object_interfaces.h"
#include "sysemu/block-backend.h"
@@ -253,7 +254,9 @@ static void usage(const char *name)
" -k, --native-aio use kernel AIO implementation (on Linux only)\n"
" -t, --cache=MODE use the given cache mode for the image\n"
" -d, --discard=MODE use the given discard mode for the image\n"
-" -T, --trace FILE enable trace events listed in the given file\n"
+" -T, --trace [[enable=]<pattern>][,events=<file>][,file=<file>]\n"
+" specify tracing options\n"
+" see qemu-img(1) man page for full description\n"
" -h, --help display this help and exit\n"
" -V, --version output version information and exit\n"
"\n"
@@ -458,6 +461,7 @@ int main(int argc, char **argv)
Error *local_error = NULL;
QDict *opts = NULL;
const char *format = NULL;
+ char *trace_file = NULL;
#ifdef CONFIG_POSIX
signal(SIGPIPE, SIG_IGN);
@@ -470,6 +474,7 @@ int main(int argc, char **argv)
module_call_init(MODULE_INIT_QOM);
qemu_add_opts(&qemu_object_opts);
+ qemu_add_opts(&qemu_trace_opts);
bdrv_init();
while ((c = getopt_long(argc, argv, sopt, lopt, &opt_index)) != -1) {
@@ -509,9 +514,8 @@ int main(int argc, char **argv)
}
break;
case 'T':
- if (!trace_init_backends()) {
- exit(1); /* error message will have been printed */
- }
+ g_free(trace_file);
+ trace_file = trace_opt_parse(optarg);
break;
case 'V':
printf("%s version %s\n", progname, QEMU_VERSION);
@@ -557,6 +561,12 @@ int main(int argc, char **argv)
exit(1);
}
+ if (!trace_init_backends()) {
+ exit(1);
+ }
+ trace_init_file(trace_file);
+ qemu_set_log(LOG_TRACE);
+
/* initialize commands */
qemuio_add_command(&quit_cmd);
qemuio_add_command(&open_cmd);
diff --git a/qemu-nbd.c b/qemu-nbd.c
index 9519db324b..321f02bd15 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -27,12 +27,14 @@
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qemu/bswap.h"
+#include "qemu/log.h"
#include "block/snapshot.h"
#include "qapi/util.h"
#include "qapi/qmp/qstring.h"
#include "qom/object_interfaces.h"
#include "io/channel-socket.h"
#include "crypto/init.h"
+#include "trace/control.h"
#include <getopt.h>
#include <libgen.h>
@@ -88,6 +90,8 @@ static void usage(const char *name)
"General purpose options:\n"
" --object type,id=ID,... define an object such as 'secret' for providing\n"
" passwords and/or encryption keys\n"
+" -T, --trace [[enable=]<pattern>][,events=<file>][,file=<file>]\n"
+" specify tracing options\n"
#ifdef __linux__
"Kernel NBD client support:\n"
" -c, --connect=DEV connect FILE to the local NBD device DEV\n"
@@ -470,7 +474,7 @@ int main(int argc, char **argv)
off_t fd_size;
QemuOpts *sn_opts = NULL;
const char *sn_id_or_name = NULL;
- const char *sopt = "hVb:o:p:rsnP:c:dvk:e:f:tl:x:";
+ const char *sopt = "hVb:o:p:rsnP:c:dvk:e:f:tl:x:T:";
struct option lopt[] = {
{ "help", no_argument, NULL, 'h' },
{ "version", no_argument, NULL, 'V' },
@@ -498,6 +502,7 @@ int main(int argc, char **argv)
{ "export-name", required_argument, NULL, 'x' },
{ "tls-creds", required_argument, NULL, QEMU_NBD_OPT_TLSCREDS },
{ "image-opts", no_argument, NULL, QEMU_NBD_OPT_IMAGE_OPTS },
+ { "trace", required_argument, NULL, 'T' },
{ NULL, 0, NULL, 0 }
};
int ch;
@@ -518,6 +523,7 @@ int main(int argc, char **argv)
const char *tlscredsid = NULL;
bool imageOpts = false;
bool writethrough = true;
+ char *trace_file = NULL;
/* The client thread uses SIGTERM to interrupt the server. A signal
* handler ensures that "qemu-nbd -v -c" exits with a nice status code.
@@ -531,6 +537,7 @@ int main(int argc, char **argv)
module_call_init(MODULE_INIT_QOM);
qemu_add_opts(&qemu_object_opts);
+ qemu_add_opts(&qemu_trace_opts);
qemu_init_exec_dir(argv[0]);
while ((ch = getopt_long(argc, argv, sopt, lopt, &opt_ind)) != -1) {
@@ -703,6 +710,10 @@ int main(int argc, char **argv)
case QEMU_NBD_OPT_IMAGE_OPTS:
imageOpts = true;
break;
+ case 'T':
+ g_free(trace_file);
+ trace_file = trace_opt_parse(optarg);
+ break;
}
}
@@ -718,6 +729,12 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
+ if (!trace_init_backends()) {
+ exit(1);
+ }
+ trace_init_file(trace_file);
+ qemu_set_log(LOG_TRACE);
+
if (tlscredsid) {
if (sockpath) {
error_report("TLS is only supported with IPv4/IPv6");
diff --git a/qemu-nbd.texi b/qemu-nbd.texi
index 9f23343450..91ebf04b5b 100644
--- a/qemu-nbd.texi
+++ b/qemu-nbd.texi
@@ -92,6 +92,9 @@ Display extra debugging information
Display this help and exit
@item -V, --version
Display version information and exit
+@item -T, --trace [[enable=]@var{pattern}][,events=@var{file}][,file=@var{file}]
+@findex --trace
+@include qemu-option-trace.texi
@end table
@c man end
diff --git a/qemu-option-trace.texi b/qemu-option-trace.texi
new file mode 100644
index 0000000000..693ab5a3e1
--- /dev/null
+++ b/qemu-option-trace.texi
@@ -0,0 +1,25 @@
+Specify tracing options.
+
+@table @option
+@item [enable=]@var{pattern}
+Immediately enable events matching @var{pattern}
+(either an event name or a globbing pattern). This option is only
+available if QEMU has been compiled with the @var{simple}, @var{stderr}
+or @var{ftrace} tracing backend. To specify multiple events or patterns,
+specify the @option{-trace} option multiple times.
+
+Use @code{-trace help} to print a list of names of trace points.
+
+@item events=@var{file}
+Immediately enable events listed in @var{file}.
+The file must contain one event name (as listed in the @file{trace-events-all}
+file) per line; globbing patterns are accepted too. This option is only
+available if QEMU has been compiled with the @var{simple}, @var{stderr} or
+@var{ftrace} tracing backend.
+
+@item file=@var{file}
+Log output traces to @var{file}.
+This option is only available if QEMU has been compiled with
+the @var{simple} tracing backend.
+@end table
diff --git a/qemu-options.hx b/qemu-options.hx
index 44c658fd4e..a95a936e55 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -3669,34 +3669,9 @@ DEF("trace", HAS_ARG, QEMU_OPTION_trace,
STEXI
HXCOMM This line is not accurate, as some sub-options are backend-specific but
HXCOMM HX does not support conditional compilation of text.
-@item -trace [events=@var{file}][,file=@var{file}]
+@item -trace [[enable=]@var{pattern}][,events=@var{file}][,file=@var{file}]
@findex -trace
-
-Specify tracing options.
-
-@table @option
-@item [enable=]@var{pattern}
-Immediately enable events matching @var{pattern}.
-The file must contain one event name (as listed in the @file{trace-events-all}
-file) per line; globbing patterns are accepted too. This option is only
-available if QEMU has been compiled with the @var{simple}, @var{stderr}
-or @var{ftrace} tracing backend. To specify multiple events or patterns,
-specify the @option{-trace} option multiple times.
-
-Use @code{-trace help} to print a list of names of trace points.
-
-@item events=@var{file}
-Immediately enable events listed in @var{file}.
-The file must contain one event name (as listed in the @file{trace-events-all}
-file) per line; globbing patterns are accepted too. This option is only
-available if QEMU has been compiled with the @var{simple}, @var{stderr} or
-@var{ftrace} tracing backend.
-
-@item file=@var{file}
-Log output traces to @var{file}.
-This option is only available if QEMU has been compiled with
-the @var{simple} tracing backend.
-@end table
+@include qemu-option-trace.texi
ETEXI
HXCOMM Internal use
diff --git a/qmp-commands.hx b/qmp-commands.hx
index b444c2025b..6937e83cbd 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -4978,8 +4978,8 @@ Example for pseries machine type started with
-> { "execute": "query-hotpluggable-cpus" }
<- {"return": [
- { "props": { "core": 8 }, "type": "POWER8-spapr-cpu-core",
+ { "props": { "core-id": 8 }, "type": "POWER8-spapr-cpu-core",
"vcpus-count": 1 },
- { "props": { "core": 0 }, "type": "POWER8-spapr-cpu-core",
+ { "props": { "core-id": 0 }, "type": "POWER8-spapr-cpu-core",
"vcpus-count": 1, "qom-path": "/machine/unattached/device[0]"}
]}'
diff --git a/qobject/json-lexer.c b/qobject/json-lexer.c
index 496374d9ab..af4a75e05b 100644
--- a/qobject/json-lexer.c
+++ b/qobject/json-lexer.c
@@ -18,11 +18,20 @@
#define MAX_TOKEN_SIZE (64ULL << 20)
/*
- * \"([^\\\"]|(\\\"\\'\\\\\\/\\b\\f\\n\\r\\t\\u[0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F]))*\"
- * '([^\\']|(\\\"\\'\\\\\\/\\b\\f\\n\\r\\t\\u[0-9a-fA-F][0-9a-fA-F][0-9a-fA-F][0-9a-fA-F]))*'
- * 0|([1-9][0-9]*(.[0-9]+)?([eE]([-+])?[0-9]+))
+ * Required by JSON (RFC 7159):
+ *
+ * \"([^\\\"]|\\[\"'\\/bfnrt]|\\u[0-9a-fA-F]{4})*\"
+ * -?(0|[1-9][0-9]*)(.[0-9]+)?([eE][-+]?[0-9]+)?
* [{}\[\],:]
- * [a-z]+
+ * [a-z]+ # covers null, true, false
+ *
+ * Extension of '' strings:
+ *
+ * '([^\\']|\\[\"'\\/bfnrt]|\\u[0-9a-fA-F]{4})*'
+ *
+ * Extension for vararg handling in JSON construction:
+ *
+ * %((l|ll|I64)?d|[ipsf])
*
*/
@@ -213,7 +222,7 @@ static const uint8_t json_lexer[][256] = {
['\t'] = IN_WHITESPACE,
['\r'] = IN_WHITESPACE,
['\n'] = IN_WHITESPACE,
- },
+ },
/* escape */
[IN_ESCAPE_LL] = {
diff --git a/qobject/json-streamer.c b/qobject/json-streamer.c
index 02516853a1..7164390cf5 100644
--- a/qobject/json-streamer.c
+++ b/qobject/json-streamer.c
@@ -20,9 +20,15 @@
#define MAX_TOKEN_COUNT (2ULL << 20)
#define MAX_NESTING (1ULL << 10)
+static void json_message_free_token(void *token, void *opaque)
+{
+ g_free(token);
+}
+
static void json_message_free_tokens(JSONMessageParser *parser)
{
if (parser->tokens) {
+ g_queue_foreach(parser->tokens, json_message_free_token, NULL);
g_queue_free(parser->tokens);
parser->tokens = NULL;
}
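
json_message_free_tokens() now walks the queue with g_queue_foreach() before g_queue_free(), because g_queue_free() releases only the queue links, not the token payloads — which is the leak being fixed. On GLib 2.32 and newer the same thing can be written in one call; a hedged alternative sketch, not what this patch does (QEMU at the time still supported older GLib, which is presumably why the foreach form is used):

    #include <glib.h>

    static void free_tokens(GQueue **tokens)
    {
        if (*tokens) {
            /* Frees every element with g_free(), then the queue itself. */
            g_queue_free_full(*tokens, g_free);
            *tokens = NULL;
        }
    }

    int main(void)
    {
        GQueue *q = g_queue_new();

        g_queue_push_tail(q, g_strdup("token"));
        free_tokens(&q);
        return 0;
    }
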
diff --git a/roms/config.seabios-128k b/roms/config.seabios-128k
index 0a9da77a5e..93203af0de 100644
--- a/roms/config.seabios-128k
+++ b/roms/config.seabios-128k
@@ -2,9 +2,11 @@
# need to turn off features (xhci,uas) to make it fit into 128k
CONFIG_QEMU=y
CONFIG_ROM_SIZE=128
+CONFIG_BOOTSPLASH=n
CONFIG_XEN=n
CONFIG_USB_OHCI=n
CONFIG_USB_XHCI=n
CONFIG_USB_UAS=n
CONFIG_SDCARD=n
CONFIG_TCGBIOS=n
+CONFIG_MPT_SCSI=n
diff --git a/roms/seabios b/roms/seabios
-Subproject b3ef39f532db52bf17457ba931da758eeb38d6b
+Subproject e2fc41e24ee0ada60fc511d60b15a41b294538b
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index c939a325bc..cf32c8f5fa 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2453,7 +2453,7 @@ sub process {
}
# recommend qemu_strto* over strto* for numeric conversions
- if ($line =~ /\b(strto[^k].*?)\s*\(/) {
+ if ($line =~ /\b(strto[^kd].*?)\s*\(/) {
WARN("consider using qemu_$1 in preference to $1\n" . $herecurr);
}
# check for module_init(), use category-specific init macros explicitly please
diff --git a/scripts/qapi-visit.py b/scripts/qapi-visit.py
index 70ea8caef5..ffb635c508 100644
--- a/scripts/qapi-visit.py
+++ b/scripts/qapi-visit.py
@@ -172,6 +172,9 @@ void visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error
if (err) {
goto out;
}
+ if (!*obj) {
+ goto out_obj;
+ }
switch ((*obj)->type) {
''',
c_name=c_name(name), promote_int=promote_int)
@@ -206,10 +209,13 @@ void visit_type_%(c_name)s(Visitor *v, const char *name, %(c_name)s **obj, Error
''')
ret += mcgen('''
+ case QTYPE_NONE:
+ abort();
default:
error_setg(&err, QERR_INVALID_PARAMETER_TYPE, name ? name : "null",
"%(name)s");
}
+out_obj:
visit_end_alternate(v);
if (err && visit_is_input(v)) {
qapi_free_%(c_name)s(*obj);
diff --git a/slirp/Makefile.objs b/slirp/Makefile.objs
index 6748e4f60a..1baa1f1c7c 100644
--- a/slirp/Makefile.objs
+++ b/slirp/Makefile.objs
@@ -1,5 +1,5 @@
common-obj-y = cksum.o if.o ip_icmp.o ip6_icmp.o ip6_input.o ip6_output.o \
- ip_input.o ip_output.o dnssearch.o
+ ip_input.o ip_output.o dnssearch.o dhcpv6.o
common-obj-y += slirp.o mbuf.o misc.o sbuf.o socket.o tcp_input.o tcp_output.o
common-obj-y += tcp_subr.o tcp_timer.o udp.o udp6.o bootp.o tftp.o arp_table.o \
ndp_table.o
diff --git a/slirp/dhcpv6.c b/slirp/dhcpv6.c
new file mode 100644
index 0000000000..02c51c7756
--- /dev/null
+++ b/slirp/dhcpv6.c
@@ -0,0 +1,209 @@
+/*
+ * SLIRP stateless DHCPv6
+ *
+ * We only support stateless DHCPv6, e.g. for network booting.
+ * See RFC 3315, RFC 3736, RFC 3646 and RFC 5970 for details.
+ *
+ * Copyright 2016 Thomas Huth, Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "slirp.h"
+#include "dhcpv6.h"
+
+/* DHCPv6 message types */
+#define MSGTYPE_REPLY 7
+#define MSGTYPE_INFO_REQUEST 11
+
+/* DHCPv6 option types */
+#define OPTION_CLIENTID 1
+#define OPTION_IAADDR 5
+#define OPTION_ORO 6
+#define OPTION_DNS_SERVERS 23
+#define OPTION_BOOTFILE_URL 59
+
+struct requested_infos {
+ uint8_t *client_id;
+ int client_id_len;
+ bool want_dns;
+ bool want_boot_url;
+};
+
+/**
+ * Analyze the info request message sent by the client to see what data it
+ * provided and what it wants to have. The information is gathered in the
+ * "requested_infos" struct. Note that client_id (if provided) points into
+ * the odata region, thus the caller must keep odata valid as long as it
+ * needs to access the requested_infos struct.
+ */
+static int dhcpv6_parse_info_request(uint8_t *odata, int olen,
+ struct requested_infos *ri)
+{
+ int i, req_opt;
+
+ while (olen > 4) {
+ /* Parse one option */
+ int option = odata[0] << 8 | odata[1];
+ int len = odata[2] << 8 | odata[3];
+
+ if (len + 4 > olen) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Guest sent bad DHCPv6 packet!\n");
+ return -E2BIG;
+ }
+
+ switch (option) {
+ case OPTION_IAADDR:
+ /* According to RFC3315, we must discard requests with IA option */
+ return -EINVAL;
+ case OPTION_CLIENTID:
+ if (len > 256) {
+ /* Avoid very long IDs which could cause problems later */
+ return -E2BIG;
+ }
+ ri->client_id = odata + 4;
+ ri->client_id_len = len;
+ break;
+ case OPTION_ORO: /* Option request option */
+ if (len & 1) {
+ return -EINVAL;
+ }
+ /* Check which options the client wants to have */
+ for (i = 0; i < len; i += 2) {
+ req_opt = odata[4 + i] << 8 | odata[4 + i + 1];
+ switch (req_opt) {
+ case OPTION_DNS_SERVERS:
+ ri->want_dns = true;
+ break;
+ case OPTION_BOOTFILE_URL:
+ ri->want_boot_url = true;
+ break;
+ default:
+ DEBUG_MISC((dfd, "dhcpv6: Unsupported option request %d\n",
+ req_opt));
+ }
+ }
+ break;
+ default:
+ DEBUG_MISC((dfd, "dhcpv6 info req: Unsupported option %d, len=%d\n",
+ option, len));
+ }
+
+ odata += len + 4;
+ olen -= len + 4;
+ }
+
+ return 0;
+}
+
+
+/**
+ * Handle information request messages
+ */
+static void dhcpv6_info_request(Slirp *slirp, struct sockaddr_in6 *srcsas,
+ uint32_t xid, uint8_t *odata, int olen)
+{
+ struct requested_infos ri = { NULL };
+ struct sockaddr_in6 sa6, da6;
+ struct mbuf *m;
+ uint8_t *resp;
+
+ if (dhcpv6_parse_info_request(odata, olen, &ri) < 0) {
+ return;
+ }
+
+ m = m_get(slirp);
+ if (!m) {
+ return;
+ }
+ memset(m->m_data, 0, m->m_size);
+ m->m_data += IF_MAXLINKHDR;
+ resp = (uint8_t *)m->m_data + sizeof(struct ip6) + sizeof(struct udphdr);
+
+ /* Fill in response */
+ *resp++ = MSGTYPE_REPLY;
+ *resp++ = (uint8_t)(xid >> 16);
+ *resp++ = (uint8_t)(xid >> 8);
+ *resp++ = (uint8_t)xid;
+
+ if (ri.client_id) {
+ *resp++ = OPTION_CLIENTID >> 8; /* option-code high byte */
+ *resp++ = OPTION_CLIENTID; /* option-code low byte */
+ *resp++ = ri.client_id_len >> 8; /* option-len high byte */
+ *resp++ = ri.client_id_len; /* option-len low byte */
+ memcpy(resp, ri.client_id, ri.client_id_len);
+ resp += ri.client_id_len;
+ }
+ if (ri.want_dns) {
+ *resp++ = OPTION_DNS_SERVERS >> 8; /* option-code high byte */
+ *resp++ = OPTION_DNS_SERVERS; /* option-code low byte */
+ *resp++ = 0; /* option-len high byte */
+ *resp++ = 16; /* option-len low byte */
+ memcpy(resp, &slirp->vnameserver_addr6, 16);
+ resp += 16;
+ }
+ if (ri.want_boot_url) {
+ uint8_t *sa = slirp->vhost_addr6.s6_addr;
+ int slen, smaxlen;
+
+ *resp++ = OPTION_BOOTFILE_URL >> 8; /* option-code high byte */
+ *resp++ = OPTION_BOOTFILE_URL; /* option-code low byte */
+ smaxlen = (uint8_t *)m->m_data + IF_MTU - (resp + 2);
+ slen = snprintf((char *)resp + 2, smaxlen,
+ "tftp://[%02x%02x:%02x%02x:%02x%02x:%02x%02x:"
+ "%02x%02x:%02x%02x:%02x%02x:%02x%02x]/%s",
+ sa[0], sa[1], sa[2], sa[3], sa[4], sa[5], sa[6], sa[7],
+ sa[8], sa[9], sa[10], sa[11], sa[12], sa[13], sa[14],
+ sa[15], slirp->bootp_filename);
+ slen = min(slen, smaxlen);
+ *resp++ = slen >> 8; /* option-len high byte */
+ *resp++ = slen; /* option-len low byte */
+ resp += slen;
+ }
+
+ sa6.sin6_addr = slirp->vhost_addr6;
+ sa6.sin6_port = DHCPV6_SERVER_PORT;
+ da6.sin6_addr = srcsas->sin6_addr;
+ da6.sin6_port = srcsas->sin6_port;
+ m->m_data += sizeof(struct ip6) + sizeof(struct udphdr);
+ m->m_len = resp - (uint8_t *)m->m_data;
+ udp6_output(NULL, m, &sa6, &da6);
+}
+
+/**
+ * Handle DHCPv6 messages sent by the client
+ */
+void dhcpv6_input(struct sockaddr_in6 *srcsas, struct mbuf *m)
+{
+ uint8_t *data = (uint8_t *)m->m_data + sizeof(struct udphdr);
+ int data_len = m->m_len - sizeof(struct udphdr);
+ uint32_t xid;
+
+ if (data_len < 4) {
+ return;
+ }
+
+ xid = ntohl(*(uint32_t *)data) & 0xffffff;
+
+ switch (data[0]) {
+ case MSGTYPE_INFO_REQUEST:
+ dhcpv6_info_request(m->slirp, srcsas, xid, &data[4], data_len - 4);
+ break;
+ default:
+ DEBUG_MISC((dfd, "dhcpv6_input: Unsupported message type 0x%x\n",
+ data[0]));
+ }
+}
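
dhcpv6_parse_info_request() above walks the payload as a flat sequence of (16-bit option code, 16-bit length, value) records following the 4-byte message header (type plus 24-bit transaction id). For orientation, a hedged sketch of the minimal Information-Request a client could send to exercise it — an Option Request Option asking for DNS servers and the boot-file URL; the helper name is illustrative and this is not part of the patch:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    #define MSGTYPE_INFO_REQUEST 11
    #define OPTION_ORO            6
    #define OPTION_DNS_SERVERS   23
    #define OPTION_BOOTFILE_URL  59

    /* Writes a minimal stateless Information-Request into buf; returns its size. */
    static size_t build_info_request(uint8_t *buf, size_t len, uint32_t xid)
    {
        static const uint8_t oro[] = {
            0, OPTION_ORO,          /* option-code  = 6 */
            0, 4,                   /* option-len   = 4 */
            0, OPTION_DNS_SERVERS,  /* requested: DNS servers (23) */
            0, OPTION_BOOTFILE_URL, /* requested: boot-file URL (59) */
        };

        if (len < 4 + sizeof(oro)) {
            return 0;
        }
        buf[0] = MSGTYPE_INFO_REQUEST;
        buf[1] = (uint8_t)(xid >> 16);  /* 24-bit transaction id, big endian */
        buf[2] = (uint8_t)(xid >> 8);
        buf[3] = (uint8_t)xid;
        memcpy(buf + 4, oro, sizeof(oro));
        return 4 + sizeof(oro);
    }
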
diff --git a/slirp/dhcpv6.h b/slirp/dhcpv6.h
new file mode 100644
index 0000000000..9189cd3f2d
--- /dev/null
+++ b/slirp/dhcpv6.h
@@ -0,0 +1,22 @@
+/*
+ * Definitions and prototypes for SLIRP stateless DHCPv6
+ *
+ * Copyright 2016 Thomas Huth, Red Hat Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2
+ * or later. See the COPYING file in the top-level directory.
+ */
+#ifndef SLIRP_DHCPV6_H
+#define SLIRP_DHCPV6_H
+
+#define DHCPV6_SERVER_PORT 547
+
+#define ALLDHCP_MULTICAST { .s6_addr = \
+ { 0xff, 0x02, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x01, 0x00, 0x02 } }
+
+void dhcpv6_input(struct sockaddr_in6 *srcsas, struct mbuf *m);
+
+#endif
diff --git a/slirp/ip6.h b/slirp/ip6.h
index 8ddfa242c4..da23de66f1 100644
--- a/slirp/ip6.h
+++ b/slirp/ip6.h
@@ -26,6 +26,12 @@
0x00, 0x00, 0x00, 0x00,\
0x00, 0x00, 0x00, 0x02 } }
+#define ZERO_ADDR { .s6_addr = \
+ { 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00 } }
+
static inline bool in6_equal(const struct in6_addr *a, const struct in6_addr *b)
{
return memcmp(a, b, sizeof(*a)) == 0;
@@ -84,6 +90,9 @@ static inline bool in6_equal_mach(const struct in6_addr *a,
#define in6_solicitednode_multicast(a)\
(in6_equal_net(a, &(struct in6_addr)SOLICITED_NODE_PREFIX, 104))
+#define in6_zero(a)\
+ (in6_equal(a, &(struct in6_addr)ZERO_ADDR))
+
/* Compute emulated host MAC address from its ipv6 address */
static inline void in6_compute_ethaddr(struct in6_addr ip,
uint8_t eth[ETH_ALEN])
diff --git a/slirp/ip6_icmp.c b/slirp/ip6_icmp.c
index 48016a9f85..6d18e28985 100644
--- a/slirp/ip6_icmp.c
+++ b/slirp/ip6_icmp.c
@@ -148,7 +148,11 @@ void ndp_send_ra(Slirp *slirp)
rip->ip_nh = IPPROTO_ICMPV6;
rip->ip_pl = htons(ICMP6_NDP_RA_MINLEN
+ NDPOPT_LINKLAYER_LEN
- + NDPOPT_PREFIXINFO_LEN);
+ + NDPOPT_PREFIXINFO_LEN
+#ifndef _WIN32
+ + NDPOPT_RDNSS_LEN
+#endif
+ );
t->m_len = sizeof(struct ip6) + ntohs(rip->ip_pl);
/* Build ICMPv6 packet */
@@ -166,16 +170,16 @@ void ndp_send_ra(Slirp *slirp)
ricmp->icmp6_nra.lifetime = htons(NDP_AdvDefaultLifetime);
ricmp->icmp6_nra.reach_time = htonl(NDP_AdvReachableTime);
ricmp->icmp6_nra.retrans_time = htonl(NDP_AdvRetransTime);
+ t->m_data += ICMP6_NDP_RA_MINLEN;
/* Source link-layer address (NDP option) */
- t->m_data += ICMP6_NDP_RA_MINLEN;
struct ndpopt *opt = mtod(t, struct ndpopt *);
opt->ndpopt_type = NDPOPT_LINKLAYER_SOURCE;
opt->ndpopt_len = NDPOPT_LINKLAYER_LEN / 8;
in6_compute_ethaddr(rip->ip_src, opt->ndpopt_linklayer);
+ t->m_data += NDPOPT_LINKLAYER_LEN;
/* Prefix information (NDP option) */
- t->m_data += NDPOPT_LINKLAYER_LEN;
struct ndpopt *opt2 = mtod(t, struct ndpopt *);
opt2->ndpopt_type = NDPOPT_PREFIX_INFO;
opt2->ndpopt_len = NDPOPT_PREFIXINFO_LEN / 8;
@@ -187,8 +191,25 @@ void ndp_send_ra(Slirp *slirp)
opt2->ndpopt_prefixinfo.pref_lt = htonl(NDP_AdvPrefLifetime);
opt2->ndpopt_prefixinfo.reserved2 = 0;
opt2->ndpopt_prefixinfo.prefix = slirp->vprefix_addr6;
+ t->m_data += NDPOPT_PREFIXINFO_LEN;
+
+#ifndef _WIN32
+ /* Recursive DNS server address, RDNSS (NDP option) */
+ /* disabled on Windows for now, until get_dns6_addr is implemented */
+ struct ndpopt *opt3 = mtod(t, struct ndpopt *);
+ opt3->ndpopt_type = NDPOPT_RDNSS;
+ opt3->ndpopt_len = NDPOPT_RDNSS_LEN / 8;
+ opt3->ndpopt_rdnss.reserved = 0;
+ opt3->ndpopt_rdnss.lifetime = htonl(2 * NDP_MaxRtrAdvInterval);
+ opt3->ndpopt_rdnss.addr = slirp->vnameserver_addr6;
+ t->m_data += NDPOPT_RDNSS_LEN;
+#endif
/* ICMPv6 Checksum */
+#ifndef _WIN32
+ t->m_data -= NDPOPT_RDNSS_LEN;
+#endif
+ t->m_data -= NDPOPT_PREFIXINFO_LEN;
t->m_data -= NDPOPT_LINKLAYER_LEN;
t->m_data -= ICMP6_NDP_RA_MINLEN;
t->m_data -= sizeof(struct ip6);
diff --git a/slirp/ip6_icmp.h b/slirp/ip6_icmp.h
index 9460bf837a..2282d29076 100644
--- a/slirp/ip6_icmp.h
+++ b/slirp/ip6_icmp.h
@@ -122,6 +122,7 @@ struct ndpopt {
uint8_t ndpopt_len; /* /!\ In units of 8 octets */
union {
unsigned char linklayer_addr[6]; /* Source/Target Link-layer */
+#define ndpopt_linklayer ndpopt_body.linklayer_addr
struct prefixinfo { /* Prefix Information */
uint8_t prefix_length;
#ifdef HOST_WORDS_BIGENDIAN
@@ -134,19 +135,26 @@ struct ndpopt {
uint32_t reserved2;
struct in6_addr prefix;
} QEMU_PACKED prefixinfo;
- } ndpopt_body;
-#define ndpopt_linklayer ndpopt_body.linklayer_addr
#define ndpopt_prefixinfo ndpopt_body.prefixinfo
+ struct rdnss {
+ uint16_t reserved;
+ uint32_t lifetime;
+ struct in6_addr addr;
+ } QEMU_PACKED rdnss;
+#define ndpopt_rdnss ndpopt_body.rdnss
+ } ndpopt_body;
} QEMU_PACKED;
/* NDP options type */
#define NDPOPT_LINKLAYER_SOURCE 1 /* Source Link-Layer Address */
#define NDPOPT_LINKLAYER_TARGET 2 /* Target Link-Layer Address */
#define NDPOPT_PREFIX_INFO 3 /* Prefix Information */
+#define NDPOPT_RDNSS 25 /* Recursive DNS Server Address */
/* NDP options size, in octets. */
#define NDPOPT_LINKLAYER_LEN 8
#define NDPOPT_PREFIXINFO_LEN 32
+#define NDPOPT_RDNSS_LEN 24
/*
* Definition of type and code field values.
diff --git a/slirp/libslirp.h b/slirp/libslirp.h
index 127aa41d40..b6fc584219 100644
--- a/slirp/libslirp.h
+++ b/slirp/libslirp.h
@@ -7,6 +7,7 @@ struct Slirp;
typedef struct Slirp Slirp;
int get_dns_addr(struct in_addr *pdns_addr);
+int get_dns6_addr(struct in6_addr *pdns6_addr, uint32_t *scope_id);
Slirp *slirp_init(int restricted, bool in_enabled, struct in_addr vnetwork,
struct in_addr vnetmask, struct in_addr vhost,
diff --git a/slirp/slirp.c b/slirp/slirp.c
index 9f4bea3d3b..7eb183d0e9 100644
--- a/slirp/slirp.c
+++ b/slirp/slirp.c
@@ -30,6 +30,10 @@
#include "hw/hw.h"
#include "qemu/cutils.h"
+#ifndef _WIN32
+#include <net/if.h>
+#endif
+
/* host loopback address */
struct in_addr loopback_addr;
/* host loopback network mask */
@@ -46,7 +50,13 @@ static QTAILQ_HEAD(slirp_instances, Slirp) slirp_instances =
QTAILQ_HEAD_INITIALIZER(slirp_instances);
static struct in_addr dns_addr;
+#ifndef _WIN32
+static struct in6_addr dns6_addr;
+#endif
static u_int dns_addr_time;
+#ifndef _WIN32
+static u_int dns6_addr_time;
+#endif
#define TIMEOUT_FAST 2 /* milliseconds */
#define TIMEOUT_SLOW 499 /* milliseconds */
@@ -100,6 +110,11 @@ int get_dns_addr(struct in_addr *pdns_addr)
return 0;
}
+int get_dns6_addr(struct in6_addr *pdns6_addr, uint32_t *scope_id)
+{
+ return -1;
+}
+
static void winsock_cleanup(void)
{
WSACleanup();
@@ -107,33 +122,39 @@ static void winsock_cleanup(void)
#else
-static struct stat dns_addr_stat;
+static int get_dns_addr_cached(void *pdns_addr, void *cached_addr,
+ socklen_t addrlen,
+ struct stat *cached_stat, u_int *cached_time)
+{
+ struct stat old_stat;
+ if (curtime - *cached_time < TIMEOUT_DEFAULT) {
+ memcpy(pdns_addr, cached_addr, addrlen);
+ return 0;
+ }
+ old_stat = *cached_stat;
+ if (stat("/etc/resolv.conf", cached_stat) != 0) {
+ return -1;
+ }
+ if (cached_stat->st_dev == old_stat.st_dev
+ && cached_stat->st_ino == old_stat.st_ino
+ && cached_stat->st_size == old_stat.st_size
+ && cached_stat->st_mtime == old_stat.st_mtime) {
+ memcpy(pdns_addr, cached_addr, addrlen);
+ return 0;
+ }
+ return 1;
+}
-int get_dns_addr(struct in_addr *pdns_addr)
+static int get_dns_addr_resolv_conf(int af, void *pdns_addr, void *cached_addr,
+ socklen_t addrlen, uint32_t *scope_id,
+ u_int *cached_time)
{
char buff[512];
char buff2[257];
FILE *f;
int found = 0;
- struct in_addr tmp_addr;
-
- if (dns_addr.s_addr != 0) {
- struct stat old_stat;
- if ((curtime - dns_addr_time) < TIMEOUT_DEFAULT) {
- *pdns_addr = dns_addr;
- return 0;
- }
- old_stat = dns_addr_stat;
- if (stat("/etc/resolv.conf", &dns_addr_stat) != 0)
- return -1;
- if ((dns_addr_stat.st_dev == old_stat.st_dev)
- && (dns_addr_stat.st_ino == old_stat.st_ino)
- && (dns_addr_stat.st_size == old_stat.st_size)
- && (dns_addr_stat.st_mtime == old_stat.st_mtime)) {
- *pdns_addr = dns_addr;
- return 0;
- }
- }
+ void *tmp_addr = alloca(addrlen);
+ unsigned if_index;
f = fopen("/etc/resolv.conf", "r");
if (!f)
@@ -144,13 +165,25 @@ int get_dns_addr(struct in_addr *pdns_addr)
#endif
while (fgets(buff, 512, f) != NULL) {
if (sscanf(buff, "nameserver%*[ \t]%256s", buff2) == 1) {
- if (!inet_aton(buff2, &tmp_addr))
+ char *c = strchr(buff2, '%');
+ if (c) {
+ if_index = if_nametoindex(c + 1);
+ *c = '\0';
+ } else {
+ if_index = 0;
+ }
+
+ if (!inet_pton(af, buff2, tmp_addr)) {
continue;
+ }
/* If it's the first one, set it to dns_addr */
if (!found) {
- *pdns_addr = tmp_addr;
- dns_addr = tmp_addr;
- dns_addr_time = curtime;
+ memcpy(pdns_addr, tmp_addr, addrlen);
+ memcpy(cached_addr, tmp_addr, addrlen);
+ if (scope_id) {
+ *scope_id = if_index;
+ }
+ *cached_time = curtime;
}
#ifdef DEBUG
else
@@ -163,8 +196,14 @@ int get_dns_addr(struct in_addr *pdns_addr)
break;
}
#ifdef DEBUG
- else
- fprintf(stderr, "%s", inet_ntoa(tmp_addr));
+ else {
+ char s[INET6_ADDRSTRLEN];
+ char *res = inet_ntop(af, tmp_addr, s, sizeof(s));
+ if (!res) {
+ res = "(string conversion error)";
+ }
+ fprintf(stderr, "%s", res);
+ }
#endif
}
}
@@ -174,6 +213,39 @@ int get_dns_addr(struct in_addr *pdns_addr)
return 0;
}
+int get_dns_addr(struct in_addr *pdns_addr)
+{
+ static struct stat dns_addr_stat;
+
+ if (dns_addr.s_addr != 0) {
+ int ret;
+ ret = get_dns_addr_cached(pdns_addr, &dns_addr, sizeof(dns_addr),
+ &dns_addr_stat, &dns_addr_time);
+ if (ret <= 0) {
+ return ret;
+ }
+ }
+ return get_dns_addr_resolv_conf(AF_INET, pdns_addr, &dns_addr,
+ sizeof(dns_addr), NULL, &dns_addr_time);
+}
+
+int get_dns6_addr(struct in6_addr *pdns6_addr, uint32_t *scope_id)
+{
+ static struct stat dns6_addr_stat;
+
+ if (!in6_zero(&dns6_addr)) {
+ int ret;
+ ret = get_dns_addr_cached(pdns6_addr, &dns6_addr, sizeof(dns6_addr),
+ &dns6_addr_stat, &dns6_addr_time);
+ if (ret <= 0) {
+ return ret;
+ }
+ }
+ return get_dns_addr_resolv_conf(AF_INET6, pdns6_addr, &dns6_addr,
+ sizeof(dns6_addr),
+ scope_id, &dns6_addr_time);
+}
+
#endif
static void slirp_init_once(void)
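
get_dns_addr_resolv_conf() now also copes with IPv6 nameserver entries that carry a scope suffix such as fe80::1%eth0: the string is split at '%' and the interface name converted to an index with if_nametoindex(). A standalone sketch of just that parsing step, assuming POSIX if_nametoindex() and inet_pton(); the helper name is illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <net/if.h>

    /* Parse "fe80::1%eth0" into an in6_addr plus an interface index (0 if none). */
    static int parse_nameserver6(const char *entry, struct in6_addr *addr,
                                 unsigned *if_index)
    {
        char buf[INET6_ADDRSTRLEN + IF_NAMESIZE + 2];
        char *percent;

        snprintf(buf, sizeof(buf), "%s", entry);
        percent = strchr(buf, '%');
        if (percent) {
            *if_index = if_nametoindex(percent + 1);  /* 0 if unknown interface */
            *percent = '\0';
        } else {
            *if_index = 0;
        }
        return inet_pton(AF_INET6, buf, addr) == 1 ? 0 : -1;
    }

    int main(void)
    {
        struct in6_addr a;
        unsigned idx;

        if (parse_nameserver6("fe80::1%lo", &a, &idx) == 0) {
            printf("parsed link-local nameserver, scope index %u\n", idx);
        }
        return 0;
    }
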
diff --git a/slirp/socket.c b/slirp/socket.c
index b336586c7b..02e89ce1f2 100644
--- a/slirp/socket.c
+++ b/slirp/socket.c
@@ -816,9 +816,12 @@ void sotranslate_out(struct socket *so, struct sockaddr_storage *addr)
if (in6_equal_net(&so->so_faddr6, &slirp->vprefix_addr6,
slirp->vprefix_len)) {
if (in6_equal(&so->so_faddr6, &slirp->vnameserver_addr6)) {
- /*if (get_dns_addr(&addr) < 0) {*/ /* TODO */
+ uint32_t scope_id;
+ if (get_dns6_addr(&sin6->sin6_addr, &scope_id) >= 0) {
+ sin6->sin6_scope_id = scope_id;
+ } else {
sin6->sin6_addr = in6addr_loopback;
- /*}*/
+ }
} else {
sin6->sin6_addr = in6addr_loopback;
}
diff --git a/slirp/tftp.c b/slirp/tftp.c
index 12b5ff6e25..367340222d 100644
--- a/slirp/tftp.c
+++ b/slirp/tftp.c
@@ -208,8 +208,6 @@ static void tftp_send_error(struct tftp_session *spt,
goto out;
}
- memset(m->m_data, 0, m->m_size);
-
tp = tftp_prep_mbuf_data(spt, m);
tp->tp_op = htons(TFTP_ERROR);
@@ -237,8 +235,6 @@ static void tftp_send_next_block(struct tftp_session *spt,
return;
}
- memset(m->m_data, 0, m->m_size);
-
tp = tftp_prep_mbuf_data(spt, m);
tp->tp_op = htons(TFTP_DATA);
diff --git a/slirp/udp6.c b/slirp/udp6.c
index 94efb13240..9fa314bc2d 100644
--- a/slirp/udp6.c
+++ b/slirp/udp6.c
@@ -7,6 +7,7 @@
#include "qemu-common.h"
#include "slirp.h"
#include "udp.h"
+#include "dhcpv6.h"
void udp6_input(struct mbuf *m)
{
@@ -61,7 +62,17 @@ void udp6_input(struct mbuf *m)
lhost.sin6_addr = ip->ip_src;
lhost.sin6_port = uh->uh_sport;
- /* TODO handle DHCP/BOOTP */
+ /* handle DHCPv6 */
+ if (ntohs(uh->uh_dport) == DHCPV6_SERVER_PORT &&
+ (in6_equal(&ip->ip_dst, &slirp->vhost_addr6) ||
+ in6_equal(&ip->ip_dst, &(struct in6_addr)ALLDHCP_MULTICAST))) {
+ m->m_data += iphlen;
+ m->m_len -= iphlen;
+ dhcpv6_input(&lhost, m);
+ m->m_data -= iphlen;
+ m->m_len += iphlen;
+ goto bad;
+ }
/* handle TFTP */
if (ntohs(uh->uh_dport) == TFTP_SERVER &&
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
index 4b258a6731..7cdcad4fdb 100644
--- a/stubs/Makefile.objs
+++ b/stubs/Makefile.objs
@@ -41,3 +41,6 @@ stub-obj-y += target-monitor-defs.o
stub-obj-y += target-get-monitor-def.o
stub-obj-y += vhost.o
stub-obj-y += iohandler.o
+stub-obj-y += smbios_type_38.o
+stub-obj-y += ipmi.o
+stub-obj-y += pc_madt_cpu_entry.o
diff --git a/stubs/ipmi.c b/stubs/ipmi.c
new file mode 100644
index 0000000000..98b6dcee0d
--- /dev/null
+++ b/stubs/ipmi.c
@@ -0,0 +1,14 @@
+/*
+ * IPMI ACPI firmware handling
+ *
+ * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "hw/acpi/ipmi.h"
+
+void build_acpi_ipmi_devices(Aml *table, BusState *bus)
+{
+}
diff --git a/stubs/pc_madt_cpu_entry.c b/stubs/pc_madt_cpu_entry.c
new file mode 100644
index 0000000000..427e772868
--- /dev/null
+++ b/stubs/pc_madt_cpu_entry.c
@@ -0,0 +1,7 @@
+#include "qemu/osdep.h"
+#include "hw/i386/pc.h"
+
+void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
+ CPUArchIdList *apic_ids, GArray *entry)
+{
+}
diff --git a/stubs/smbios_type_38.c b/stubs/smbios_type_38.c
new file mode 100644
index 0000000000..9528c2c28e
--- /dev/null
+++ b/stubs/smbios_type_38.c
@@ -0,0 +1,14 @@
+/*
+ * IPMI SMBIOS firmware handling
+ *
+ * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "hw/smbios/ipmi.h"
+
+void smbios_build_type_38_table(void)
+{
+}
diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h
index e71ea70ea1..ce8d2965b2 100644
--- a/target-alpha/cpu.h
+++ b/target-alpha/cpu.h
@@ -326,7 +326,6 @@ void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
int is_write, int is_user, uintptr_t retaddr);
#define cpu_list alpha_cpu_list
-#define cpu_exec cpu_alpha_exec
#define cpu_signal_handler cpu_alpha_signal_handler
#include "exec/cpu-all.h"
@@ -467,7 +466,6 @@ AlphaCPU *cpu_alpha_init(const char *cpu_model);
#define cpu_init(cpu_model) CPU(cpu_alpha_init(cpu_model))
void alpha_cpu_list(FILE *f, fprintf_function cpu_fprintf);
-int cpu_alpha_exec(CPUState *cpu);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 243567b8fc..0ea0e6e146 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -449,10 +449,13 @@ static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
+#ifndef CONFIG_USER_ONLY
return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
- && addr < 0
- && ((addr >> 41) & 3) == 2
- && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
+ && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
+ && ((addr >> 41) & 3) == 2);
+#else
+ return false;
+#endif
}
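[Editor's sketch, not part of the patch] The rewritten in_superpage() test above checks that the address is sign-extended all the way down to the implemented virtual-address width and that bits [42:41] select region 2. A host-side restatement of just that predicate, with VA_BITS standing in for TARGET_VIRT_ADDR_SPACE_BITS:

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the expression in the hunk above: all bits above va_bits must
     * equal the sign bit (addr >> va_bits == -1) and bits [42:41] must be 2. */
    static bool is_superpage_addr(int64_t addr, int va_bits)
    {
        return (addr >> va_bits) == -1 && ((addr >> 41) & 3) == 2;
    }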
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
diff --git a/target-arm/arm-semi.c b/target-arm/arm-semi.c
index 8be0645eb0..d50726f65d 100644
--- a/target-arm/arm-semi.c
+++ b/target-arm/arm-semi.c
@@ -564,8 +564,10 @@ target_ulong do_arm_semihosting(CPUARMState *env)
}
case TARGET_SYS_HEAPINFO:
{
- uint32_t *ptr;
+ target_ulong retvals[4];
uint32_t limit;
+ int i;
+
GET_ARG(0);
#ifdef CONFIG_USER_ONLY
@@ -587,30 +589,33 @@ target_ulong do_arm_semihosting(CPUARMState *env)
ts->heap_limit = limit;
}
- ptr = lock_user(VERIFY_WRITE, arg0, 16, 0);
- if (!ptr) {
- /* FIXME - should this error code be -TARGET_EFAULT ? */
- return (uint32_t)-1;
- }
- ptr[0] = tswap32(ts->heap_base);
- ptr[1] = tswap32(ts->heap_limit);
- ptr[2] = tswap32(ts->stack_base);
- ptr[3] = tswap32(0); /* Stack limit. */
- unlock_user(ptr, arg0, 16);
+ retvals[0] = ts->heap_base;
+ retvals[1] = ts->heap_limit;
+ retvals[2] = ts->stack_base;
+ retvals[3] = 0; /* Stack limit. */
#else
limit = ram_size;
- ptr = lock_user(VERIFY_WRITE, arg0, 16, 0);
- if (!ptr) {
- /* FIXME - should this error code be -TARGET_EFAULT ? */
- return (uint32_t)-1;
- }
/* TODO: Make this use the limit of the loaded application. */
- ptr[0] = tswap32(limit / 2);
- ptr[1] = tswap32(limit);
- ptr[2] = tswap32(limit); /* Stack base */
- ptr[3] = tswap32(0); /* Stack limit. */
- unlock_user(ptr, arg0, 16);
+ retvals[0] = limit / 2;
+ retvals[1] = limit;
+ retvals[2] = limit; /* Stack base */
+ retvals[3] = 0; /* Stack limit. */
#endif
+
+ for (i = 0; i < ARRAY_SIZE(retvals); i++) {
+ bool fail;
+
+ if (is_a64(env)) {
+ fail = put_user_u64(retvals[i], arg0 + i * 8);
+ } else {
+ fail = put_user_u32(retvals[i], arg0 + i * 4);
+ }
+
+ if (fail) {
+ /* Couldn't write back to argument block */
+ return -1;
+ }
+ }
return 0;
}
case TARGET_SYS_EXIT:
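[Editor's sketch, not part of the patch] The SYS_HEAPINFO rework above stops assuming a 4 * 32-bit argument block and instead writes each field back at the width the guest expects, using put_user_u64() for AArch64 and put_user_u32() for AArch32 as shown in the hunk. A host-side model of that layout (write_heapinfo and its buffer are illustrative only):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Lay out four return values at 8-byte stride for a 64-bit guest and
     * 4-byte stride for a 32-bit guest, element by element. */
    static void write_heapinfo(uint8_t *block, const uint64_t vals[4], bool aarch64)
    {
        for (int i = 0; i < 4; i++) {
            if (aarch64) {
                uint64_t v = vals[i];
                memcpy(block + i * 8, &v, sizeof(v));
            } else {
                uint32_t v = (uint32_t)vals[i];
                memcpy(block + i * 4, &v, sizeof(v));
            }
        }
    }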
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 7938ddc91c..e2fac46909 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -703,7 +703,6 @@ int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
#endif
ARMCPU *cpu_arm_init(const char *cpu_model);
-int cpu_arm_exec(CPUState *cpu);
target_ulong do_arm_semihosting(CPUARMState *env);
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);
@@ -1891,7 +1890,6 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
#define cpu_init(cpu_model) CPU(cpu_arm_init(cpu_model))
-#define cpu_exec cpu_arm_exec
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index 7c63556697..41e48a41b4 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -344,12 +344,12 @@ float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
if (float32_is_any_nan(a)) {
float32 nan = a;
- if (float32_is_signaling_nan(a)) {
+ if (float32_is_signaling_nan(a, fpst)) {
float_raise(float_flag_invalid, fpst);
- nan = float32_maybe_silence_nan(a);
+ nan = float32_maybe_silence_nan(a, fpst);
}
if (fpst->default_nan_mode) {
- nan = float32_default_nan;
+ nan = float32_default_nan(fpst);
}
return nan;
}
@@ -373,12 +373,12 @@ float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
if (float64_is_any_nan(a)) {
float64 nan = a;
- if (float64_is_signaling_nan(a)) {
+ if (float64_is_signaling_nan(a, fpst)) {
float_raise(float_flag_invalid, fpst);
- nan = float64_maybe_silence_nan(a);
+ nan = float64_maybe_silence_nan(a, fpst);
}
if (fpst->default_nan_mode) {
- nan = float64_default_nan;
+ nan = float64_default_nan(fpst);
}
return nan;
}
@@ -407,7 +407,7 @@ float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
set_float_rounding_mode(float_round_to_zero, &tstat);
set_float_exception_flags(0, &tstat);
r = float64_to_float32(a, &tstat);
- r = float32_maybe_silence_nan(r);
+ r = float32_maybe_silence_nan(r, &tstat);
exflags = get_float_exception_flags(&tstat);
if (exflags & float_flag_inexact) {
r = make_float32(float32_val(r) | 1);
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 35ff7722cb..1f9cdacc59 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -8678,7 +8678,7 @@ float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
/* ARM requires that S<->D conversion of any kind of NaN generates
* a quiet NaN by forcing the most significant frac bit to 1.
*/
- return float64_maybe_silence_nan(r);
+ return float64_maybe_silence_nan(r, &env->vfp.fp_status);
}
float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
@@ -8687,7 +8687,7 @@ float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
/* ARM requires that S<->D conversion of any kind of NaN generates
* a quiet NaN by forcing the most significant frac bit to 1.
*/
- return float32_maybe_silence_nan(r);
+ return float32_maybe_silence_nan(r, &env->vfp.fp_status);
}
/* VFP3 fixed point conversion. */
@@ -8786,7 +8786,7 @@ static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
float32 r = float16_to_float32(make_float16(a), ieee, s);
if (ieee) {
- return float32_maybe_silence_nan(r);
+ return float32_maybe_silence_nan(r, s);
}
return r;
}
@@ -8796,7 +8796,7 @@ static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
float16 r = float32_to_float16(a, ieee, s);
if (ieee) {
- r = float16_maybe_silence_nan(r);
+ r = float16_maybe_silence_nan(r, s);
}
return float16_val(r);
}
@@ -8826,7 +8826,7 @@ float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
if (ieee) {
- return float64_maybe_silence_nan(r);
+ return float64_maybe_silence_nan(r, &env->vfp.fp_status);
}
return r;
}
@@ -8836,7 +8836,7 @@ uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
if (ieee) {
- r = float16_maybe_silence_nan(r);
+ r = float16_maybe_silence_nan(r, &env->vfp.fp_status);
}
return float16_val(r);
}
@@ -8986,12 +8986,12 @@ float32 HELPER(recpe_f32)(float32 input, void *fpstp)
if (float32_is_any_nan(f32)) {
float32 nan = f32;
- if (float32_is_signaling_nan(f32)) {
+ if (float32_is_signaling_nan(f32, fpst)) {
float_raise(float_flag_invalid, fpst);
- nan = float32_maybe_silence_nan(f32);
+ nan = float32_maybe_silence_nan(f32, fpst);
}
if (fpst->default_nan_mode) {
- nan = float32_default_nan;
+ nan = float32_default_nan(fpst);
}
return nan;
} else if (float32_is_infinity(f32)) {
@@ -9040,12 +9040,12 @@ float64 HELPER(recpe_f64)(float64 input, void *fpstp)
/* Deal with any special cases */
if (float64_is_any_nan(f64)) {
float64 nan = f64;
- if (float64_is_signaling_nan(f64)) {
+ if (float64_is_signaling_nan(f64, fpst)) {
float_raise(float_flag_invalid, fpst);
- nan = float64_maybe_silence_nan(f64);
+ nan = float64_maybe_silence_nan(f64, fpst);
}
if (fpst->default_nan_mode) {
- nan = float64_default_nan;
+ nan = float64_default_nan(fpst);
}
return nan;
} else if (float64_is_infinity(f64)) {
@@ -9147,12 +9147,12 @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
if (float32_is_any_nan(f32)) {
float32 nan = f32;
- if (float32_is_signaling_nan(f32)) {
+ if (float32_is_signaling_nan(f32, s)) {
float_raise(float_flag_invalid, s);
- nan = float32_maybe_silence_nan(f32);
+ nan = float32_maybe_silence_nan(f32, s);
}
if (s->default_nan_mode) {
- nan = float32_default_nan;
+ nan = float32_default_nan(s);
}
return nan;
} else if (float32_is_zero(f32)) {
@@ -9160,7 +9160,7 @@ float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
return float32_set_sign(float32_infinity, float32_is_neg(f32));
} else if (float32_is_neg(f32)) {
float_raise(float_flag_invalid, s);
- return float32_default_nan;
+ return float32_default_nan(s);
} else if (float32_is_infinity(f32)) {
return float32_zero;
}
@@ -9211,12 +9211,12 @@ float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
if (float64_is_any_nan(f64)) {
float64 nan = f64;
- if (float64_is_signaling_nan(f64)) {
+ if (float64_is_signaling_nan(f64, s)) {
float_raise(float_flag_invalid, s);
- nan = float64_maybe_silence_nan(f64);
+ nan = float64_maybe_silence_nan(f64, s);
}
if (s->default_nan_mode) {
- nan = float64_default_nan;
+ nan = float64_default_nan(s);
}
return nan;
} else if (float64_is_zero(f64)) {
@@ -9224,7 +9224,7 @@ float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
return float64_set_sign(float64_infinity, float64_is_neg(f64));
} else if (float64_is_neg(f64)) {
float_raise(float_flag_invalid, s);
- return float64_default_nan;
+ return float64_default_nan(s);
} else if (float64_is_infinity(f64)) {
return float64_zero;
}
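[Editor's sketch, not part of the patch] The recurring change in the helpers above is that NaN classification, silencing and the default NaN now take the float_status, because later hunks let MIPS flip which fraction bit marks a signaling NaN (FCR31.NAN2008 / set_snan_bit_is_one). A simplified, self-contained model of why the status matters — fp_status, f32_is_nan and f32_is_signaling_nan here are illustrative, not the softfloat API:

    #include <stdbool.h>
    #include <stdint.h>

    struct fp_status {
        bool snan_bit_is_one;  /* legacy MIPS: fraction MSB set => signaling NaN */
    };

    static bool f32_is_nan(uint32_t a)
    {
        return (a & 0x7fffffff) > 0x7f800000;  /* exponent all ones, fraction != 0 */
    }

    static bool f32_is_signaling_nan(uint32_t a, const struct fp_status *st)
    {
        bool frac_msb = (a & 0x00400000) != 0;

        if (!f32_is_nan(a)) {
            return false;
        }
        /* 2008 encoding: MSB clear => signaling; legacy encoding: MSB set */
        return st->snan_bit_is_one ? frac_msb : !frac_msb;
    }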
diff --git a/target-cris/cpu.h b/target-cris/cpu.h
index d8c47a6c2e..e6046d20ca 100644
--- a/target-cris/cpu.h
+++ b/target-cris/cpu.h
@@ -213,7 +213,6 @@ int cris_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int cris_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
CRISCPU *cpu_cris_init(const char *cpu_model);
-int cpu_cris_exec(CPUState *cpu);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
@@ -260,7 +259,6 @@ enum {
#define cpu_init(cpu_model) CPU(cpu_cris_init(cpu_model))
-#define cpu_exec cpu_cris_exec
#define cpu_signal_handler cpu_cris_signal_handler
/* MMU modes definitions */
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index d9ab884c2b..474b0b937d 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1235,7 +1235,6 @@ void x86_cpu_exec_exit(CPUState *cpu);
X86CPU *cpu_x86_init(const char *cpu_model);
X86CPU *cpu_x86_create(const char *cpu_model, Error **errp);
-int cpu_x86_exec(CPUState *cpu);
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
@@ -1411,7 +1410,6 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define cpu_init(cpu_model) CPU(cpu_x86_init(cpu_model))
-#define cpu_exec cpu_x86_exec
#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list
diff --git a/target-lm32/cpu.h b/target-lm32/cpu.h
index 62880f7e4b..4efe98d828 100644
--- a/target-lm32/cpu.h
+++ b/target-lm32/cpu.h
@@ -236,7 +236,6 @@ static inline lm32_wp_t lm32_wp_type(uint32_t dc, int idx)
}
LM32CPU *cpu_lm32_init(const char *cpu_model);
-int cpu_lm32_exec(CPUState *cpu);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
@@ -257,7 +256,6 @@ bool lm32_cpu_do_semihosting(CPUState *cs);
#define cpu_init(cpu_model) CPU(cpu_lm32_init(cpu_model))
#define cpu_list lm32_cpu_list
-#define cpu_exec cpu_lm32_exec
#define cpu_signal_handler cpu_lm32_signal_handler
int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
diff --git a/target-m68k/cpu.h b/target-m68k/cpu.h
index 008a057a97..9087769997 100644
--- a/target-m68k/cpu.h
+++ b/target-m68k/cpu.h
@@ -146,7 +146,6 @@ void m68k_cpu_exec_exit(CPUState *cs);
void m68k_tcg_init(void);
void m68k_cpu_init_gdb(M68kCPU *cpu);
M68kCPU *cpu_m68k_init(const char *cpu_model);
-int cpu_m68k_exec(CPUState *cpu);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
@@ -243,7 +242,6 @@ void register_m68k_insns (CPUM68KState *env);
#define cpu_init(cpu_model) CPU(cpu_m68k_init(cpu_model))
-#define cpu_exec cpu_m68k_exec
#define cpu_signal_handler cpu_m68k_signal_handler
#define cpu_list m68k_cpu_list
diff --git a/target-m68k/helper.c b/target-m68k/helper.c
index 427cbedfd5..f52d0e3036 100644
--- a/target-m68k/helper.c
+++ b/target-m68k/helper.c
@@ -558,10 +558,10 @@ float64 HELPER(sub_cmp_f64)(CPUM68KState *env, float64 a, float64 b)
/* ??? Should flush denormals to zero. */
float64 res;
res = float64_sub(a, b, &env->fp_status);
- if (float64_is_quiet_nan(res)) {
+ if (float64_is_quiet_nan(res, &env->fp_status)) {
/* +/-inf compares equal against itself, but sub returns nan. */
- if (!float64_is_quiet_nan(a)
- && !float64_is_quiet_nan(b)) {
+ if (!float64_is_quiet_nan(a, &env->fp_status)
+ && !float64_is_quiet_nan(b, &env->fp_status)) {
res = float64_zero;
if (float64_lt_quiet(a, res, &env->fp_status))
res = float64_chs(res);
diff --git a/target-microblaze/cpu.h b/target-microblaze/cpu.h
index d17cf1e301..16815dfc6a 100644
--- a/target-microblaze/cpu.h
+++ b/target-microblaze/cpu.h
@@ -321,7 +321,6 @@ int mb_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void mb_tcg_init(void);
MicroBlazeCPU *cpu_mb_init(const char *cpu_model);
-int cpu_mb_exec(CPUState *cpu);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
@@ -336,7 +335,6 @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,
#define cpu_init(cpu_model) CPU(cpu_mb_init(cpu_model))
-#define cpu_exec cpu_mb_exec
#define cpu_signal_handler cpu_mb_signal_handler
/* MMU modes definitions */
diff --git a/target-microblaze/op_helper.c b/target-microblaze/op_helper.c
index 0533939389..74a043c2ac 100644
--- a/target-microblaze/op_helper.c
+++ b/target-microblaze/op_helper.c
@@ -288,12 +288,14 @@ uint32_t helper_fcmp_un(CPUMBState *env, uint32_t a, uint32_t b)
fa.l = a;
fb.l = b;
- if (float32_is_signaling_nan(fa.f) || float32_is_signaling_nan(fb.f)) {
+ if (float32_is_signaling_nan(fa.f, &env->fp_status) ||
+ float32_is_signaling_nan(fb.f, &env->fp_status)) {
update_fpu_flags(env, float_flag_invalid);
r = 1;
}
- if (float32_is_quiet_nan(fa.f) || float32_is_quiet_nan(fb.f)) {
+ if (float32_is_quiet_nan(fa.f, &env->fp_status) ||
+ float32_is_quiet_nan(fb.f, &env->fp_status)) {
r = 1;
}
diff --git a/target-mips/cpu.h b/target-mips/cpu.h
index 4ce9d47661..1037f9b7eb 100644
--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -111,7 +111,9 @@ struct CPUMIPSFPUContext {
#define FCR0_PRID 8
#define FCR0_REV 0
/* fcsr */
+ uint32_t fcr31_rw_bitmask;
uint32_t fcr31;
+#define FCR31_FS 24
#define FCR31_ABS2008 19
#define FCR31_NAN2008 18
#define SET_FP_COND(num,env) do { ((env).fcr31) |= ((num) ? (1 << ((num) + 24)) : (1 << 23)); } while(0)
@@ -674,7 +676,6 @@ void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf);
-#define cpu_exec cpu_mips_exec
#define cpu_signal_handler cpu_mips_signal_handler
#define cpu_list mips_cpu_list
@@ -800,7 +801,6 @@ enum {
*/
#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0
-int cpu_mips_exec(CPUState *cpu);
void mips_tcg_init(void);
MIPSCPU *cpu_mips_init(const char *cpu_model);
int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc);
@@ -825,6 +825,11 @@ void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);
/* helper.c */
int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
int mmu_idx);
+
+/* op_helper.c */
+uint32_t float_class_s(uint32_t arg, float_status *fst);
+uint64_t float_class_d(uint64_t arg, float_status *fst);
+
#if !defined(CONFIG_USER_ONLY)
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
@@ -844,14 +849,21 @@ static inline void restore_rounding_mode(CPUMIPSState *env)
static inline void restore_flush_mode(CPUMIPSState *env)
{
- set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0,
+ set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0,
&env->active_fpu.fp_status);
}
+static inline void restore_snan_bit_mode(CPUMIPSState *env)
+{
+ set_snan_bit_is_one((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) == 0,
+ &env->active_fpu.fp_status);
+}
+
static inline void restore_fp_status(CPUMIPSState *env)
{
restore_rounding_mode(env);
restore_flush_mode(env);
+ restore_snan_bit_mode(env);
}
static inline void restore_msa_fp_status(CPUMIPSState *env)
diff --git a/target-mips/gdbstub.c b/target-mips/gdbstub.c
index 2707ff5c2b..7c682289c2 100644
--- a/target-mips/gdbstub.c
+++ b/target-mips/gdbstub.c
@@ -90,11 +90,9 @@ int mips_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
if (env->CP0_Config1 & (1 << CP0C1_FP) && n >= 38 && n < 72) {
switch (n) {
case 70:
- env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
- /* set rounding mode */
- restore_rounding_mode(env);
- /* set flush-to-zero mode */
- restore_flush_mode(env);
+ env->active_fpu.fcr31 = (tmp & env->active_fpu.fcr31_rw_bitmask) |
+ (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask));
+ restore_fp_status(env);
break;
case 71:
/* FIR is read-only. Ignore writes. */
diff --git a/target-mips/helper.h b/target-mips/helper.h
index 594341d258..666936c81b 100644
--- a/target-mips/helper.h
+++ b/target-mips/helper.h
@@ -207,8 +207,6 @@ DEF_HELPER_4(ctc1, void, env, tl, i32, i32)
DEF_HELPER_2(float_cvtd_s, i64, env, i32)
DEF_HELPER_2(float_cvtd_w, i64, env, i32)
DEF_HELPER_2(float_cvtd_l, i64, env, i64)
-DEF_HELPER_2(float_cvtl_d, i64, env, i64)
-DEF_HELPER_2(float_cvtl_s, i64, env, i32)
DEF_HELPER_2(float_cvtps_pw, i64, env, i64)
DEF_HELPER_2(float_cvtpw_ps, i64, env, i64)
DEF_HELPER_2(float_cvts_d, i32, env, i64)
@@ -216,14 +214,12 @@ DEF_HELPER_2(float_cvts_w, i32, env, i32)
DEF_HELPER_2(float_cvts_l, i32, env, i64)
DEF_HELPER_2(float_cvts_pl, i32, env, i32)
DEF_HELPER_2(float_cvts_pu, i32, env, i32)
-DEF_HELPER_2(float_cvtw_s, i32, env, i32)
-DEF_HELPER_2(float_cvtw_d, i32, env, i64)
DEF_HELPER_3(float_addr_ps, i64, env, i64, i64)
DEF_HELPER_3(float_mulr_ps, i64, env, i64, i64)
-DEF_HELPER_FLAGS_1(float_class_s, TCG_CALL_NO_RWG_SE, i32, i32)
-DEF_HELPER_FLAGS_1(float_class_d, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_2(float_class_s, TCG_CALL_NO_RWG_SE, i32, env, i32)
+DEF_HELPER_FLAGS_2(float_class_d, TCG_CALL_NO_RWG_SE, i64, env, i64)
#define FOP_PROTO(op) \
DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \
@@ -242,14 +238,20 @@ FOP_PROTO(mina)
#undef FOP_PROTO
#define FOP_PROTO(op) \
-DEF_HELPER_2(float_ ## op ## l_s, i64, env, i32) \
-DEF_HELPER_2(float_ ## op ## l_d, i64, env, i64) \
-DEF_HELPER_2(float_ ## op ## w_s, i32, env, i32) \
-DEF_HELPER_2(float_ ## op ## w_d, i32, env, i64)
+DEF_HELPER_2(float_ ## op ## _l_s, i64, env, i32) \
+DEF_HELPER_2(float_ ## op ## _l_d, i64, env, i64) \
+DEF_HELPER_2(float_ ## op ## _w_s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _w_d, i32, env, i64)
+FOP_PROTO(cvt)
FOP_PROTO(round)
FOP_PROTO(trunc)
FOP_PROTO(ceil)
FOP_PROTO(floor)
+FOP_PROTO(cvt_2008)
+FOP_PROTO(round_2008)
+FOP_PROTO(trunc_2008)
+FOP_PROTO(ceil_2008)
+FOP_PROTO(floor_2008)
#undef FOP_PROTO
#define FOP_PROTO(op) \
diff --git a/target-mips/msa_helper.c b/target-mips/msa_helper.c
index ae92fcbe28..1fdb0d9792 100644
--- a/target-mips/msa_helper.c
+++ b/target-mips/msa_helper.c
@@ -1495,11 +1495,11 @@ MSA_UNOP_DF(pcnt)
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
-#define FLOAT_SNAN16 (float16_default_nan ^ 0x0220)
+#define FLOAT_SNAN16(s) (float16_default_nan(s) ^ 0x0220)
/* 0x7c20 */
-#define FLOAT_SNAN32 (float32_default_nan ^ 0x00400020)
+#define FLOAT_SNAN32(s) (float32_default_nan(s) ^ 0x00400020)
/* 0x7f800020 */
-#define FLOAT_SNAN64 (float64_default_nan ^ 0x0008000000000020ULL)
+#define FLOAT_SNAN64(s) (float64_default_nan(s) ^ 0x0008000000000020ULL)
/* 0x7ff0000000000020 */
static inline void clear_msacsr_cause(CPUMIPSState *env)
@@ -1612,7 +1612,7 @@ static inline float16 float16_from_float32(int32_t a, flag ieee,
float16 f_val;
f_val = float32_to_float16((float32)a, ieee, status);
- f_val = float16_maybe_silence_nan(f_val);
+ f_val = float16_maybe_silence_nan(f_val, status);
return a < 0 ? (f_val | (1 << 15)) : f_val;
}
@@ -1622,7 +1622,7 @@ static inline float32 float32_from_float64(int64_t a, float_status *status)
float32 f_val;
f_val = float64_to_float32((float64)a, status);
- f_val = float32_maybe_silence_nan(f_val);
+ f_val = float32_maybe_silence_nan(f_val, status);
return a < 0 ? (f_val | (1 << 31)) : f_val;
}
@@ -1633,7 +1633,7 @@ static inline float32 float32_from_float16(int16_t a, flag ieee,
float32 f_val;
f_val = float16_to_float32((float16)a, ieee, status);
- f_val = float32_maybe_silence_nan(f_val);
+ f_val = float32_maybe_silence_nan(f_val, status);
return a < 0 ? (f_val | (1 << 31)) : f_val;
}
@@ -1643,7 +1643,7 @@ static inline float64 float64_from_float32(int32_t a, float_status *status)
float64 f_val;
f_val = float32_to_float64((float64)a, status);
- f_val = float64_maybe_silence_nan(f_val);
+ f_val = float64_maybe_silence_nan(f_val, status);
return a < 0 ? (f_val | (1ULL << 63)) : f_val;
}
@@ -1789,7 +1789,7 @@ static inline int32_t float64_to_q32(float64 a, float_status *status)
c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \
\
if (get_enabled_exceptions(env, c)) { \
- DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
} \
} while (0)
@@ -2388,7 +2388,7 @@ void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
\
if (get_enabled_exceptions(env, c)) { \
- DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
} \
} while (0)
@@ -2524,7 +2524,7 @@ void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
\
if (get_enabled_exceptions(env, c)) { \
- DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
} \
} while (0)
@@ -2643,7 +2643,7 @@ void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
\
if (get_enabled_exceptions(env, c)) { \
- DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
} \
} while (0)
@@ -2694,7 +2694,7 @@ void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \
\
if (get_enabled_exceptions(env, c)) { \
- DEST = ((FLOAT_SNAN ## XBITS >> 6) << 6) | c; \
+ DEST = ((FLOAT_SNAN ## XBITS(status) >> 6) << 6) | c; \
} \
} while (0)
@@ -2731,9 +2731,9 @@ void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
msa_move_v(pwd, pwx);
}
-#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS) \
- !float ## BITS ## _is_any_nan(ARG1) \
- && float ## BITS ## _is_quiet_nan(ARG2)
+#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS, STATUS) \
+ !float ## BITS ## _is_any_nan(ARG1) \
+ && float ## BITS ## _is_quiet_nan(ARG2, STATUS)
#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \
do { \
@@ -2745,18 +2745,18 @@ void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
c = update_msacsr(env, 0, 0); \
\
if (get_enabled_exceptions(env, c)) { \
- DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
} \
} while (0)
-#define FMAXMIN_A(F, G, X, _S, _T, BITS) \
+#define FMAXMIN_A(F, G, X, _S, _T, BITS, STATUS) \
do { \
uint## BITS ##_t S = _S, T = _T; \
uint## BITS ##_t as, at, xs, xt, xd; \
- if (NUMBER_QNAN_PAIR(S, T, BITS)) { \
+ if (NUMBER_QNAN_PAIR(S, T, BITS, STATUS)) { \
T = S; \
} \
- else if (NUMBER_QNAN_PAIR(T, S, BITS)) { \
+ else if (NUMBER_QNAN_PAIR(T, S, BITS, STATUS)) { \
S = T; \
} \
as = float## BITS ##_abs(S); \
@@ -2770,6 +2770,7 @@ void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
uint32_t ws, uint32_t wt)
{
+ float_status *status = &env->active_tc.msa_fp_status;
wr_t wx, *pwx = &wx;
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
wr_t *pws = &(env->active_fpu.fpr[ws].wr);
@@ -2781,9 +2782,9 @@ void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
switch (df) {
case DF_WORD:
for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
- if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) {
+ if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32, status)) {
MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pws->w[i], 32);
- } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) {
+ } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32, status)) {
MSA_FLOAT_MAXOP(pwx->w[i], min, pwt->w[i], pwt->w[i], 32);
} else {
MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pwt->w[i], 32);
@@ -2792,9 +2793,9 @@ void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
break;
case DF_DOUBLE:
for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
- if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) {
+ if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64, status)) {
MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pws->d[i], 64);
- } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) {
+ } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64, status)) {
MSA_FLOAT_MAXOP(pwx->d[i], min, pwt->d[i], pwt->d[i], 64);
} else {
MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pwt->d[i], 64);
@@ -2813,6 +2814,7 @@ void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
uint32_t ws, uint32_t wt)
{
+ float_status *status = &env->active_tc.msa_fp_status;
wr_t wx, *pwx = &wx;
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
wr_t *pws = &(env->active_fpu.fpr[ws].wr);
@@ -2824,12 +2826,12 @@ void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
switch (df) {
case DF_WORD:
for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
- FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32);
+ FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32, status);
}
break;
case DF_DOUBLE:
for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
- FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64);
+ FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64, status);
}
break;
default:
@@ -2844,6 +2846,7 @@ void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
uint32_t ws, uint32_t wt)
{
+ float_status *status = &env->active_tc.msa_fp_status;
wr_t wx, *pwx = &wx;
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
wr_t *pws = &(env->active_fpu.fpr[ws].wr);
@@ -2855,9 +2858,9 @@ void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
switch (df) {
case DF_WORD:
for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
- if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) {
+ if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32, status)) {
MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pws->w[i], 32);
- } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) {
+ } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32, status)) {
MSA_FLOAT_MAXOP(pwx->w[i], max, pwt->w[i], pwt->w[i], 32);
} else {
MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pwt->w[i], 32);
@@ -2866,9 +2869,9 @@ void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
break;
case DF_DOUBLE:
for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
- if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) {
+ if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64, status)) {
MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pws->d[i], 64);
- } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) {
+ } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64, status)) {
MSA_FLOAT_MAXOP(pwx->d[i], max, pwt->d[i], pwt->d[i], 64);
} else {
MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pwt->d[i], 64);
@@ -2887,6 +2890,7 @@ void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
uint32_t ws, uint32_t wt)
{
+ float_status *status = &env->active_tc.msa_fp_status;
wr_t wx, *pwx = &wx;
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
wr_t *pws = &(env->active_fpu.fpr[ws].wr);
@@ -2898,12 +2902,12 @@ void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
switch (df) {
case DF_WORD:
for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
- FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32);
+ FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32, status);
}
break;
case DF_DOUBLE:
for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
- FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64);
+ FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64, status);
}
break;
default:
@@ -2918,16 +2922,18 @@ void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df,
uint32_t wd, uint32_t ws)
{
+ float_status* status = &env->active_tc.msa_fp_status;
+
wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
wr_t *pws = &(env->active_fpu.fpr[ws].wr);
if (df == DF_WORD) {
- pwd->w[0] = helper_float_class_s(pws->w[0]);
- pwd->w[1] = helper_float_class_s(pws->w[1]);
- pwd->w[2] = helper_float_class_s(pws->w[2]);
- pwd->w[3] = helper_float_class_s(pws->w[3]);
+ pwd->w[0] = float_class_s(pws->w[0], status);
+ pwd->w[1] = float_class_s(pws->w[1], status);
+ pwd->w[2] = float_class_s(pws->w[2], status);
+ pwd->w[3] = float_class_s(pws->w[3], status);
} else {
- pwd->d[0] = helper_float_class_d(pws->d[0]);
- pwd->d[1] = helper_float_class_d(pws->d[1]);
+ pwd->d[0] = float_class_d(pws->d[0], status);
+ pwd->d[1] = float_class_d(pws->d[1], status);
}
}
@@ -2941,7 +2947,7 @@ void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df,
c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \
\
if (get_enabled_exceptions(env, c)) { \
- DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
} else if (float ## BITS ## _is_any_nan(ARG)) { \
DEST = 0; \
} \
@@ -3045,12 +3051,12 @@ void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
set_float_exception_flags(0, status); \
DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, status); \
c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) || \
- float ## BITS ## _is_quiet_nan(DEST) ? \
+ float ## BITS ## _is_quiet_nan(DEST, status) ? \
0 : RECIPROCAL_INEXACT, \
IS_DENORMAL(DEST, BITS)); \
\
if (get_enabled_exceptions(env, c)) { \
- DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
} \
} while (0)
@@ -3166,7 +3172,7 @@ void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
\
if (get_enabled_exceptions(env, c)) { \
- DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
} \
} while (0)
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 1ae1dda0af..69daade24e 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -2447,6 +2447,7 @@ void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr,
#define FLOAT_TWO32 make_float32(1 << 30)
#define FLOAT_TWO64 make_float64(1ULL << 62)
+
#define FP_TO_INT32_OVERFLOW 0x7fffffff
#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL
@@ -2574,21 +2575,13 @@ void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt)
((arg1 & 0x4) << 22);
break;
case 31:
- if (env->insn_flags & ISA_MIPS32R6) {
- uint32_t mask = 0xfefc0000;
- env->active_fpu.fcr31 = (arg1 & ~mask) |
- (env->active_fpu.fcr31 & mask);
- } else if (!(arg1 & 0x007c0000)) {
- env->active_fpu.fcr31 = arg1;
- }
+ env->active_fpu.fcr31 = (arg1 & env->active_fpu.fcr31_rw_bitmask) |
+ (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask));
break;
default:
return;
}
- /* set rounding mode */
- restore_rounding_mode(env);
- /* set flush-to-zero mode */
- restore_flush_mode(env);
+ restore_fp_status(env);
set_float_exception_flags(0, &env->active_fpu.fp_status);
if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
do_raise_exception(env, EXCP_FPE, GETPC());
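[Editor's sketch, not part of the patch] The ctc1 change above replaces the per-ISA special cases with a single masked read-modify-write against the new fcr31_rw_bitmask, so only the architecturally writable FCR31 bits can change. The pattern, with illustrative names:

    #include <stdint.h>

    /* Take the writable bits from the new value, keep everything else. */
    static uint32_t fcr31_write(uint32_t old_fcr31, uint32_t new_val, uint32_t rw_mask)
    {
        return (new_val & rw_mask) | (old_fcr31 & ~rw_mask);
    }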
@@ -2659,7 +2652,7 @@ uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
uint64_t fdt2;
fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
- fdt2 = float64_maybe_silence_nan(fdt2);
+ fdt2 = float64_maybe_silence_nan(fdt2, &env->active_fpu.fp_status);
update_fcr31(env, GETPC());
return fdt2;
}
@@ -2682,7 +2675,7 @@ uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
return fdt2;
}
-uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0)
+uint64_t helper_float_cvt_l_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t dt2;
@@ -2695,7 +2688,7 @@ uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0)
return dt2;
}
-uint64_t helper_float_cvtl_s(CPUMIPSState *env, uint32_t fst0)
+uint64_t helper_float_cvt_l_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t dt2;
@@ -2749,7 +2742,7 @@ uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
uint32_t fst2;
fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
- fst2 = float32_maybe_silence_nan(fst2);
+ fst2 = float32_maybe_silence_nan(fst2, &env->active_fpu.fp_status);
update_fcr31(env, GETPC());
return fst2;
}
@@ -2790,7 +2783,7 @@ uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
return wt2;
}
-uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0)
+uint32_t helper_float_cvt_w_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t wt2;
@@ -2803,7 +2796,7 @@ uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0)
return wt2;
}
-uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0)
+uint32_t helper_float_cvt_w_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
@@ -2816,7 +2809,7 @@ uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0)
return wt2;
}
-uint64_t helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0)
+uint64_t helper_float_round_l_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t dt2;
@@ -2831,7 +2824,7 @@ uint64_t helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0)
return dt2;
}
-uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0)
+uint64_t helper_float_round_l_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t dt2;
@@ -2846,7 +2839,7 @@ uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0)
return dt2;
}
-uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0)
+uint32_t helper_float_round_w_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
@@ -2861,7 +2854,7 @@ uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0)
return wt2;
}
-uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0)
+uint32_t helper_float_round_w_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t wt2;
@@ -2876,7 +2869,7 @@ uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0)
return wt2;
}
-uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0)
+uint64_t helper_float_trunc_l_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t dt2;
@@ -2889,7 +2882,7 @@ uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0)
return dt2;
}
-uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0)
+uint64_t helper_float_trunc_l_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t dt2;
@@ -2902,7 +2895,7 @@ uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0)
return dt2;
}
-uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0)
+uint32_t helper_float_trunc_w_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
@@ -2915,7 +2908,7 @@ uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0)
return wt2;
}
-uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0)
+uint32_t helper_float_trunc_w_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t wt2;
@@ -2928,7 +2921,7 @@ uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0)
return wt2;
}
-uint64_t helper_float_ceill_d(CPUMIPSState *env, uint64_t fdt0)
+uint64_t helper_float_ceil_l_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t dt2;
@@ -2943,7 +2936,7 @@ uint64_t helper_float_ceill_d(CPUMIPSState *env, uint64_t fdt0)
return dt2;
}
-uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0)
+uint64_t helper_float_ceil_l_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t dt2;
@@ -2958,7 +2951,7 @@ uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0)
return dt2;
}
-uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0)
+uint32_t helper_float_ceil_w_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
@@ -2973,7 +2966,7 @@ uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0)
return wt2;
}
-uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0)
+uint32_t helper_float_ceil_w_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t wt2;
@@ -2988,7 +2981,7 @@ uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0)
return wt2;
}
-uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0)
+uint64_t helper_float_floor_l_d(CPUMIPSState *env, uint64_t fdt0)
{
uint64_t dt2;
@@ -3003,7 +2996,7 @@ uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0)
return dt2;
}
-uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0)
+uint64_t helper_float_floor_l_s(CPUMIPSState *env, uint32_t fst0)
{
uint64_t dt2;
@@ -3018,7 +3011,7 @@ uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0)
return dt2;
}
-uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0)
+uint32_t helper_float_floor_w_d(CPUMIPSState *env, uint64_t fdt0)
{
uint32_t wt2;
@@ -3033,7 +3026,7 @@ uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0)
return wt2;
}
-uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0)
+uint32_t helper_float_floor_w_s(CPUMIPSState *env, uint32_t fst0)
{
uint32_t wt2;
@@ -3048,6 +3041,334 @@ uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0)
return wt2;
}
+uint64_t helper_float_cvt_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_cvt_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_cvt_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_cvt_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_round_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_round_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_round_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_round_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_trunc_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_trunc_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_trunc_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_trunc_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_ceil_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_ceil_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_ceil_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_ceil_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_floor_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_floor_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_floor_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_floor_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
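[Editor's sketch, not part of the patch] Every *_2008 helper added above follows one shape: perform the conversion, then, if the Invalid flag was raised and the input was a NaN, force the result to 0 as the 2008/R6 semantics require (the pre-2008 helpers elsewhere in this file substitute FP_TO_INT*_OVERFLOW instead). The same shape expressed with host C99 floating point, purely for illustration:

    #include <fenv.h>
    #include <math.h>
    #include <stdint.h>

    /* Convert double to int64 with NaN mapped to 0 when Invalid is raised. */
    static int64_t cvt_2008_l_d(double in)
    {
        int64_t out;

        feclearexcept(FE_INVALID);
        out = (int64_t)llrint(in);
        if (fetestexcept(FE_INVALID) && isnan(in)) {
            out = 0;
        }
        return out;
    }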
/* unary operations, not modifying fp status */
#define FLOAT_UNOP(name) \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
@@ -3199,11 +3520,12 @@ FLOAT_RINT(rint_d, 64)
#define FLOAT_CLASS_POSITIVE_ZERO 0x200
#define FLOAT_CLASS(name, bits) \
-uint ## bits ## _t helper_float_ ## name (uint ## bits ## _t arg) \
+uint ## bits ## _t float_ ## name (uint ## bits ## _t arg, \
+ float_status *status) \
{ \
- if (float ## bits ## _is_signaling_nan(arg)) { \
+ if (float ## bits ## _is_signaling_nan(arg, status)) { \
return FLOAT_CLASS_SIGNALING_NAN; \
- } else if (float ## bits ## _is_quiet_nan(arg)) { \
+ } else if (float ## bits ## _is_quiet_nan(arg, status)) { \
return FLOAT_CLASS_QUIET_NAN; \
} else if (float ## bits ## _is_neg(arg)) { \
if (float ## bits ## _is_infinity(arg)) { \
@@ -3226,6 +3548,12 @@ uint ## bits ## _t helper_float_ ## name (uint ## bits ## _t arg) \
return FLOAT_CLASS_POSITIVE_NORMAL; \
} \
} \
+} \
+ \
+uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \
+ uint ## bits ## _t arg) \
+{ \
+ return float_ ## name(arg, &env->active_fpu.fp_status); \
}
FLOAT_CLASS(class_s, 32)
diff --git a/target-mips/translate.c b/target-mips/translate.c
index aaa1d02683..cc321e9cce 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -1435,6 +1435,8 @@ typedef struct DisasContext {
bool vp;
bool cmgcr;
bool mrp;
+ bool nan2008;
+ bool abs2008;
} DisasContext;
enum {
@@ -8890,7 +8892,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_abs_s(fp0, fp0);
+ if (ctx->abs2008) {
+ tcg_gen_andi_i32(fp0, fp0, 0x7fffffffUL);
+ } else {
+ gen_helper_float_abs_s(fp0, fp0);
+ }
gen_store_fpr32(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
}
@@ -8909,7 +8915,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_chs_s(fp0, fp0);
+ if (ctx->abs2008) {
+ tcg_gen_xori_i32(fp0, fp0, 1UL << 31);
+ } else {
+ gen_helper_float_chs_s(fp0, fp0);
+ }
gen_store_fpr32(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
}
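[Editor's sketch, not part of the patch] With abs2008 set, the ABS/NEG cases above no longer call the helpers: IEEE 754-2008 abs and neg are pure sign-bit operations that must not trap on or quieten NaNs, so the translator emits the andi/xori directly. In plain C the single-precision cases reduce to:

    #include <stdint.h>

    static uint32_t f32_abs2008(uint32_t bits) { return bits & 0x7fffffffu; }  /* clear sign */
    static uint32_t f32_neg2008(uint32_t bits) { return bits ^ 0x80000000u; }  /* flip sign */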
@@ -8921,7 +8931,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(ctx, fp32, fs);
- gen_helper_float_roundl_s(fp64, cpu_env, fp32);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_round_l_s(fp64, cpu_env, fp32);
+ }
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
@@ -8934,7 +8948,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(ctx, fp32, fs);
- gen_helper_float_truncl_s(fp64, cpu_env, fp32);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_trunc_l_s(fp64, cpu_env, fp32);
+ }
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
@@ -8947,7 +8965,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(ctx, fp32, fs);
- gen_helper_float_ceill_s(fp64, cpu_env, fp32);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_ceil_l_s(fp64, cpu_env, fp32);
+ }
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
@@ -8960,7 +8982,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(ctx, fp32, fs);
- gen_helper_float_floorl_s(fp64, cpu_env, fp32);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_floor_l_s(fp64, cpu_env, fp32);
+ }
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
@@ -8971,7 +8997,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_roundw_s(fp0, cpu_env, fp0);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_round_w_s(fp0, cpu_env, fp0);
+ }
gen_store_fpr32(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
}
@@ -8981,7 +9011,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_truncw_s(fp0, cpu_env, fp0);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_trunc_w_s(fp0, cpu_env, fp0);
+ }
gen_store_fpr32(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
}
@@ -8991,7 +9025,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_ceilw_s(fp0, cpu_env, fp0);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_ceil_w_s(fp0, cpu_env, fp0);
+ }
gen_store_fpr32(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
}
@@ -9001,7 +9039,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_floorw_s(fp0, cpu_env, fp0);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_floor_w_s(fp0, cpu_env, fp0);
+ }
gen_store_fpr32(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
}
@@ -9121,7 +9163,7 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
{
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_class_s(fp0, fp0);
+ gen_helper_float_class_s(fp0, cpu_env, fp0);
gen_store_fpr32(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
}
@@ -9250,7 +9292,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i32 fp0 = tcg_temp_new_i32();
gen_load_fpr32(ctx, fp0, fs);
- gen_helper_float_cvtw_s(fp0, cpu_env, fp0);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_cvt_w_s(fp0, cpu_env, fp0);
+ }
gen_store_fpr32(ctx, fp0, fd);
tcg_temp_free_i32(fp0);
}
@@ -9262,7 +9308,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr32(ctx, fp32, fs);
- gen_helper_float_cvtl_s(fp64, cpu_env, fp32);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_cvt_l_s(fp64, cpu_env, fp32);
+ }
tcg_temp_free_i32(fp32);
gen_store_fpr64(ctx, fp64, fd);
tcg_temp_free_i64(fp64);
@@ -9380,7 +9430,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_abs_d(fp0, fp0);
+ if (ctx->abs2008) {
+ tcg_gen_andi_i64(fp0, fp0, 0x7fffffffffffffffULL);
+ } else {
+ gen_helper_float_abs_d(fp0, fp0);
+ }
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -9401,7 +9455,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_chs_d(fp0, fp0);
+ if (ctx->abs2008) {
+ tcg_gen_xori_i64(fp0, fp0, 1ULL << 63);
+ } else {
+ gen_helper_float_chs_d(fp0, fp0);
+ }
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -9412,7 +9470,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_roundl_d(fp0, cpu_env, fp0);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_round_l_d(fp0, cpu_env, fp0);
+ }
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -9423,7 +9485,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_truncl_d(fp0, cpu_env, fp0);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_trunc_l_d(fp0, cpu_env, fp0);
+ }
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -9434,7 +9500,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_ceill_d(fp0, cpu_env, fp0);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_ceil_l_d(fp0, cpu_env, fp0);
+ }
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -9445,7 +9515,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_floorl_d(fp0, cpu_env, fp0);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_floor_l_d(fp0, cpu_env, fp0);
+ }
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -9457,7 +9531,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_roundw_d(fp32, cpu_env, fp64);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_round_w_d(fp32, cpu_env, fp64);
+ }
tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
tcg_temp_free_i32(fp32);
@@ -9470,7 +9548,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_truncw_d(fp32, cpu_env, fp64);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_trunc_w_d(fp32, cpu_env, fp64);
+ }
tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
tcg_temp_free_i32(fp32);
@@ -9483,7 +9565,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_ceilw_d(fp32, cpu_env, fp64);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_ceil_w_d(fp32, cpu_env, fp64);
+ }
tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
tcg_temp_free_i32(fp32);
@@ -9496,7 +9582,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_floorw_d(fp32, cpu_env, fp64);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_floor_w_d(fp32, cpu_env, fp64);
+ }
tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
tcg_temp_free_i32(fp32);
@@ -9619,7 +9709,7 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_class_d(fp0, fp0);
+ gen_helper_float_class_d(fp0, cpu_env, fp0);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -9769,7 +9859,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp64 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp64, fs);
- gen_helper_float_cvtw_d(fp32, cpu_env, fp64);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_cvt_w_d(fp32, cpu_env, fp64);
+ }
tcg_temp_free_i64(fp64);
gen_store_fpr32(ctx, fp32, fd);
tcg_temp_free_i32(fp32);
@@ -9781,7 +9875,11 @@ static void gen_farith (DisasContext *ctx, enum fopcode op1,
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- gen_helper_float_cvtl_d(fp0, cpu_env, fp0);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_cvt_l_d(fp0, cpu_env, fp0);
+ }
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
@@ -19786,6 +19884,8 @@ void gen_intermediate_code(CPUMIPSState *env, struct TranslationBlock *tb)
(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F));
ctx.vp = (env->CP0_Config5 >> CP0C5_VP) & 1;
ctx.mrp = (env->CP0_Config5 >> CP0C5_MRP) & 1;
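+ /* Latch the FCR31 NAN2008/ABS2008 bits so gen_farith() can pick the 2008 helpers and the bitwise abs/neg forms */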
+ ctx.nan2008 = (env->active_fpu.fcr31 >> FCR31_NAN2008) & 1;
+ ctx.abs2008 = (env->active_fpu.fcr31 >> FCR31_ABS2008) & 1;
restore_cpu_state(env, &ctx);
#ifdef CONFIG_USER_ONLY
ctx.mem_idx = MIPS_HFLAG_UM;
@@ -20141,6 +20241,7 @@ void cpu_state_reset(CPUMIPSState *env)
env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask;
env->CP0_PageGrain = env->cpu_model->CP0_PageGrain;
env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0;
+ env->active_fpu.fcr31_rw_bitmask = env->cpu_model->CP1_fcr31_rw_bitmask;
env->active_fpu.fcr31 = env->cpu_model->CP1_fcr31;
env->msair = env->cpu_model->MSAIR;
env->insn_flags = env->cpu_model->insn_flags;
@@ -20251,8 +20352,7 @@ void cpu_state_reset(CPUMIPSState *env)
}
compute_hflags(env);
- restore_rounding_mode(env);
- restore_flush_mode(env);
+ restore_fp_status(env);
restore_pamask(env);
cs->exception_index = EXCP_NONE;
diff --git a/target-mips/translate_init.c b/target-mips/translate_init.c
index 5af077d0de..b10284cc5d 100644
--- a/target-mips/translate_init.c
+++ b/target-mips/translate_init.c
@@ -84,6 +84,7 @@ struct mips_def_t {
int32_t CP0_TCStatus_rw_bitmask;
int32_t CP0_SRSCtl;
int32_t CP1_fcr0;
+ int32_t CP1_fcr31_rw_bitmask;
int32_t CP1_fcr31;
int32_t MSAIR;
int32_t SEGBITS;
@@ -273,6 +274,8 @@ static const mips_def_t mips_defs[] =
.CP0_Status_rw_bitmask = 0x3678FF1F,
.CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
(1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 32,
.PABITS = 32,
.insn_flags = CPU_MIPS32R2 | ASE_MIPS16,
@@ -303,6 +306,8 @@ static const mips_def_t mips_defs[] =
(0xff << CP0TCSt_TASID),
.CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
(1 << FCR0_D) | (1 << FCR0_S) | (0x95 << FCR0_PRID),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.CP0_SRSCtl = (0xf << CP0SRSCtl_HSS),
.CP0_SRSConf0_rw_bitmask = 0x3fffffff,
.CP0_SRSConf0 = (1U << CP0SRSC0_M) | (0x3fe << CP0SRSC0_SRS3) |
@@ -343,6 +348,8 @@ static const mips_def_t mips_defs[] =
.CP0_Status_rw_bitmask = 0x3778FF1F,
.CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
(1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 32,
.PABITS = 32,
.insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSPR2,
@@ -427,6 +434,7 @@ static const mips_def_t mips_defs[] =
(1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
(1 << FCR0_D) | (1 << FCR0_S) | (0x03 << FCR0_PRID),
.CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 32,
.PABITS = 40,
.insn_flags = CPU_MIPS32R5 | ASE_MSA,
@@ -465,6 +473,7 @@ static const mips_def_t mips_defs[] =
(1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
(1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
.CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .CP1_fcr31_rw_bitmask = 0x0103FFFF,
.SEGBITS = 32,
.PABITS = 32,
.insn_flags = CPU_MIPS32R6 | ASE_MICROMIPS,
@@ -485,6 +494,8 @@ static const mips_def_t mips_defs[] =
.CP0_Status_rw_bitmask = 0x3678FFFF,
/* The R4000 has a full 64bit FPU but doesn't use the fcr0 bits. */
.CP1_fcr0 = (0x5 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0x0183FFFF,
.SEGBITS = 40,
.PABITS = 36,
.insn_flags = CPU_MIPS3,
@@ -503,6 +514,8 @@ static const mips_def_t mips_defs[] =
.CP0_Status_rw_bitmask = 0x3678FFFF,
/* The VR5432 has a full 64bit FPU but doesn't use the fcr0 bits. */
.CP1_fcr0 = (0x54 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 40,
.PABITS = 32,
.insn_flags = CPU_VR54XX,
@@ -548,6 +561,8 @@ static const mips_def_t mips_defs[] =
/* The 5Kf has F64 / L / W but doesn't use the fcr0 bits. */
.CP1_fcr0 = (1 << FCR0_D) | (1 << FCR0_S) |
(0x81 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 42,
.PABITS = 36,
.insn_flags = CPU_MIPS64,
@@ -575,6 +590,8 @@ static const mips_def_t mips_defs[] =
.CP1_fcr0 = (1 << FCR0_3D) | (1 << FCR0_PS) |
(1 << FCR0_D) | (1 << FCR0_S) |
(0x82 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 40,
.PABITS = 36,
.insn_flags = CPU_MIPS64 | ASE_MIPS3D,
@@ -601,6 +618,8 @@ static const mips_def_t mips_defs[] =
.CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) |
(1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
(1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 42,
.PABITS = 36,
.insn_flags = CPU_MIPS64R2 | ASE_MIPS3D,
@@ -686,6 +705,7 @@ static const mips_def_t mips_defs[] =
(1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
(1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
.CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .CP1_fcr31_rw_bitmask = 0x0103FFFF,
.SEGBITS = 48,
.PABITS = 48,
.insn_flags = CPU_MIPS64R6 | ASE_MSA,
@@ -704,6 +724,8 @@ static const mips_def_t mips_defs[] =
.CCRes = 2,
.CP0_Status_rw_bitmask = 0x35D0FFFF,
.CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 40,
.PABITS = 40,
.insn_flags = CPU_LOONGSON2E,
@@ -722,6 +744,8 @@ static const mips_def_t mips_defs[] =
.CCRes = 2,
.CP0_Status_rw_bitmask = 0xF5D0FF1F, /* Bits 7:5 not writable. */
.CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 40,
.PABITS = 40,
.insn_flags = CPU_LOONGSON2F,
@@ -749,6 +773,8 @@ static const mips_def_t mips_defs[] =
.CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) |
(1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
(1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
.SEGBITS = 42,
.PABITS = 36,
.insn_flags = CPU_MIPS64R2 | ASE_DSP | ASE_DSPR2,
@@ -892,4 +918,7 @@ static void msa_reset(CPUMIPSState *env)
/* clear float_status nan mode */
set_default_nan_mode(0, &env->active_tc.msa_fp_status);
+
+ /* set proper signaling bit meaning ("1" means "quiet") */
+ set_snan_bit_is_one(0, &env->active_tc.msa_fp_status);
}
diff --git a/target-moxie/cpu.h b/target-moxie/cpu.h
index c10898eba7..63d5cafc55 100644
--- a/target-moxie/cpu.h
+++ b/target-moxie/cpu.h
@@ -109,7 +109,6 @@ static inline MoxieCPU *moxie_env_get_cpu(CPUMoxieState *env)
#define ENV_OFFSET offsetof(MoxieCPU, env)
MoxieCPU *cpu_moxie_init(const char *cpu_model);
-int cpu_moxie_exec(CPUState *cpu);
void moxie_cpu_do_interrupt(CPUState *cs);
void moxie_cpu_dump_state(CPUState *cpu, FILE *f,
fprintf_function cpu_fprintf, int flags);
@@ -120,7 +119,6 @@ int cpu_moxie_signal_handler(int host_signum, void *pinfo,
#define cpu_init(cpu_model) CPU(cpu_moxie_init(cpu_model))
-#define cpu_exec cpu_moxie_exec
#define cpu_signal_handler cpu_moxie_signal_handler
static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch)
diff --git a/target-openrisc/cpu.h b/target-openrisc/cpu.h
index 810a280061..9451a7cca6 100644
--- a/target-openrisc/cpu.h
+++ b/target-openrisc/cpu.h
@@ -344,7 +344,6 @@ static inline OpenRISCCPU *openrisc_env_get_cpu(CPUOpenRISCState *env)
OpenRISCCPU *cpu_openrisc_init(const char *cpu_model);
void cpu_openrisc_list(FILE *f, fprintf_function cpu_fprintf);
-int cpu_openrisc_exec(CPUState *cpu);
void openrisc_cpu_do_interrupt(CPUState *cpu);
bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req);
void openrisc_cpu_dump_state(CPUState *cpu, FILE *f,
@@ -358,7 +357,6 @@ int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address,
int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
#define cpu_list cpu_openrisc_list
-#define cpu_exec cpu_openrisc_exec
#define cpu_signal_handler cpu_openrisc_signal_handler
#ifndef CONFIG_USER_ONLY
diff --git a/target-ppc/cpu-qom.h b/target-ppc/cpu-qom.h
index 0fad2def0a..286410502f 100644
--- a/target-ppc/cpu-qom.h
+++ b/target-ppc/cpu-qom.h
@@ -70,18 +70,21 @@ enum powerpc_mmu_t {
#define POWERPC_MMU_64 0x00010000
#define POWERPC_MMU_1TSEG 0x00020000
#define POWERPC_MMU_AMR 0x00040000
+#define POWERPC_MMU_64K 0x00080000
/* 64 bits PowerPC MMU */
POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001,
/* Architecture 2.03 and later (has LPCR) */
POWERPC_MMU_2_03 = POWERPC_MMU_64 | 0x00000002,
/* Architecture 2.06 variant */
POWERPC_MMU_2_06 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
+ | POWERPC_MMU_64K
| POWERPC_MMU_AMR | 0x00000003,
/* Architecture 2.06 "degraded" (no 1T segments) */
POWERPC_MMU_2_06a = POWERPC_MMU_64 | POWERPC_MMU_AMR
| 0x00000003,
/* Architecture 2.07 variant */
POWERPC_MMU_2_07 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
+ | POWERPC_MMU_64K
| POWERPC_MMU_AMR | 0x00000004,
/* Architecture 2.07 "degraded" (no 1T segments) */
POWERPC_MMU_2_07a = POWERPC_MMU_64 | POWERPC_MMU_AMR
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index b1354a4791..2666a3f80d 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -377,12 +377,16 @@ struct ppc_slb_t {
#define LPCR_VPM1 (1ull << (63 - 1))
#define LPCR_ISL (1ull << (63 - 2))
#define LPCR_KBV (1ull << (63 - 3))
+#define LPCR_DPFD_SHIFT (63 - 11)
+#define LPCR_DPFD (0x3ull << LPCR_DPFD_SHIFT)
+#define LPCR_VRMASD_SHIFT (63 - 16)
+#define LPCR_VRMASD (0x1full << LPCR_VRMASD_SHIFT)
+#define LPCR_RMLS_SHIFT (63 - 37)
+#define LPCR_RMLS (0xfull << LPCR_RMLS_SHIFT)
#define LPCR_ILE (1ull << (63 - 38))
-#define LPCR_MER (1ull << (63 - 52))
-#define LPCR_LPES0 (1ull << (63 - 60))
-#define LPCR_LPES1 (1ull << (63 - 61))
#define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */
#define LPCR_AIL (3ull << LPCR_AIL_SHIFT)
+#define LPCR_ONL (1ull << (63 - 45))
#define LPCR_P7_PECE0 (1ull << (63 - 49))
#define LPCR_P7_PECE1 (1ull << (63 - 50))
#define LPCR_P7_PECE2 (1ull << (63 - 51))
@@ -391,6 +395,12 @@ struct ppc_slb_t {
#define LPCR_P8_PECE2 (1ull << (63 - 49))
#define LPCR_P8_PECE3 (1ull << (63 - 50))
#define LPCR_P8_PECE4 (1ull << (63 - 51))
+#define LPCR_MER (1ull << (63 - 52))
+#define LPCR_TC (1ull << (63 - 54))
+#define LPCR_LPES0 (1ull << (63 - 60))
+#define LPCR_LPES1 (1ull << (63 - 61))
+#define LPCR_RMI (1ull << (63 - 62))
+#define LPCR_HDICE (1ull << (63 - 63))
#define msr_sf ((env->msr >> MSR_SF) & 1)
#define msr_isf ((env->msr >> MSR_ISF) & 1)
@@ -1037,6 +1047,8 @@ struct CPUPPCState {
uint64_t insns_flags2;
#if defined(TARGET_PPC64)
struct ppc_segment_page_sizes sps;
+ ppc_slb_t vrma_slb;
+ target_ulong rmls;
bool ci_large_pages;
#endif
@@ -1189,7 +1201,6 @@ extern const struct VMStateDescription vmstate_ppc_cpu;
PowerPCCPU *cpu_ppc_init(const char *cpu_model);
void ppc_translate_init(void);
void gen_update_current_nip(void *opaque);
-int cpu_ppc_exec (CPUState *s);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
is returned if the signal was handled by the virtual CPU. */
@@ -1268,7 +1279,6 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
#define cpu_init(cpu_model) CPU(cpu_ppc_init(cpu_model))
-#define cpu_exec cpu_ppc_exec
#define cpu_signal_handler cpu_ppc_signal_handler
#define cpu_list ppc_cpu_list
diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c
index 533866b87b..d6e1678a63 100644
--- a/target-ppc/excp_helper.c
+++ b/target-ppc/excp_helper.c
@@ -753,7 +753,6 @@ void ppc_cpu_do_interrupt(CPUState *cs)
static void ppc_hw_interrupt(CPUPPCState *env)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
- int hdice;
#if 0
CPUState *cs = CPU(cpu);
@@ -781,19 +780,25 @@ static void ppc_hw_interrupt(CPUPPCState *env)
return;
}
#endif
- if (0) {
- /* XXX: find a suitable condition to enable the hypervisor mode */
- hdice = env->spr[SPR_LPCR] & 1;
- } else {
- hdice = 0;
- }
- if ((msr_ee != 0 || msr_hv == 0 || msr_pr != 0) && hdice != 0) {
- /* Hypervisor decrementer exception */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
+ /* Hypervisor decrementer exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
+ /* LPCR will be clear when not supported so this will work */
+ bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
+ if ((msr_ee != 0 || msr_hv == 0) && hdice) {
+ /* HDEC clears on delivery */
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
return;
}
}
+ /* External interrupt can ignore MSR:EE under some circumstances */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
+ bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+ if (msr_ee != 0 || (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
+ return;
+ }
+ }
if (msr_ce != 0) {
/* External critical interrupt */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
@@ -839,17 +844,6 @@ static void ppc_hw_interrupt(CPUPPCState *env)
powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
return;
}
- /* External interrupt */
- if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
- /* Taking an external interrupt does not clear the external
- * interrupt status
- */
-#if 0
- env->pending_interrupts &= ~(1 << PPC_INTERRUPT_EXT);
-#endif
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
- return;
- }
if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
@@ -944,6 +938,11 @@ void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
cs->halted = 1;
env->in_pm_state = true;
+ /* The architecture specifies that HDEC interrupts are
+ * discarded in PM states
+ */
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
+
/* Technically, nap doesn't set EE, but if we don't set it
* then ppc_hw_interrupt() won't deliver. We could add some
* other tests there based on LPCR but it's simpler to just
diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index 6fd56a868d..d9795d04d0 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -73,7 +73,7 @@ void helper_compute_fprf(CPUPPCState *env, uint64_t arg)
farg.ll = arg;
isneg = float64_is_neg(farg.d);
if (unlikely(float64_is_any_nan(farg.d))) {
- if (float64_is_signaling_nan(farg.d)) {
+ if (float64_is_signaling_nan(farg.d, &env->fp_status)) {
/* Signaling NaN: flags are undefined */
fprf = 0x00;
} else {
@@ -534,8 +534,8 @@ uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
/* Magnitude subtraction of infinities */
farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
} else {
- if (unlikely(float64_is_signaling_nan(farg1.d) ||
- float64_is_signaling_nan(farg2.d))) {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status))) {
/* sNaN addition */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -558,8 +558,8 @@ uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
/* Magnitude subtraction of infinities */
farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
} else {
- if (unlikely(float64_is_signaling_nan(farg1.d) ||
- float64_is_signaling_nan(farg2.d))) {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status))) {
/* sNaN subtraction */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -582,8 +582,8 @@ uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
/* Multiplication of zero by infinity */
farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
} else {
- if (unlikely(float64_is_signaling_nan(farg1.d) ||
- float64_is_signaling_nan(farg2.d))) {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status))) {
/* sNaN multiplication */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -609,8 +609,8 @@ uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
/* Division of zero by zero */
farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
} else {
- if (unlikely(float64_is_signaling_nan(farg1.d) ||
- float64_is_signaling_nan(farg2.d))) {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status))) {
/* sNaN division */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -632,7 +632,7 @@ uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
if (unlikely(env->fp_status.float_exception_flags)) { \
if (float64_is_any_nan(arg)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
- if (float64_is_signaling_nan(arg)) { \
+ if (float64_is_signaling_nan(arg, &env->fp_status)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
} \
farg.ll = nanval; \
@@ -681,7 +681,7 @@ static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
farg.ll = arg;
- if (unlikely(float64_is_signaling_nan(farg.d))) {
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN round */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
farg.ll = arg | 0x0008000000000000ULL;
@@ -737,9 +737,9 @@ uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
/* Multiplication of zero by infinity */
farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
} else {
- if (unlikely(float64_is_signaling_nan(farg1.d) ||
- float64_is_signaling_nan(farg2.d) ||
- float64_is_signaling_nan(farg3.d))) {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg3.d, &env->fp_status))) {
/* sNaN operation */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -780,9 +780,9 @@ uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
/* Multiplication of zero by infinity */
farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
} else {
- if (unlikely(float64_is_signaling_nan(farg1.d) ||
- float64_is_signaling_nan(farg2.d) ||
- float64_is_signaling_nan(farg3.d))) {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg3.d, &env->fp_status))) {
/* sNaN operation */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -821,9 +821,9 @@ uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
/* Multiplication of zero by infinity */
farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
} else {
- if (unlikely(float64_is_signaling_nan(farg1.d) ||
- float64_is_signaling_nan(farg2.d) ||
- float64_is_signaling_nan(farg3.d))) {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg3.d, &env->fp_status))) {
/* sNaN operation */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -866,9 +866,9 @@ uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
/* Multiplication of zero by infinity */
farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
} else {
- if (unlikely(float64_is_signaling_nan(farg1.d) ||
- float64_is_signaling_nan(farg2.d) ||
- float64_is_signaling_nan(farg3.d))) {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg3.d, &env->fp_status))) {
/* sNaN operation */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -903,7 +903,7 @@ uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
farg.ll = arg;
- if (unlikely(float64_is_signaling_nan(farg.d))) {
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN square root */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -921,7 +921,7 @@ uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
farg.ll = arg;
if (unlikely(float64_is_any_nan(farg.d))) {
- if (unlikely(float64_is_signaling_nan(farg.d))) {
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN reciprocal square root */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
farg.ll = float64_snan_to_qnan(farg.ll);
@@ -942,7 +942,7 @@ uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
farg.ll = arg;
- if (unlikely(float64_is_signaling_nan(farg.d))) {
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN reciprocal */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -958,7 +958,7 @@ uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
farg.ll = arg;
- if (unlikely(float64_is_signaling_nan(farg.d))) {
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN reciprocal */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -977,7 +977,7 @@ uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
farg.ll = arg;
if (unlikely(float64_is_any_nan(farg.d))) {
- if (unlikely(float64_is_signaling_nan(farg.d))) {
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
/* sNaN reciprocal square root */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
farg.ll = float64_snan_to_qnan(farg.ll);
@@ -1100,8 +1100,8 @@ void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
env->fpscr |= ret << FPSCR_FPRF;
env->crf[crfD] = ret;
if (unlikely(ret == 0x01UL
- && (float64_is_signaling_nan(farg1.d) ||
- float64_is_signaling_nan(farg2.d)))) {
+ && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
/* sNaN comparison */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
}
@@ -1131,8 +1131,8 @@ void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
env->fpscr |= ret << FPSCR_FPRF;
env->crf[crfD] = ret;
if (unlikely(ret == 0x01UL)) {
- if (float64_is_signaling_nan(farg1.d) ||
- float64_is_signaling_nan(farg2.d)) {
+ if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status)) {
/* sNaN comparison */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
POWERPC_EXCP_FP_VXVC, 1);
@@ -1168,7 +1168,7 @@ static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
u.l = val;
/* NaN are not treated the same way IEEE 754 does */
- if (unlikely(float32_is_quiet_nan(u.f))) {
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
return 0;
}
@@ -1181,7 +1181,7 @@ static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
u.l = val;
/* NaN are not treated the same way IEEE 754 does */
- if (unlikely(float32_is_quiet_nan(u.f))) {
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
return 0;
}
@@ -1194,7 +1194,7 @@ static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
u.l = val;
/* NaN are not treated the same way IEEE 754 does */
- if (unlikely(float32_is_quiet_nan(u.f))) {
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
return 0;
}
@@ -1207,7 +1207,7 @@ static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
u.l = val;
/* NaN are not treated the same way IEEE 754 does */
- if (unlikely(float32_is_quiet_nan(u.f))) {
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
return 0;
}
@@ -1245,7 +1245,7 @@ static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
u.l = val;
/* NaN are not treated the same way IEEE 754 does */
- if (unlikely(float32_is_quiet_nan(u.f))) {
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
return 0;
}
tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
@@ -1261,7 +1261,7 @@ static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
u.l = val;
/* NaN are not treated the same way IEEE 754 does */
- if (unlikely(float32_is_quiet_nan(u.f))) {
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
return 0;
}
tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
@@ -1839,8 +1839,8 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
- } else if (tp##_is_signaling_nan(xa.fld) || \
- tp##_is_signaling_nan(xb.fld)) { \
+ } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+ tp##_is_signaling_nan(xb.fld, &tstat)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
@@ -1894,8 +1894,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) || \
(tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf); \
- } else if (tp##_is_signaling_nan(xa.fld) || \
- tp##_is_signaling_nan(xb.fld)) { \
+ } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+ tp##_is_signaling_nan(xb.fld, &tstat)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
@@ -1948,8 +1948,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} else if (tp##_is_zero(xa.fld) && \
tp##_is_zero(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf); \
- } else if (tp##_is_signaling_nan(xa.fld) || \
- tp##_is_signaling_nan(xb.fld)) { \
+ } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+ tp##_is_signaling_nan(xb.fld, &tstat)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
@@ -1990,7 +1990,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_reset_fpstatus(env); \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_signaling_nan(xb.fld))) { \
+ if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status); \
@@ -2039,7 +2039,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
- } else if (tp##_is_signaling_nan(xb.fld)) { \
+ } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
@@ -2089,7 +2089,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
- } else if (tp##_is_signaling_nan(xb.fld)) { \
+ } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
} \
} \
@@ -2274,9 +2274,9 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
\
if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
- if (tp##_is_signaling_nan(xa.fld) || \
- tp##_is_signaling_nan(b->fld) || \
- tp##_is_signaling_nan(c->fld)) { \
+ if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+ tp##_is_signaling_nan(b->fld, &tstat) || \
+ tp##_is_signaling_nan(c->fld, &tstat)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
tstat.float_exception_flags &= ~float_flag_invalid; \
} \
@@ -2358,8 +2358,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
\
if (unlikely(float64_is_any_nan(xa.VsrD(0)) || \
float64_is_any_nan(xb.VsrD(0)))) { \
- if (float64_is_signaling_nan(xa.VsrD(0)) || \
- float64_is_signaling_nan(xb.VsrD(0))) { \
+ if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
if (ordered) { \
@@ -2406,8 +2406,8 @@ void helper_##name(CPUPPCState *env, uint32_t opcode) \
\
for (i = 0; i < nels; i++) { \
xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status); \
- if (unlikely(tp##_is_signaling_nan(xa.fld) || \
- tp##_is_signaling_nan(xb.fld))) { \
+ if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
+ tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
} \
@@ -2446,8 +2446,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
for (i = 0; i < nels; i++) { \
if (unlikely(tp##_is_any_nan(xa.fld) || \
tp##_is_any_nan(xb.fld))) { \
- if (tp##_is_signaling_nan(xa.fld) || \
- tp##_is_signaling_nan(xb.fld)) { \
+ if (tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
+ tp##_is_signaling_nan(xb.fld, &env->fp_status)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
if (svxvc) { \
@@ -2500,7 +2500,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
\
for (i = 0; i < nels; i++) { \
xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
- if (unlikely(stp##_is_signaling_nan(xb.sfld))) { \
+ if (unlikely(stp##_is_signaling_nan(xb.sfld, \
+ &env->fp_status))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
} \
@@ -2555,7 +2556,7 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
\
for (i = 0; i < nels; i++) { \
if (unlikely(stp##_is_any_nan(xb.sfld))) { \
- if (stp##_is_signaling_nan(xb.sfld)) { \
+ if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
} \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
@@ -2664,7 +2665,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
} \
\
for (i = 0; i < nels; i++) { \
- if (unlikely(tp##_is_signaling_nan(xb.fld))) { \
+ if (unlikely(tp##_is_signaling_nan(xb.fld, \
+ &env->fp_status))) { \
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
xt.fld = tp##_snan_to_qnan(xb.fld); \
} else { \
@@ -2687,19 +2689,19 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
helper_float_check_status(env); \
}
-VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_nearest_even, 1)
+VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
-VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_nearest_even, 0)
+VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
-VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_nearest_even, 0)
+VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 5056ac2095..1f5cfd0990 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -16,6 +16,7 @@ DEF_HELPER_1(rfmci, void, env)
DEF_HELPER_2(pminsn, void, env, i32)
DEF_HELPER_1(rfid, void, env)
DEF_HELPER_1(hrfid, void, env)
+DEF_HELPER_2(store_lpcr, void, env, tl)
#endif
DEF_HELPER_1(check_tlb_flush, void, env)
#endif
@@ -599,6 +600,8 @@ DEF_HELPER_2(store_601_rtcl, void, env, tl)
DEF_HELPER_2(store_601_rtcu, void, env, tl)
DEF_HELPER_1(load_decr, tl, env)
DEF_HELPER_2(store_decr, void, env, tl)
+DEF_HELPER_1(load_hdecr, tl, env)
+DEF_HELPER_2(store_hdecr, void, env, tl)
DEF_HELPER_2(store_hid0_601, void, env, tl)
DEF_HELPER_3(store_403_pbr, void, env, i32, tl)
DEF_HELPER_1(load_40x_pit, tl, env)
diff --git a/target-ppc/helper_regs.h b/target-ppc/helper_regs.h
index 8fc09344db..8fdfa5c7e6 100644
--- a/target-ppc/helper_regs.h
+++ b/target-ppc/helper_regs.h
@@ -136,6 +136,10 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
/* Change the exception prefix on PowerPC 601 */
env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
}
+ /* If PR=1 then EE, IR and DR must be 1 */
+ if ((value >> MSR_PR) & 1) {
+ value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
+ }
#endif
env->msr = value;
hreg_compute_hflags(env);
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 5b7b5e9eb1..82c2186bcf 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -450,9 +450,47 @@ void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
}
}
+static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
+ uint64_t pte0, uint64_t pte1)
+{
+ int i;
+
+ if (!(pte0 & HPTE64_V_LARGE)) {
+ if (sps->page_shift != 12) {
+ /* 4kiB page in a non 4kiB segment */
+ return 0;
+ }
+ /* Normal 4kiB page */
+ return 12;
+ }
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_page_size *ps = &sps->enc[i];
+ uint64_t mask;
+
+ if (!ps->page_shift) {
+ break;
+ }
+
+ if (ps->page_shift == 12) {
+ /* L bit is set so this can't be a 4kiB page */
+ continue;
+ }
+
+ mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
+
+ if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
+ return ps->page_shift;
+ }
+ }
+
+ return 0; /* Bad page size encoding */
+}
+
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
- bool secondary, target_ulong ptem,
- ppc_hash_pte64_t *pte)
+ const struct ppc_one_seg_page_size *sps,
+ target_ulong ptem,
+ ppc_hash_pte64_t *pte, unsigned *pshift)
{
CPUPPCState *env = &cpu->env;
int i;
@@ -469,9 +507,22 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
pte0 = ppc_hash64_load_hpte0(cpu, token, i);
pte1 = ppc_hash64_load_hpte1(cpu, token, i);
- if ((pte0 & HPTE64_V_VALID)
- && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
- && HPTE64_V_COMPARE(pte0, ptem)) {
+ /* This compares V, B, H (secondary) and the AVPN */
+ if (HPTE64_V_COMPARE(pte0, ptem)) {
+ *pshift = hpte_page_shift(sps, pte0, pte1);
+ /*
+ * If there is no match, ignore the PTE, it could simply
+ * be for a different segment size encoding and the
+ * architecture specifies we should not match. Linux will
+ * potentially leave behind PTEs for the wrong base page
+ * size when demoting segments.
+ */
+ if (*pshift == 0) {
+ continue;
+ }
+ /* We don't do anything with pshift yet as qemu TLB only deals
+ * with 4K pages anyway
+ */
pte->pte0 = pte0;
pte->pte1 = pte1;
ppc_hash64_stop_access(cpu, token);
@@ -487,31 +538,40 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
ppc_slb_t *slb, target_ulong eaddr,
- ppc_hash_pte64_t *pte)
+ ppc_hash_pte64_t *pte, unsigned *pshift)
{
CPUPPCState *env = &cpu->env;
hwaddr pte_offset;
hwaddr hash;
uint64_t vsid, epnmask, epn, ptem;
+ const struct ppc_one_seg_page_size *sps = slb->sps;
/* The SLB store path should prevent any bad page size encodings
* getting in there, so: */
- assert(slb->sps);
+ assert(sps);
- epnmask = ~((1ULL << slb->sps->page_shift) - 1);
+ /* If ISL is set in LPCR we need to clamp the page size to 4K */
+ if (env->spr[SPR_LPCR] & LPCR_ISL) {
+ /* We assume that when using TCG, 4k is first entry of SPS */
+ sps = &env->sps.sps[0];
+ assert(sps->page_shift == 12);
+ }
+
+ epnmask = ~((1ULL << sps->page_shift) - 1);
if (slb->vsid & SLB_VSID_B) {
/* 1TB segment */
vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
- hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
+ hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
} else {
/* 256M segment */
vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
- hash = vsid ^ (epn >> slb->sps->page_shift);
+ hash = vsid ^ (epn >> sps->page_shift);
}
ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
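+ /* Fold VALID (and, below, SECONDARY) into ptem so HPTE64_V_COMPARE matches them along with the AVPN */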
+ ptem |= HPTE64_V_VALID;
/* Page address translation */
qemu_log_mask(CPU_LOG_MMU,
@@ -525,68 +585,30 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
" vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, vsid, ptem, hash);
- pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);
+ pte_offset = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
if (pte_offset == -1) {
/* Secondary PTEG lookup */
+ ptem |= HPTE64_V_SECONDARY;
qemu_log_mask(CPU_LOG_MMU,
"1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ptem, ~hash);
- pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
+ pte_offset = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
}
return pte_offset;
}
-static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
- uint64_t pte0, uint64_t pte1)
-{
- int i;
-
- if (!(pte0 & HPTE64_V_LARGE)) {
- if (sps->page_shift != 12) {
- /* 4kiB page in a non 4kiB segment */
- return 0;
- }
- /* Normal 4kiB page */
- return 12;
- }
-
- for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
- const struct ppc_one_page_size *ps = &sps->enc[i];
- uint64_t mask;
-
- if (!ps->page_shift) {
- break;
- }
-
- if (ps->page_shift == 12) {
- /* L bit is set so this can't be a 4kiB page */
- continue;
- }
-
- mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
-
- if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
- return ps->page_shift;
- }
- }
-
- return 0; /* Bad page size encoding */
-}
-
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
- uint64_t pte0, uint64_t pte1,
- unsigned *seg_page_shift)
+ uint64_t pte0, uint64_t pte1)
{
CPUPPCState *env = &cpu->env;
int i;
if (!(pte0 & HPTE64_V_LARGE)) {
- *seg_page_shift = 12;
return 12;
}
@@ -604,12 +626,10 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
shift = hpte_page_shift(sps, pte0, pte1);
if (shift) {
- *seg_page_shift = sps->page_shift;
return shift;
}
}
- *seg_page_shift = 0;
return 0;
}
@@ -670,11 +690,52 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
assert((rwx == 0) || (rwx == 1) || (rwx == 2));
+ /* Note on LPCR usage: 970 uses HID4, but our special variant
+ * of store_spr copies relevant fields into env->spr[SPR_LPCR].
+ * Similarily we filter unimplemented bits when storing into
+ * LPCR depending on the MMU version. This code can thus just
+ * use the LPCR "as-is".
+ */
+
/* 1. Handle real mode accesses */
if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
- /* Translation is off */
- /* In real mode the top 4 effective address bits are ignored */
+ /* Translation is supposedly "off" */
+ /* In real mode the top 4 effective address bits are (mostly) ignored */
raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
+
+ /* In HV mode, add HRMOR if top EA bit is clear */
+ if (msr_hv || !env->has_hv_mode) {
+ if (!(eaddr >> 63)) {
+ raddr |= env->spr[SPR_HRMOR];
+ }
+ } else {
+ /* Otherwise, check VPM for RMA vs VRMA */
+ if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+ slb = &env->vrma_slb;
+ if (slb->sps) {
+ goto skip_slb_search;
+ }
+ /* Not much else to do here */
+ cs->exception_index = POWERPC_EXCP_MCHECK;
+ env->error_code = 0;
+ return 1;
+ } else if (raddr < env->rmls) {
+ /* RMA. Check bounds in RMLS */
+ raddr |= env->spr[SPR_RMOR];
+ } else {
+ /* The access failed, generate the appropriate interrupt */
+ if (rwx == 2) {
+ ppc_hash64_set_isi(cs, env, 0x08000000);
+ } else {
+ dsisr = 0x08000000;
+ if (rwx == 1) {
+ dsisr |= 0x02000000;
+ }
+ ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
+ }
+ return 1;
+ }
+ }
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
TARGET_PAGE_SIZE);
@@ -683,7 +744,6 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
/* 2. Translation is on, so look up the SLB */
slb = slb_lookup(cpu, eaddr);
-
if (!slb) {
if (rwx == 2) {
cs->exception_index = POWERPC_EXCP_ISEG;
@@ -696,6 +756,8 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
return 1;
}
+skip_slb_search:
+
/* 3. Check for segment level no-execute violation */
if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
ppc_hash64_set_isi(cs, env, 0x10000000);
@@ -703,7 +765,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
}
/* 4. Locate the PTE in the hash table */
- pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
+ pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
if (pte_offset == -1) {
dsisr = 0x40000000;
if (rwx == 2) {
@@ -719,18 +781,6 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
qemu_log_mask(CPU_LOG_MMU,
"found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
- /* Validate page size encoding */
- apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
- if (!apshift) {
- error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
- " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
- /* Not entirely sure what the right action here, but machine
- * check seems reasonable */
- cs->exception_index = POWERPC_EXCP_MCHECK;
- env->error_code = 0;
- return 1;
- }
-
/* 5. Check access permissions */
pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
@@ -790,27 +840,41 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
- hwaddr pte_offset;
+ hwaddr pte_offset, raddr;
ppc_hash_pte64_t pte;
unsigned apshift;
+ /* Handle real mode */
if (msr_dr == 0) {
/* In real mode the top 4 effective address bits are ignored */
- return addr & 0x0FFFFFFFFFFFFFFFULL;
- }
+ raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
- slb = slb_lookup(cpu, addr);
- if (!slb) {
- return -1;
- }
+ /* In HV mode, add HRMOR if top EA bit is clear */
+ if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
+ return raddr | env->spr[SPR_HRMOR];
+ }
- pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
- if (pte_offset == -1) {
- return -1;
+ /* Otherwise, check VPM for RMA vs VRMA */
+ if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+ slb = &env->vrma_slb;
+ if (!slb->sps) {
+ return -1;
+ }
+ } else if (raddr < env->rmls) {
+ /* RMA. Check bounds in RMLS */
+ return raddr | env->spr[SPR_RMOR];
+ } else {
+ return -1;
+ }
+ } else {
+ slb = slb_lookup(cpu, addr);
+ if (!slb) {
+ return -1;
+ }
}
- apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
- if (!apshift) {
+ pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
+ if (pte_offset == -1) {
return -1;
}
@@ -851,3 +915,146 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
*/
tlb_flush(CPU(cpu), 1);
}
+
+void ppc_hash64_update_rmls(CPUPPCState *env)
+{
+ uint64_t lpcr = env->spr[SPR_LPCR];
+
+ /*
+ * This is the full 4-bit encoding of POWER8. Previous
+ * CPUs only support a subset of these but the filtering
+ * is done when writing LPCR
+ */
+ switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
+ case 0x8: /* 32MB */
+ env->rmls = 0x2000000ull;
+ break;
+ case 0x3: /* 64MB */
+ env->rmls = 0x4000000ull;
+ break;
+ case 0x7: /* 128MB */
+ env->rmls = 0x8000000ull;
+ break;
+ case 0x4: /* 256MB */
+ env->rmls = 0x10000000ull;
+ break;
+ case 0x2: /* 1GB */
+ env->rmls = 0x40000000ull;
+ break;
+ case 0x1: /* 16GB */
+ env->rmls = 0x400000000ull;
+ break;
+ default:
+ /* Reserved encoding, not architecturally defined here: leave rmls at 0 so RMA accesses fail the bounds check */
+ env->rmls = 0;
+ }
+}
+
+void ppc_hash64_update_vrma(CPUPPCState *env)
+{
+ const struct ppc_one_seg_page_size *sps = NULL;
+ target_ulong esid, vsid, lpcr;
+ ppc_slb_t *slb = &env->vrma_slb;
+ uint32_t vrmasd;
+ int i;
+
+ /* First clear it */
+ slb->esid = slb->vsid = 0;
+ slb->sps = NULL;
+
+ /* Is VRMA enabled ? */
+ lpcr = env->spr[SPR_LPCR];
+ if (!(lpcr & LPCR_VPM0)) {
+ return;
+ }
+
+ /* Make one up. Mostly ignore the ESID which will not be
+ * needed for translation
+ */
+ vsid = SLB_VSID_VRMA;
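+ /* LPCR[VRMASD] provides the L/LP page size encoding for the VRMA segment */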
+ vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+ vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
+ esid = SLB_ESID_V;
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
+
+ if (!sps1->page_shift) {
+ break;
+ }
+
+ if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
+ sps = sps1;
+ break;
+ }
+ }
+
+ if (!sps) {
+ error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
+ " vsid 0x"TARGET_FMT_lx, esid, vsid);
+ return;
+ }
+
+ slb->vsid = vsid;
+ slb->esid = esid;
+ slb->sps = sps;
+}
+
+void helper_store_lpcr(CPUPPCState *env, target_ulong val)
+{
+ uint64_t lpcr = 0;
+
+ /* Filter out bits */
+ switch (env->mmu_model) {
+ case POWERPC_MMU_64B: /* 970 */
+ if (val & 0x40) {
+ lpcr |= LPCR_LPES0;
+ }
+ if (val & 0x8000000000000000ull) {
+ lpcr |= LPCR_LPES1;
+ }
+ if (val & 0x20) {
+ lpcr |= (0x4ull << LPCR_RMLS_SHIFT);
+ }
+ if (val & 0x4000000000000000ull) {
+ lpcr |= (0x2ull << LPCR_RMLS_SHIFT);
+ }
+ if (val & 0x2000000000000000ull) {
+ lpcr |= (0x1ull << LPCR_RMLS_SHIFT);
+ }
+ env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;
+
+ /* XXX We could also write LPID from HID4 here
+ * but since we don't tag any translation on it
+ * it doesn't actually matter
+ */
+ /* XXX For proper emulation of 970 we also need
+ * to dig HRMOR out of HID5
+ */
+ break;
+ case POWERPC_MMU_2_03: /* P5p */
+ lpcr = val & (LPCR_RMLS | LPCR_ILE |
+ LPCR_LPES0 | LPCR_LPES1 |
+ LPCR_RMI | LPCR_HDICE);
+ break;
+ case POWERPC_MMU_2_06: /* P7 */
+ lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
+ LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
+ LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
+ LPCR_MER | LPCR_TC |
+ LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE);
+ break;
+ case POWERPC_MMU_2_07: /* P8 */
+ lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
+ LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
+ LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
+ LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
+ LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
+ break;
+ default:
+ ;
+ }
+ env->spr[SPR_LPCR] = lpcr;
+ ppc_hash64_update_rmls(env);
+ ppc_hash64_update_vrma(env);
+}
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index 6423b9f791..3a7476b30a 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -17,8 +17,9 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
target_ulong pte_index,
target_ulong pte0, target_ulong pte1);
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
- uint64_t pte0, uint64_t pte1,
- unsigned *seg_page_shift);
+ uint64_t pte0, uint64_t pte1);
+void ppc_hash64_update_vrma(CPUPPCState *env);
+void ppc_hash64_update_rmls(CPUPPCState *env);
#endif
/*
@@ -37,6 +38,7 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
#define SLB_VSID_B_256M 0x0000000000000000ULL
#define SLB_VSID_B_1T 0x4000000000000000ULL
#define SLB_VSID_VSID 0x3FFFFFFFFFFFF000ULL
+#define SLB_VSID_VRMA (0x0001FFFFFF000000ULL | SLB_VSID_B_1T)
#define SLB_VSID_PTEM (SLB_VSID_B | SLB_VSID_VSID)
#define SLB_VSID_KS 0x0000000000000800ULL
#define SLB_VSID_KP 0x0000000000000400ULL
@@ -63,7 +65,7 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
#define HPTE64_V_AVPN_SHIFT 7
#define HPTE64_V_AVPN 0x3fffffffffffff80ULL
#define HPTE64_V_AVPN_VAL(x) (((x) & HPTE64_V_AVPN) >> HPTE64_V_AVPN_SHIFT)
-#define HPTE64_V_COMPARE(x, y) (!(((x) ^ (y)) & 0xffffffffffffff80ULL))
+#define HPTE64_V_COMPARE(x, y) (!(((x) ^ (y)) & 0xffffffffffffff83ULL))
#define HPTE64_V_LARGE 0x0000000000000004ULL
#define HPTE64_V_SECONDARY 0x0000000000000002ULL
#define HPTE64_V_VALID 0x0000000000000001ULL
diff --git a/target-ppc/timebase_helper.c b/target-ppc/timebase_helper.c
index 66de3137e4..a07faa42cb 100644
--- a/target-ppc/timebase_helper.c
+++ b/target-ppc/timebase_helper.c
@@ -102,6 +102,16 @@ void helper_store_decr(CPUPPCState *env, target_ulong val)
cpu_ppc_store_decr(env, val);
}
+target_ulong helper_load_hdecr(CPUPPCState *env)
+{
+ return cpu_ppc_load_hdecr(env);
+}
+
+void helper_store_hdecr(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_hdecr(env, val);
+}
+
target_ulong helper_load_40x_pit(CPUPPCState *env)
{
return load_40x_pit(env);
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 2f1c59166e..92030b66a5 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -1471,7 +1471,7 @@ static void gen_or(DisasContext *ctx)
} else if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
- } else {
+ } else if (rs != 0) { /* 0 is nop */
int prio = 0;
switch (rs) {
@@ -1514,7 +1514,6 @@ static void gen_or(DisasContext *ctx)
break;
#endif
default:
- /* nop */
break;
}
if (prio) {
@@ -1524,13 +1523,15 @@ static void gen_or(DisasContext *ctx)
tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
gen_store_spr(SPR_PPR, t0);
tcg_temp_free(t0);
- /* Pause us out of TCG otherwise spin loops with smt_low
- * eat too much CPU and the kernel hangs
- */
+ }
#if !defined(CONFIG_USER_ONLY)
- gen_pause(ctx);
+ /* Pause out of TCG otherwise spin loops with smt_low eat too much
+ * CPU and the kernel hangs. This applies to all encodings other
+ * than no-op, e.g., miso(rs=26), yield(27), mdoio(29), mdoom(30),
+ * and all currently undefined.
+ */
+ gen_pause(ctx);
#endif
- }
#endif
}
}
@@ -11407,6 +11408,13 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
env->spr[SPR_SPRG4], env->spr[SPR_SPRG5],
env->spr[SPR_SPRG6], env->spr[SPR_SPRG7]);
+#if defined(TARGET_PPC64)
+ if (env->excp_model == POWERPC_EXCP_POWER7 ||
+ env->excp_model == POWERPC_EXCP_POWER8) {
+ cpu_fprintf(f, "HSRR0 " TARGET_FMT_lx " HSRR1 " TARGET_FMT_lx "\n",
+ env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
+ }
+#endif
if (env->excp_model == POWERPC_EXCP_BOOKE) {
cpu_fprintf(f, "CSRR0 " TARGET_FMT_lx " CSRR1 " TARGET_FMT_lx
" MCSRR0 " TARGET_FMT_lx " MCSRR1 " TARGET_FMT_lx "\n",
diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index 08bdd07751..8f257fb74a 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -277,6 +277,32 @@ static void spr_read_purr (DisasContext *ctx, int gprn, int sprn)
{
gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
}
+
+/* HDECR */
+static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
+{
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_stop_exception(ctx);
+ }
+}
+
+static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
+{
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_stop_exception(ctx);
+ }
+}
+
#endif
#endif
@@ -7525,16 +7551,6 @@ static void gen_spr_970_hior(CPUPPCState *env)
0x00000000);
}
-static void gen_spr_970_lpar(CPUPPCState *env)
-{
- /* Logical partitionning */
- /* PPC970: HID4 is effectively the LPCR */
- spr_register(env, SPR_970_HID4, "HID4",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
-}
-
static void gen_spr_book3s_common(CPUPPCState *env)
{
spr_register(env, SPR_CTRL, "SPR_CTRL",
@@ -7787,21 +7803,155 @@ static void gen_spr_power5p_ear(CPUPPCState *env)
0x00000000);
}
+#if !defined(CONFIG_USER_ONLY)
+static void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
+{
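+ /* The value written is ANDed into HMER, so bits can only be cleared here */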
+ TCGv hmer = tcg_temp_new();
+
+ gen_load_spr(hmer, sprn);
+ tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
+ gen_store_spr(sprn, hmer);
+ spr_store_dump_spr(sprn);
+ tcg_temp_free(hmer);
+}
+
+static void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
+}
+
+static void spr_write_970_hid4(DisasContext *ctx, int sprn, int gprn)
+{
+#if defined(TARGET_PPC64)
+ spr_write_generic(ctx, sprn, gprn);
+ gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
+#endif
+}
+
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+static void gen_spr_970_lpar(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* Logical partitioning */
+ /* PPC970: HID4 is effectively the LPCR */
+ spr_register(env, SPR_970_HID4, "HID4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_970_hid4,
+ 0x00000000);
+#endif
+}
+
static void gen_spr_power5p_lpar(CPUPPCState *env)
{
+#if !defined(CONFIG_USER_ONLY)
/* Logical partitionning */
- spr_register_kvm(env, SPR_LPCR, "LPCR",
- SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
- KVM_REG_PPC_LPCR, 0x00000000);
+ spr_register_kvm_hv(env, SPR_LPCR, "LPCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_lpcr,
+ KVM_REG_PPC_LPCR, LPCR_LPES0 | LPCR_LPES1);
+ spr_register_hv(env, SPR_HDEC, "HDEC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_hdecr, &spr_write_hdecr, 0);
+#endif
}
static void gen_spr_book3s_ids(CPUPPCState *env)
{
+ /* FIXME: Will need to deal with thread vs core only SPRs */
+
/* Processor identification */
- spr_register(env, SPR_PIR, "PIR",
+ spr_register_hv(env, SPR_PIR, "PIR",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_pir,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, NULL,
+ 0x00000000);
+ spr_register_hv(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_TSCR, "TSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HMER, "HMER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_hmer,
+ 0x00000000);
+ spr_register_hv(env, SPR_HMEER, "HMEER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_TFMR, "TFMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_LPIDR, "LPIDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HFSCR, "HFSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_MMCRC, "MMCRC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_MMCRH, "MMCRH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSPRG0, "HSPRG0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSPRG1, "HSPRG1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSRR0, "HSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSRR1, "HSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HDAR, "HDAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HDSISR, "HDSISR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_RMOR, "RMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HRMOR, "HRMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
}
@@ -8060,6 +8210,17 @@ static void gen_spr_power7_book4(CPUPPCState *env)
#endif
}
+static void gen_spr_power8_rpr(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_hv(env, SPR_RPR, "RPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000103070F1F3F);
+#endif
+}
+
static void init_proc_book3s_64(CPUPPCState *env, int version)
{
gen_spr_ne_601(env);
@@ -8117,6 +8278,7 @@ static void init_proc_book3s_64(CPUPPCState *env, int version)
gen_spr_vtb(env);
gen_spr_power8_ic(env);
gen_spr_power8_book4(env);
+ gen_spr_power8_rpr(env);
}
if (version < BOOK3S_CPU_POWER8) {
gen_spr_book3s_dbg(env);
@@ -8629,11 +8791,19 @@ void cpu_ppc_set_papr(PowerPCCPU *cpu)
/* Set emulated LPCR to not send interrupts to the hypervisor. Note that
* under KVM, the actual HW LPCR will be set differently by KVM itself;
* the settings below ensure proper operation with TCG in the absence of
- * a real hypervisor
+ * a real hypervisor.
+ *
+ * Clearing VPM0 will also cause us to use RMOR in mmu-hash64.c for
+ * real mode accesses, which thankfully defaults to 0 and isn't
+ * accessible in guest mode.
*/
lpcr->default_value &= ~(LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV);
lpcr->default_value |= LPCR_LPES0 | LPCR_LPES1;
+ /* Set RMLS to the max (ie, 16G) */
+ lpcr->default_value &= ~LPCR_RMLS;
+ lpcr->default_value |= 1ull << LPCR_RMLS_SHIFT;
+
/* P7 and P8 have slightly different PECE bits, mostly because P8 adds
* bits 47 and 48, which are reserved on P7. Here we set them all, which
* will work as expected for both implementations.
@@ -8649,6 +8819,10 @@ void cpu_ppc_set_papr(PowerPCCPU *cpu)
/* Set a full AMOR so guest can use the AMR as it sees fit */
env->spr[SPR_AMOR] = amor->default_value = 0xffffffffffffffffull;
+ /* Update some env bits based on new LPCR value */
+ ppc_hash64_update_rmls(env);
+ ppc_hash64_update_vrma(env);
+
/* Tell KVM that we're in PAPR mode */
if (kvm_enabled()) {
kvmppc_set_papr(cpu);
@@ -9354,7 +9528,7 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
Error *local_err = NULL;
#if !defined(CONFIG_USER_ONLY)
- int max_smt = kvm_enabled() ? kvmppc_smt_threads() : 1;
+ int max_smt = kvmppc_smt_threads();
#endif
#if !defined(CONFIG_USER_ONLY)
@@ -10131,20 +10305,36 @@ static void ppc_cpu_initfn(Object *obj)
if (pcc->sps) {
env->sps = *pcc->sps;
} else if (env->mmu_model & POWERPC_MMU_64) {
- /* Use default sets of page sizes */
- static const struct ppc_segment_page_sizes defsps = {
+ /* Use default sets of page sizes. We don't support MPSS */
+ static const struct ppc_segment_page_sizes defsps_4k = {
+ .sps = {
+ { .page_shift = 12, /* 4K */
+ .slb_enc = 0,
+ .enc = { { .page_shift = 12, .pte_enc = 0 } }
+ },
+ { .page_shift = 24, /* 16M */
+ .slb_enc = 0x100,
+ .enc = { { .page_shift = 24, .pte_enc = 0 } }
+ },
+ },
+ };
+ static const struct ppc_segment_page_sizes defsps_64k = {
.sps = {
{ .page_shift = 12, /* 4K */
.slb_enc = 0,
.enc = { { .page_shift = 12, .pte_enc = 0 } }
},
+ { .page_shift = 16, /* 64K */
+ .slb_enc = 0x110,
+ .enc = { { .page_shift = 16, .pte_enc = 1 } }
+ },
{ .page_shift = 24, /* 16M */
.slb_enc = 0x100,
.enc = { { .page_shift = 24, .pte_enc = 0 } }
},
},
};
- env->sps = defsps;
+ env->sps = (env->mmu_model & POWERPC_MMU_64K) ? defsps_64k : defsps_4k;
}
#endif /* defined(TARGET_PPC64) */
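
The hunk just above replaces the single default segment-page-size table with two and picks the 64K-aware one only when the MMU model flags advertise 64K pages. The standalone sketch below models that selection with simplified stand-in structs (illustrative only, not QEMU's ppc_segment_page_sizes), reusing the slb_enc values shown in the hunk:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for QEMU's per-segment page size tables. */
    struct seg_page_size {
        unsigned page_shift;   /* base page size for the segment */
        unsigned slb_enc;      /* SLB encoding for that size */
    };

    static const struct seg_page_size defsps_4k[] = {
        { 12, 0x000 },         /* 4K  */
        { 24, 0x100 },         /* 16M */
    };

    static const struct seg_page_size defsps_64k[] = {
        { 12, 0x000 },         /* 4K  */
        { 16, 0x110 },         /* 64K */
        { 24, 0x100 },         /* 16M */
    };

    int main(void)
    {
        bool mmu_has_64k = true;   /* corresponds to POWERPC_MMU_64K being set */
        const struct seg_page_size *sps = mmu_has_64k ? defsps_64k : defsps_4k;
        size_t n = mmu_has_64k ? 3 : 2;

        for (size_t i = 0; i < n; i++) {
            printf("page %uK slb_enc 0x%03x\n",
                   1u << (sps[i].page_shift - 10), sps[i].slb_enc);
        }
        return 0;
    }
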
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index bd6b2e57ef..8bcb0f75f3 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -463,7 +463,6 @@ S390CPU *cpu_s390x_init(const char *cpu_model);
S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp);
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp);
void s390x_translate_init(void);
-int cpu_s390x_exec(CPUState *cpu);
/* you can call this signal handler from your SIGBUS and SIGSEGV
signal handlers to inform the virtual CPU of exceptions. non zero
@@ -627,7 +626,6 @@ void cpu_unlock(void);
extern void subsystem_reset(void);
#define cpu_init(model) CPU(cpu_s390x_init(model))
-#define cpu_exec cpu_s390x_exec
#define cpu_signal_handler cpu_s390x_signal_handler
void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
diff --git a/target-s390x/fpu_helper.c b/target-s390x/fpu_helper.c
index 4ddb388392..e604e9f7be 100644
--- a/target-s390x/fpu_helper.c
+++ b/target-s390x/fpu_helper.c
@@ -267,7 +267,7 @@ uint64_t HELPER(ldeb)(CPUS390XState *env, uint64_t f2)
{
float64 ret = float32_to_float64(f2, &env->fpu_status);
handle_exceptions(env, GETPC());
- return float64_maybe_silence_nan(ret);
+ return float64_maybe_silence_nan(ret, &env->fpu_status);
}
/* convert 128-bit float to 64-bit float */
@@ -275,7 +275,7 @@ uint64_t HELPER(ldxb)(CPUS390XState *env, uint64_t ah, uint64_t al)
{
float64 ret = float128_to_float64(make_float128(ah, al), &env->fpu_status);
handle_exceptions(env, GETPC());
- return float64_maybe_silence_nan(ret);
+ return float64_maybe_silence_nan(ret, &env->fpu_status);
}
/* convert 64-bit float to 128-bit float */
@@ -283,7 +283,7 @@ uint64_t HELPER(lxdb)(CPUS390XState *env, uint64_t f2)
{
float128 ret = float64_to_float128(f2, &env->fpu_status);
handle_exceptions(env, GETPC());
- return RET128(float128_maybe_silence_nan(ret));
+ return RET128(float128_maybe_silence_nan(ret, &env->fpu_status));
}
/* convert 32-bit float to 128-bit float */
@@ -291,7 +291,7 @@ uint64_t HELPER(lxeb)(CPUS390XState *env, uint64_t f2)
{
float128 ret = float32_to_float128(f2, &env->fpu_status);
handle_exceptions(env, GETPC());
- return RET128(float128_maybe_silence_nan(ret));
+ return RET128(float128_maybe_silence_nan(ret, &env->fpu_status));
}
/* convert 64-bit float to 32-bit float */
@@ -299,7 +299,7 @@ uint64_t HELPER(ledb)(CPUS390XState *env, uint64_t f2)
{
float32 ret = float64_to_float32(f2, &env->fpu_status);
handle_exceptions(env, GETPC());
- return float32_maybe_silence_nan(ret);
+ return float32_maybe_silence_nan(ret, &env->fpu_status);
}
/* convert 128-bit float to 32-bit float */
@@ -307,7 +307,7 @@ uint64_t HELPER(lexb)(CPUS390XState *env, uint64_t ah, uint64_t al)
{
float32 ret = float128_to_float32(make_float128(ah, al), &env->fpu_status);
handle_exceptions(env, GETPC());
- return float32_maybe_silence_nan(ret);
+ return float32_maybe_silence_nan(ret, &env->fpu_status);
}
/* 32-bit FP compare */
@@ -624,7 +624,7 @@ uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1,
}
/* test data class 32-bit */
-uint32_t HELPER(tceb)(uint64_t f1, uint64_t m2)
+uint32_t HELPER(tceb)(CPUS390XState *env, uint64_t f1, uint64_t m2)
{
float32 v1 = f1;
int neg = float32_is_neg(v1);
@@ -633,7 +633,8 @@ uint32_t HELPER(tceb)(uint64_t f1, uint64_t m2)
if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
(float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
(float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
- (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+ (float32_is_signaling_nan(v1, &env->fpu_status) &&
+ (m2 & (1 << (1-neg))))) {
cc = 1;
} else if (m2 & (1 << (9-neg))) {
/* assume normalized number */
@@ -644,7 +645,7 @@ uint32_t HELPER(tceb)(uint64_t f1, uint64_t m2)
}
/* test data class 64-bit */
-uint32_t HELPER(tcdb)(uint64_t v1, uint64_t m2)
+uint32_t HELPER(tcdb)(CPUS390XState *env, uint64_t v1, uint64_t m2)
{
int neg = float64_is_neg(v1);
uint32_t cc = 0;
@@ -652,7 +653,8 @@ uint32_t HELPER(tcdb)(uint64_t v1, uint64_t m2)
if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
(float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
(float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
- (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+ (float64_is_signaling_nan(v1, &env->fpu_status) &&
+ (m2 & (1 << (1-neg))))) {
cc = 1;
} else if (m2 & (1 << (9-neg))) {
/* assume normalized number */
@@ -663,7 +665,8 @@ uint32_t HELPER(tcdb)(uint64_t v1, uint64_t m2)
}
/* test data class 128-bit */
-uint32_t HELPER(tcxb)(uint64_t ah, uint64_t al, uint64_t m2)
+uint32_t HELPER(tcxb)(CPUS390XState *env, uint64_t ah,
+ uint64_t al, uint64_t m2)
{
float128 v1 = make_float128(ah, al);
int neg = float128_is_neg(v1);
@@ -672,7 +675,8 @@ uint32_t HELPER(tcxb)(uint64_t ah, uint64_t al, uint64_t m2)
if ((float128_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
(float128_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
(float128_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
- (float128_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
+ (float128_is_signaling_nan(v1, &env->fpu_status) &&
+ (m2 & (1 << (1-neg))))) {
cc = 1;
} else if (m2 & (1 << (9-neg))) {
/* assume normalized number */
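
These s390x helpers now receive the CPU's float_status because, with the softfloat changes earlier in this series, the signalling-NaN convention is per-CPU state rather than a compile-time choice (compare the new set_snan_bit_is_one() calls for sh4 and unicore32 further down). A rough standalone model of the float32 case, assuming the usual IEEE-754 bit layout; the real softfloat helpers cover more widths and modes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of the per-CPU flag set via set_snan_bit_is_one(). */
    static bool snan_bit_is_one;

    /* A float32 NaN has an all-ones exponent and a non-zero fraction.
     * Whether a set top fraction bit (bit 22) means "signalling" or
     * "quiet" is architecture-dependent, hence the status flag. */
    static bool f32_is_signaling_nan(uint32_t bits)
    {
        bool exp_all_ones = ((bits >> 23) & 0xff) == 0xff;
        bool frac_nonzero = (bits & 0x7fffff) != 0;
        bool top_frac_bit = (bits >> 22) & 1;

        if (!exp_all_ones || !frac_nonzero) {
            return false;              /* not a NaN at all */
        }
        return snan_bit_is_one ? top_frac_bit : !top_frac_bit;
    }

    int main(void)
    {
        uint32_t qnan_2008 = 0x7fc00000;   /* quiet NaN, IEEE 754-2008 style */
        uint32_t snan_2008 = 0x7fa00000;   /* signalling NaN, 2008 style */

        snan_bit_is_one = false;           /* e.g. s390x */
        printf("%d %d\n", f32_is_signaling_nan(qnan_2008),
                          f32_is_signaling_nan(snan_2008));   /* 0 1 */

        snan_bit_is_one = true;            /* e.g. sh4, unicore32 */
        printf("%d %d\n", f32_is_signaling_nan(qnan_2008),
                          f32_is_signaling_nan(snan_2008));   /* 1 0 */
        return 0;
    }
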
diff --git a/target-s390x/helper.h b/target-s390x/helper.h
index 7e06119e99..207a6e7d1c 100644
--- a/target-s390x/helper.h
+++ b/target-s390x/helper.h
@@ -67,9 +67,9 @@ DEF_HELPER_FLAGS_4(maeb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(madb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(mseb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(msdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
-DEF_HELPER_FLAGS_2(tceb, TCG_CALL_NO_RWG_SE, i32, i64, i64)
-DEF_HELPER_FLAGS_2(tcdb, TCG_CALL_NO_RWG_SE, i32, i64, i64)
-DEF_HELPER_FLAGS_3(tcxb, TCG_CALL_NO_RWG_SE, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_3(tceb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_3(tcdb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_4(tcxb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64, i64)
DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64)
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index 3c3487a5a9..1a07d70b21 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -3986,21 +3986,21 @@ static ExitStatus op_svc(DisasContext *s, DisasOps *o)
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
- gen_helper_tceb(cc_op, o->in1, o->in2);
+ gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
- gen_helper_tcdb(cc_op, o->in1, o->in2);
+ gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
set_cc_static(s);
return NO_EXIT;
}
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
- gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
+ gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
set_cc_static(s);
return NO_EXIT;
}
diff --git a/target-sh4/cpu.c b/target-sh4/cpu.c
index 794b625d8e..f589532e18 100644
--- a/target-sh4/cpu.c
+++ b/target-sh4/cpu.c
@@ -71,6 +71,7 @@ static void superh_cpu_reset(CPUState *s)
set_flush_to_zero(1, &env->fp_status);
#endif
set_default_nan_mode(1, &env->fp_status);
+ set_snan_bit_is_one(1, &env->fp_status);
}
static void superh_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
diff --git a/target-sh4/cpu.h b/target-sh4/cpu.h
index 3f5c689eb3..3f9dae2d1f 100644
--- a/target-sh4/cpu.h
+++ b/target-sh4/cpu.h
@@ -221,7 +221,6 @@ int superh_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void sh4_translate_init(void);
SuperHCPU *cpu_sh4_init(const char *cpu_model);
-int cpu_sh4_exec(CPUState *s);
int cpu_sh4_signal_handler(int host_signum, void *pinfo,
void *puc);
int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
@@ -254,7 +253,6 @@ void cpu_load_tlb(CPUSH4State * env);
#define cpu_init(cpu_model) CPU(cpu_sh4_init(cpu_model))
-#define cpu_exec cpu_sh4_exec
#define cpu_signal_handler cpu_sh4_signal_handler
#define cpu_list sh4_cpu_list
diff --git a/target-sparc/cpu.h b/target-sparc/cpu.h
index ba37f4b94e..f78fabfe7b 100644
--- a/target-sparc/cpu.h
+++ b/target-sparc/cpu.h
@@ -565,7 +565,6 @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
void gen_intermediate_code_init(CPUSPARCState *env);
/* cpu-exec.c */
-int cpu_sparc_exec(CPUState *cpu);
/* win_helper.c */
target_ulong cpu_get_psr(CPUSPARCState *env1);
@@ -626,7 +625,6 @@ int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc);
#define cpu_init(cpu_model) CPU(cpu_sparc_init(cpu_model))
#endif
-#define cpu_exec cpu_sparc_exec
#define cpu_signal_handler cpu_sparc_signal_handler
#define cpu_list sparc_cpu_list
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index afd46b878f..0f4faf7062 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -4679,12 +4679,15 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
case 0xd: /* ldstub -- XXX: should be atomically */
{
TCGv r_const;
+ TCGv tmp = tcg_temp_new();
gen_address_mask(dc, cpu_addr);
- tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+ tcg_gen_qemu_ld8u(tmp, cpu_addr, dc->mem_idx);
r_const = tcg_const_tl(0xff);
tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
+ tcg_gen_mov_tl(cpu_val, tmp);
tcg_temp_free(r_const);
+ tcg_temp_free(tmp);
}
break;
case 0x0f:
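
The ldstub change reads the old byte into a scratch temporary, performs the all-ones store, and only then copies the result into the destination register, presumably so the guest-visible register is not modified before the store has been issued (the store is the step that can still fault). A standalone model of that ordering for ldstub's load-and-set-to-0xff semantics:

    #include <stdint.h>
    #include <stdio.h>

    /* ldstub: load a byte and set the memory location to 0xff, returning
     * the old value.  The ordering mirrors the fixed translation: read
     * into a scratch, update memory, then commit to the destination. */
    static uint8_t ldstub(uint8_t *mem, uint8_t *rd)
    {
        uint8_t tmp = *mem;   /* load old value into a scratch */
        *mem = 0xff;          /* store all-ones (the step that may fault) */
        *rd = tmp;            /* commit to the destination only afterwards */
        return tmp;
    }

    int main(void)
    {
        uint8_t lock = 0x00, rd = 0xaa;
        ldstub(&lock, &rd);
        printf("lock=0x%02x rd=0x%02x\n", lock, rd);   /* lock=0xff rd=0x00 */
        return 0;
    }
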
diff --git a/target-tilegx/cpu.h b/target-tilegx/cpu.h
index b9b588de53..d74032925b 100644
--- a/target-tilegx/cpu.h
+++ b/target-tilegx/cpu.h
@@ -158,14 +158,12 @@ static inline TileGXCPU *tilegx_env_get_cpu(CPUTLGState *env)
#include "exec/cpu-all.h"
void tilegx_tcg_init(void);
-int cpu_tilegx_exec(CPUState *s);
int cpu_tilegx_signal_handler(int host_signum, void *pinfo, void *puc);
TileGXCPU *cpu_tilegx_init(const char *cpu_model);
#define cpu_init(cpu_model) CPU(cpu_tilegx_init(cpu_model))
-#define cpu_exec cpu_tilegx_exec
#define cpu_signal_handler cpu_tilegx_signal_handler
static inline void cpu_get_tb_cpu_state(CPUTLGState *env, target_ulong *pc,
diff --git a/target-tricore/cpu.h b/target-tricore/cpu.h
index 3c6f7b75b8..a298d63eea 100644
--- a/target-tricore/cpu.h
+++ b/target-tricore/cpu.h
@@ -374,7 +374,6 @@ void fpu_set_state(CPUTriCoreState *env);
void tricore_cpu_list(FILE *f, fprintf_function cpu_fprintf);
-#define cpu_exec cpu_tricore_exec
#define cpu_signal_handler cpu_tricore_signal_handler
#define cpu_list tricore_cpu_list
@@ -400,7 +399,6 @@ enum {
};
void cpu_state_reset(CPUTriCoreState *s);
-int cpu_tricore_exec(CPUState *cpu);
void tricore_tcg_init(void);
int cpu_tricore_signal_handler(int host_signum, void *pinfo, void *puc);
diff --git a/target-unicore32/cpu.c b/target-unicore32/cpu.c
index 3990433eb8..e7a4984260 100644
--- a/target-unicore32/cpu.c
+++ b/target-unicore32/cpu.c
@@ -78,6 +78,7 @@ static void unicore_ii_cpu_initfn(Object *obj)
set_feature(env, UC32_HWCAP_CMOV);
set_feature(env, UC32_HWCAP_UCF64);
+ set_snan_bit_is_one(1, &env->ucf64.fp_status);
}
static void uc32_any_cpu_initfn(Object *obj)
@@ -90,6 +91,7 @@ static void uc32_any_cpu_initfn(Object *obj)
set_feature(env, UC32_HWCAP_CMOV);
set_feature(env, UC32_HWCAP_UCF64);
+ set_snan_bit_is_one(1, &env->ucf64.fp_status);
}
static const UniCore32CPUInfo uc32_cpus[] = {
diff --git a/target-unicore32/cpu.h b/target-unicore32/cpu.h
index f3e877bbc0..83f758496a 100644
--- a/target-unicore32/cpu.h
+++ b/target-unicore32/cpu.h
@@ -149,7 +149,6 @@ void cpu_asr_write(CPUUniCore32State *env1, target_ulong val, target_ulong mask)
#define UC32_HWCAP_CMOV 4 /* 1 << 2 */
#define UC32_HWCAP_UCF64 8 /* 1 << 3 */
-#define cpu_exec uc32_cpu_exec
#define cpu_signal_handler uc32_cpu_signal_handler
int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
@@ -165,8 +164,6 @@ static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch)
#include "exec/cpu-all.h"
-int uc32_cpu_exec(CPUState *s);
-
UniCore32CPU *uc32_cpu_init(const char *cpu_model);
#define cpu_init(cpu_model) CPU(uc32_cpu_init(cpu_model))
diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h
index 442176a2b2..8477ec963a 100644
--- a/target-xtensa/cpu.h
+++ b/target-xtensa/cpu.h
@@ -416,7 +416,6 @@ int xtensa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
int is_write, int is_user, uintptr_t retaddr);
-#define cpu_exec cpu_xtensa_exec
#define cpu_signal_handler cpu_xtensa_signal_handler
#define cpu_list xtensa_cpu_list
@@ -432,7 +431,6 @@ XtensaCPU *cpu_xtensa_init(const char *cpu_model);
void xtensa_translate_init(void);
void xtensa_breakpoint_handler(CPUState *cs);
-int cpu_xtensa_exec(CPUState *cpu);
void xtensa_finalize_config(XtensaConfig *config);
void xtensa_register_core(XtensaConfigList *node);
void check_interrupts(CPUXtensaState *s);
diff --git a/tests/Makefile.include b/tests/Makefile.include
index fd2dba49a7..f8e3c6b35a 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -86,7 +86,7 @@ check-unit-y += tests/test-qemu-opts$(EXESUF)
gcov-files-test-qemu-opts-y = qom/test-qemu-opts.c
check-unit-y += tests/test-write-threshold$(EXESUF)
gcov-files-test-write-threshold-y = block/write-threshold.c
-check-unit-$(CONFIG_GNUTLS_HASH) += tests/test-crypto-hash$(EXESUF)
+check-unit-y += tests/test-crypto-hash$(EXESUF)
check-unit-y += tests/test-crypto-cipher$(EXESUF)
check-unit-y += tests/test-crypto-secret$(EXESUF)
check-unit-$(CONFIG_GNUTLS) += tests/test-crypto-tlscredsx509$(EXESUF)
@@ -251,7 +251,7 @@ check-qtest-sparc64-y = tests/endianness-test$(EXESUF)
gcov-files-sparc-y += hw/timer/m48t59.c
gcov-files-sparc64-y += hw/timer/m48t59.c
check-qtest-arm-y = tests/tmp105-test$(EXESUF)
-check-qtest-arm-y = tests/ds1338-test$(EXESUF)
+check-qtest-arm-y += tests/ds1338-test$(EXESUF)
gcov-files-arm-y += hw/misc/tmp105.c
check-qtest-arm-y += tests/virtio-blk-test$(EXESUF)
gcov-files-arm-y += arm-softmmu/hw/block/virtio-blk.c
diff --git a/tests/acpi-test-data/pc/DSDT b/tests/acpi-test-data/pc/DSDT
index 8b4f1a09b8..8053d71105 100644
--- a/tests/acpi-test-data/pc/DSDT
+++ b/tests/acpi-test-data/pc/DSDT
Binary files differ
diff --git a/tests/acpi-test-data/pc/DSDT.bridge b/tests/acpi-test-data/pc/DSDT.bridge
index 0d09b5cc61..850e71a973 100644
--- a/tests/acpi-test-data/pc/DSDT.bridge
+++ b/tests/acpi-test-data/pc/DSDT.bridge
Binary files differ
diff --git a/tests/acpi-test-data/pc/DSDT.ipmikcs b/tests/acpi-test-data/pc/DSDT.ipmikcs
new file mode 100644
index 0000000000..8ac48afb6a
--- /dev/null
+++ b/tests/acpi-test-data/pc/DSDT.ipmikcs
Binary files differ
diff --git a/tests/acpi-test-data/q35/DSDT b/tests/acpi-test-data/q35/DSDT
index 67445428d9..58fbb3d2e2 100644
--- a/tests/acpi-test-data/q35/DSDT
+++ b/tests/acpi-test-data/q35/DSDT
Binary files differ
diff --git a/tests/acpi-test-data/q35/DSDT.bridge b/tests/acpi-test-data/q35/DSDT.bridge
index e85f5b1af9..c392802a95 100644
--- a/tests/acpi-test-data/q35/DSDT.bridge
+++ b/tests/acpi-test-data/q35/DSDT.bridge
Binary files differ
diff --git a/tests/acpi-test-data/q35/DSDT.ipmibt b/tests/acpi-test-data/q35/DSDT.ipmibt
new file mode 100644
index 0000000000..0ea38e1e72
--- /dev/null
+++ b/tests/acpi-test-data/q35/DSDT.ipmibt
Binary files differ
diff --git a/tests/bios-tables-test.c b/tests/bios-tables-test.c
index 16d11aa854..92c90dd194 100644
--- a/tests/bios-tables-test.c
+++ b/tests/bios-tables-test.c
@@ -49,6 +49,8 @@ typedef struct {
GArray *tables;
uint32_t smbios_ep_addr;
struct smbios_21_entry_point smbios_ep_table;
+ uint8_t *required_struct_types;
+ int required_struct_types_len;
} test_data;
#define ACPI_READ_FIELD(field, addr) \
@@ -334,7 +336,7 @@ static void test_acpi_tables(test_data *data)
for (i = 0; i < tables_nr; i++) {
AcpiSdtTable ssdt_table;
- memset(&ssdt_table, 0 , sizeof(ssdt_table));
+ memset(&ssdt_table, 0, sizeof(ssdt_table));
uint32_t addr = data->rsdt_tables_addr[i + 1]; /* fadt is first */
test_dst_table(&ssdt_table, addr);
g_array_append_val(data->tables, ssdt_table);
@@ -661,7 +663,6 @@ static void test_smbios_structs(test_data *data)
uint32_t addr = ep_table->structure_table_address;
int i, len, max_len = 0;
uint8_t type, prv, crt;
- uint8_t required_struct_types[] = {0, 1, 3, 4, 16, 17, 19, 32, 127};
/* walk the smbios tables */
for (i = 0; i < ep_table->number_of_structures; i++) {
@@ -701,8 +702,8 @@ static void test_smbios_structs(test_data *data)
g_assert_cmpuint(ep_table->max_structure_size, ==, max_len);
/* required struct types must all be present */
- for (i = 0; i < ARRAY_SIZE(required_struct_types); i++) {
- g_assert(test_bit(required_struct_types[i], struct_bitmap));
+ for (i = 0; i < data->required_struct_types_len; i++) {
+ g_assert(test_bit(data->required_struct_types[i], struct_bitmap));
}
}
@@ -742,6 +743,10 @@ static void test_acpi_one(const char *params, test_data *data)
g_free(args);
}
+static uint8_t base_required_struct_types[] = {
+ 0, 1, 3, 4, 16, 17, 19, 32, 127
+};
+
static void test_acpi_piix4_tcg(void)
{
test_data data;
@@ -751,6 +756,8 @@ static void test_acpi_piix4_tcg(void)
*/
memset(&data, 0, sizeof(data));
data.machine = MACHINE_PC;
+ data.required_struct_types = base_required_struct_types;
+ data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types);
test_acpi_one("-machine accel=tcg", &data);
free_test_data(&data);
}
@@ -762,6 +769,8 @@ static void test_acpi_piix4_tcg_bridge(void)
memset(&data, 0, sizeof(data));
data.machine = MACHINE_PC;
data.variant = ".bridge";
+ data.required_struct_types = base_required_struct_types;
+ data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types);
test_acpi_one("-machine accel=tcg -device pci-bridge,chassis_nr=1", &data);
free_test_data(&data);
}
@@ -772,6 +781,8 @@ static void test_acpi_q35_tcg(void)
memset(&data, 0, sizeof(data));
data.machine = MACHINE_Q35;
+ data.required_struct_types = base_required_struct_types;
+ data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types);
test_acpi_one("-machine q35,accel=tcg", &data);
free_test_data(&data);
}
@@ -783,11 +794,50 @@ static void test_acpi_q35_tcg_bridge(void)
memset(&data, 0, sizeof(data));
data.machine = MACHINE_Q35;
data.variant = ".bridge";
+ data.required_struct_types = base_required_struct_types;
+ data.required_struct_types_len = ARRAY_SIZE(base_required_struct_types);
test_acpi_one("-machine q35,accel=tcg -device pci-bridge,chassis_nr=1",
&data);
free_test_data(&data);
}
+static uint8_t ipmi_required_struct_types[] = {
+ 0, 1, 3, 4, 16, 17, 19, 32, 38, 127
+};
+
+static void test_acpi_q35_tcg_ipmi(void)
+{
+ test_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.machine = MACHINE_Q35;
+ data.variant = ".ipmibt";
+ data.required_struct_types = ipmi_required_struct_types;
+ data.required_struct_types_len = ARRAY_SIZE(ipmi_required_struct_types);
+ test_acpi_one("-machine q35,accel=tcg -device ipmi-bmc-sim,id=bmc0"
+ " -device isa-ipmi-bt,bmc=bmc0",
+ &data);
+ free_test_data(&data);
+}
+
+static void test_acpi_piix4_tcg_ipmi(void)
+{
+ test_data data;
+
+ /* Supplying the -machine accel argument overrides the default (qtest).
+ * This is to make the guest actually run.
+ */
+ memset(&data, 0, sizeof(data));
+ data.machine = MACHINE_PC;
+ data.variant = ".ipmikcs";
+ data.required_struct_types = ipmi_required_struct_types;
+ data.required_struct_types_len = ARRAY_SIZE(ipmi_required_struct_types);
+ test_acpi_one("-machine accel=tcg -device ipmi-bmc-sim,id=bmc0"
+ " -device isa-ipmi-kcs,irq=0,bmc=bmc0",
+ &data);
+ free_test_data(&data);
+}
+
int main(int argc, char *argv[])
{
const char *arch = qtest_get_arch();
@@ -804,6 +854,8 @@ int main(int argc, char *argv[])
qtest_add_func("acpi/piix4/tcg/bridge", test_acpi_piix4_tcg_bridge);
qtest_add_func("acpi/q35/tcg", test_acpi_q35_tcg);
qtest_add_func("acpi/q35/tcg/bridge", test_acpi_q35_tcg_bridge);
+ qtest_add_func("acpi/piix4/tcg/ipmi", test_acpi_piix4_tcg_ipmi);
+ qtest_add_func("acpi/q35/tcg/ipmi", test_acpi_q35_tcg_ipmi);
}
ret = g_test_run();
boot_sector_cleanup(disk);
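
With required_struct_types moved into test_data, each machine/variant supplies its own list of mandatory SMBIOS structure types instead of sharing one hard-coded array. A further test would follow the same shape as the IPMI cases above; the variant name, device argument and type list below are purely hypothetical:

    /* Hypothetical example only: the ".foo" variant, "-device foo" and the
     * type list are illustrative, not part of this series. */
    static uint8_t foo_required_struct_types[] = {
        0, 1, 3, 4, 32, 127
    };

    static void test_acpi_q35_tcg_foo(void)
    {
        test_data data;

        memset(&data, 0, sizeof(data));
        data.machine = MACHINE_Q35;
        data.variant = ".foo";
        data.required_struct_types = foo_required_struct_types;
        data.required_struct_types_len = ARRAY_SIZE(foo_required_struct_types);
        test_acpi_one("-machine q35,accel=tcg -device foo", &data);
        free_test_data(&data);
    }
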
diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041
index ed1d9d464c..cbf5e0ba5c 100755
--- a/tests/qemu-iotests/041
+++ b/tests/qemu-iotests/041
@@ -727,6 +727,36 @@ class TestUnbackedSource(iotests.QMPTestCase):
self.complete_and_wait()
self.assert_no_active_block_jobs()
+class TestGranularity(iotests.QMPTestCase):
+ image_len = 10 * 1024 * 1024 # 10 MB
+
+ def setUp(self):
+ qemu_img('create', '-f', iotests.imgfmt, test_img,
+ str(TestGranularity.image_len))
+ qemu_io('-c', 'write 0 %d' % (self.image_len),
+ test_img)
+ self.vm = iotests.VM().add_drive(test_img)
+ self.vm.launch()
+
+ def tearDown(self):
+ self.vm.shutdown()
+ self.assertTrue(iotests.compare_images(test_img, target_img),
+ 'target image does not match source after mirroring')
+ os.remove(test_img)
+ os.remove(target_img)
+
+ def test_granularity(self):
+ self.assert_no_active_block_jobs()
+ result = self.vm.qmp('drive-mirror', device='drive0',
+ sync='full', target=target_img,
+ mode='absolute-paths', granularity=8192)
+ self.assert_qmp(result, 'return', {})
+ event = self.vm.get_qmp_event(wait=60.0)
+ # Failures will manifest as COMPLETED/ERROR.
+ self.assert_qmp(event, 'event', 'BLOCK_JOB_READY')
+ self.complete_and_wait(drive='drive0', wait_ready=False)
+ self.assert_no_active_block_jobs()
+
class TestRepairQuorum(iotests.QMPTestCase):
""" This class test quorum file repair using drive-mirror.
It's mostly a fork of TestSingleDrive """
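
TestGranularity drives drive-mirror with an explicit granularity=8192 and expects BLOCK_JOB_READY rather than an error. Checks of this kind generally reduce to a power-of-two plus range test; the standalone sketch below illustrates that shape (the 512-byte and 64 MiB bounds are assumptions for the example, not necessarily the block layer's actual limits):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative granularity check: power of two within [min, max]. */
    static bool granularity_ok(uint64_t g, uint64_t min, uint64_t max)
    {
        return g >= min && g <= max && (g & (g - 1)) == 0;
    }

    int main(void)
    {
        printf("%d\n", granularity_ok(8192, 512, 64 * 1024 * 1024));   /* 1 */
        printf("%d\n", granularity_ok(12345, 512, 64 * 1024 * 1024));  /* 0 */
        return 0;
    }
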
diff --git a/tests/qemu-iotests/041.out b/tests/qemu-iotests/041.out
index b0cadc8245..b67d0504a6 100644
--- a/tests/qemu-iotests/041.out
+++ b/tests/qemu-iotests/041.out
@@ -1,5 +1,5 @@
-...........................................................................
+............................................................................
----------------------------------------------------------------------
-Ran 75 tests
+Ran 76 tests
OK
diff --git a/tests/qemu-iotests/149 b/tests/qemu-iotests/149
index 52e23d2946..84072513db 100755
--- a/tests/qemu-iotests/149
+++ b/tests/qemu-iotests/149
@@ -153,6 +153,8 @@ def cryptsetup_format(config):
cipher = config.cipher + "-" + config.mode + "-" + config.ivgen
if config.ivgen_hash is not None:
cipher = cipher + ":" + config.ivgen_hash
+ elif config.ivgen == "essiv":
+ cipher = cipher + ":" + "sha256"
args.extend(["--cipher", cipher])
if config.mode == "xts":
args.extend(["--key-size", str(config.keylen * 2)])
@@ -479,6 +481,16 @@ configs = [
"6": "slot6",
"7": "slot7",
}),
+
+ # Check handling of default hash alg (sha256) with essiv
+ LUKSConfig("aes-256-cbc-essiv-auto-sha1",
+ "aes", 256, "cbc", "essiv", None, "sha1"),
+
+ # Check that a useless hash provided for 'plain64' iv gen
+ # is ignored and no error raised
+ LUKSConfig("aes-256-cbc-plain64-sha256-sha1",
+ "aes", 256, "cbc", "plain64", "sha256", "sha1"),
+
]
blacklist = [
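
The cryptsetup_format() change mirrors the LUKS driver's behaviour of defaulting the ESSIV IV generator's hash to sha256 when none is given, which is why the new reference output below formats with "aes-cbc-essiv:sha256". A standalone sketch of building such a dm-crypt cipher specification:

    #include <stdio.h>
    #include <string.h>

    /* Build a dm-crypt style cipher spec "cipher-mode-ivgen[:hash]",
     * defaulting the hash to sha256 when the IV generator is essiv. */
    static void cipher_spec(char *buf, size_t len,
                            const char *cipher, const char *mode,
                            const char *ivgen, const char *ivgen_hash)
    {
        if (ivgen_hash == NULL && strcmp(ivgen, "essiv") == 0) {
            ivgen_hash = "sha256";
        }
        if (ivgen_hash) {
            snprintf(buf, len, "%s-%s-%s:%s", cipher, mode, ivgen, ivgen_hash);
        } else {
            snprintf(buf, len, "%s-%s-%s", cipher, mode, ivgen);
        }
    }

    int main(void)
    {
        char spec[64];

        cipher_spec(spec, sizeof(spec), "aes", "cbc", "essiv", NULL);
        printf("%s\n", spec);    /* aes-cbc-essiv:sha256 */

        cipher_spec(spec, sizeof(spec), "aes", "xts", "plain64", NULL);
        printf("%s\n", spec);    /* aes-xts-plain64 */
        return 0;
    }
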
diff --git a/tests/qemu-iotests/149.out b/tests/qemu-iotests/149.out
index 287f013012..90b5b55efb 100644
--- a/tests/qemu-iotests/149.out
+++ b/tests/qemu-iotests/149.out
@@ -1878,3 +1878,243 @@ sudo cryptsetup -q -v luksClose qiotest-145-aes-256-xts-plain-sha1-pwallslots
# Delete image
unlink TEST_DIR/luks-aes-256-xts-plain-sha1-pwallslots.img
+# ================= dm-crypt aes-256-cbc-essiv-auto-sha1 =================
+# Create image
+truncate TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img --size 4194304MB
+# Format image
+sudo cryptsetup -q -v luksFormat --cipher aes-cbc-essiv:sha256 --key-size 256 --hash sha1 --key-slot 0 --key-file - TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+# Open dev
+sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Set dev owner
+sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Write test pattern 0xa7
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+wrote 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x13
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+wrote 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Close dev
+sudo cryptsetup -q -v luksClose qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Read test pattern 0xa7
+qemu-io -c read -P 0xa7 100M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+read 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Read test pattern 0x13
+qemu-io -c read -P 0x13 3145728M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+read 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x91
+qemu-io -c write -P 0x91 100M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+wrote 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x5e
+qemu-io -c write -P 0x5e 3145728M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+wrote 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Open dev
+sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Set dev owner
+sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Read test pattern 0x91
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+read 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Read test pattern 0x5e
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+read 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Close dev
+sudo cryptsetup -q -v luksClose qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Delete image
+unlink TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+
+# ================= qemu-img aes-256-cbc-essiv-auto-sha1 =================
+# Create image
+qemu-img create -f luks --object secret,id=sec0,data=MTIzNDU2,format=base64 -o key-secret=sec0,cipher-alg=aes-256,cipher-mode=cbc,ivgen-alg=essiv,hash-alg=sha1 TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img 4194304M
+Formatting 'TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img', fmt=luks size=4398046511104 key-secret=sec0 cipher-alg=aes-256 cipher-mode=cbc ivgen-alg=essiv hash-alg=sha1
+
+# Open dev
+sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Set dev owner
+sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Write test pattern 0xa7
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+wrote 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x13
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+wrote 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Close dev
+sudo cryptsetup -q -v luksClose qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Read test pattern 0xa7
+qemu-io -c read -P 0xa7 100M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+read 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Read test pattern 0x13
+qemu-io -c read -P 0x13 3145728M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+read 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x91
+qemu-io -c write -P 0x91 100M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+wrote 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x5e
+qemu-io -c write -P 0x5e 3145728M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+wrote 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Open dev
+sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Set dev owner
+sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Read test pattern 0x91
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+read 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Read test pattern 0x5e
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-essiv-auto-sha1
+read 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Close dev
+sudo cryptsetup -q -v luksClose qiotest-145-aes-256-cbc-essiv-auto-sha1
+# Delete image
+unlink TEST_DIR/luks-aes-256-cbc-essiv-auto-sha1.img
+
+# ================= dm-crypt aes-256-cbc-plain64-sha256-sha1 =================
+# Create image
+truncate TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img --size 4194304MB
+# Format image
+sudo cryptsetup -q -v luksFormat --cipher aes-cbc-plain64:sha256 --key-size 256 --hash sha1 --key-slot 0 --key-file - TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+# Open dev
+sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Set dev owner
+sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Write test pattern 0xa7
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+wrote 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x13
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+wrote 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Close dev
+sudo cryptsetup -q -v luksClose qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Read test pattern 0xa7
+qemu-io -c read -P 0xa7 100M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+read 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Read test pattern 0x13
+qemu-io -c read -P 0x13 3145728M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+read 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x91
+qemu-io -c write -P 0x91 100M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+wrote 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x5e
+qemu-io -c write -P 0x5e 3145728M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+wrote 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Open dev
+sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Set dev owner
+sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Read test pattern 0x91
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+read 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Read test pattern 0x5e
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+read 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Close dev
+sudo cryptsetup -q -v luksClose qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Delete image
+unlink TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+
+# ================= qemu-img aes-256-cbc-plain64-sha256-sha1 =================
+# Create image
+qemu-img create -f luks --object secret,id=sec0,data=MTIzNDU2,format=base64 -o key-secret=sec0,cipher-alg=aes-256,cipher-mode=cbc,ivgen-alg=plain64,hash-alg=sha1,ivgen-hash-alg=sha256 TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img 4194304M
+Formatting 'TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img', fmt=luks size=4398046511104 key-secret=sec0 cipher-alg=aes-256 cipher-mode=cbc ivgen-alg=plain64 ivgen-hash-alg=sha256 hash-alg=sha1
+
+# Open dev
+sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Set dev owner
+sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Write test pattern 0xa7
+qemu-io -c write -P 0xa7 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+wrote 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x13
+qemu-io -c write -P 0x13 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+wrote 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Close dev
+sudo cryptsetup -q -v luksClose qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Read test pattern 0xa7
+qemu-io -c read -P 0xa7 100M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+read 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Read test pattern 0x13
+qemu-io -c read -P 0x13 3145728M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+read 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x91
+qemu-io -c write -P 0x91 100M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+wrote 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Write test pattern 0x5e
+qemu-io -c write -P 0x5e 3145728M 10M --object secret,id=sec0,data=MTIzNDU2,format=base64 --image-opts driver=luks,key-secret=sec0,file.filename=TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+wrote 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Open dev
+sudo cryptsetup -q -v luksOpen TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Set dev owner
+sudo chown UID:GID /dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Read test pattern 0x91
+qemu-io -c read -P 0x91 100M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+read 10485760/10485760 bytes at offset 104857600
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Read test pattern 0x5e
+qemu-io -c read -P 0x5e 3145728M 10M --image-opts driver=file,filename=/dev/mapper/qiotest-145-aes-256-cbc-plain64-sha256-sha1
+read 10485760/10485760 bytes at offset 3298534883328
+10 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+# Close dev
+sudo cryptsetup -q -v luksClose qiotest-145-aes-256-cbc-plain64-sha256-sha1
+# Delete image
+unlink TEST_DIR/luks-aes-256-cbc-plain64-sha256-sha1.img
+
diff --git a/tests/test-crypto-hash.c b/tests/test-crypto-hash.c
index 6e0e89f7d6..42fc77a107 100644
--- a/tests/test-crypto-hash.c
+++ b/tests/test-crypto-hash.c
@@ -30,27 +30,56 @@
#define OUTPUT_MD5 "628d206371563035ab8ef62f492bdec9"
#define OUTPUT_SHA1 "b2e74f26758a3a421e509cee045244b78753cc02"
+#define OUTPUT_SHA224 "e2f7415aad33ef79f6516b0986d7175f" \
+ "9ca3389a85bf6cfed078737b"
#define OUTPUT_SHA256 "bc757abb0436586f392b437e5dd24096" \
"f7f224de6b74d4d86e2abc6121b160d0"
+#define OUTPUT_SHA384 "887ce52efb4f46700376356583b7e279" \
+ "4f612bd024e4495087ddb946c448c69d" \
+ "56dbf7152a94a5e63a80f3ba9f0eed78"
+#define OUTPUT_SHA512 "3a90d79638235ec6c4c11bebd84d83c0" \
+ "549bc1e84edc4b6ec7086487641256cb" \
+ "63b54e4cb2d2032b393994aa263c0dbb" \
+ "e00a9f2fe9ef6037352232a1eec55ee7"
+#define OUTPUT_RIPEMD160 "f3d658fad3fdfb2b52c9369cf0d441249ddfa8a0"
#define OUTPUT_MD5_B64 "Yo0gY3FWMDWrjvYvSSveyQ=="
#define OUTPUT_SHA1_B64 "sudPJnWKOkIeUJzuBFJEt4dTzAI="
+#define OUTPUT_SHA224_B64 "4vdBWq0z73n2UWsJhtcXX5yjOJqFv2z+0Hhzew=="
#define OUTPUT_SHA256_B64 "vHV6uwQ2WG85K0N+XdJAlvfyJN5rdNTYbiq8YSGxYNA="
+#define OUTPUT_SHA384_B64 "iHzlLvtPRnADdjVlg7fieU9hK9Ak5ElQh925RsRI" \
+ "xp1W2/cVKpSl5jqA87qfDu14"
+#define OUTPUT_SHA512_B64 "OpDXljgjXsbEwRvr2E2DwFSbwehO3Etuxwhkh2QS" \
+ "VstjtU5MstIDKzk5lKomPA274AqfL+nvYDc1IjKh" \
+ "7sVe5w=="
+#define OUTPUT_RIPEMD160_B64 "89ZY+tP9+ytSyTac8NRBJJ3fqKA="
static const char *expected_outputs[] = {
[QCRYPTO_HASH_ALG_MD5] = OUTPUT_MD5,
[QCRYPTO_HASH_ALG_SHA1] = OUTPUT_SHA1,
+ [QCRYPTO_HASH_ALG_SHA224] = OUTPUT_SHA224,
[QCRYPTO_HASH_ALG_SHA256] = OUTPUT_SHA256,
+ [QCRYPTO_HASH_ALG_SHA384] = OUTPUT_SHA384,
+ [QCRYPTO_HASH_ALG_SHA512] = OUTPUT_SHA512,
+ [QCRYPTO_HASH_ALG_RIPEMD160] = OUTPUT_RIPEMD160,
};
static const char *expected_outputs_b64[] = {
[QCRYPTO_HASH_ALG_MD5] = OUTPUT_MD5_B64,
[QCRYPTO_HASH_ALG_SHA1] = OUTPUT_SHA1_B64,
+ [QCRYPTO_HASH_ALG_SHA224] = OUTPUT_SHA224_B64,
[QCRYPTO_HASH_ALG_SHA256] = OUTPUT_SHA256_B64,
+ [QCRYPTO_HASH_ALG_SHA384] = OUTPUT_SHA384_B64,
+ [QCRYPTO_HASH_ALG_SHA512] = OUTPUT_SHA512_B64,
+ [QCRYPTO_HASH_ALG_RIPEMD160] = OUTPUT_RIPEMD160_B64,
};
static const int expected_lens[] = {
[QCRYPTO_HASH_ALG_MD5] = 16,
[QCRYPTO_HASH_ALG_SHA1] = 20,
+ [QCRYPTO_HASH_ALG_SHA224] = 28,
[QCRYPTO_HASH_ALG_SHA256] = 32,
+ [QCRYPTO_HASH_ALG_SHA384] = 48,
+ [QCRYPTO_HASH_ALG_SHA512] = 64,
+ [QCRYPTO_HASH_ALG_RIPEMD160] = 20,
};
static const char hex[] = "0123456789abcdef";
@@ -68,6 +97,10 @@ static void test_hash_alloc(void)
int ret;
size_t j;
+ if (!qcrypto_hash_supports(i)) {
+ continue;
+ }
+
ret = qcrypto_hash_bytes(i,
INPUT_TEXT,
strlen(INPUT_TEXT),
@@ -98,6 +131,10 @@ static void test_hash_prealloc(void)
int ret;
size_t j;
+ if (!qcrypto_hash_supports(i)) {
+ continue;
+ }
+
resultlen = expected_lens[i];
result = g_new0(uint8_t, resultlen);
@@ -137,6 +174,10 @@ static void test_hash_iov(void)
int ret;
size_t j;
+ if (!qcrypto_hash_supports(i)) {
+ continue;
+ }
+
ret = qcrypto_hash_bytesv(i,
iov, 3,
&result,
@@ -165,6 +206,10 @@ static void test_hash_digest(void)
char *digest;
size_t digestsize;
+ if (!qcrypto_hash_supports(i)) {
+ continue;
+ }
+
digestsize = qcrypto_hash_digest_len(i);
g_assert_cmpint(digestsize * 2, ==, strlen(expected_outputs[i]));
@@ -175,7 +220,7 @@ static void test_hash_digest(void)
&digest,
NULL);
g_assert(ret == 0);
- g_assert(g_str_equal(digest, expected_outputs[i]));
+ g_assert_cmpstr(digest, ==, expected_outputs[i]);
g_free(digest);
}
}
@@ -191,13 +236,17 @@ static void test_hash_base64(void)
int ret;
char *digest;
+ if (!qcrypto_hash_supports(i)) {
+ continue;
+ }
+
ret = qcrypto_hash_base64(i,
INPUT_TEXT,
strlen(INPUT_TEXT),
&digest,
NULL);
g_assert(ret == 0);
- g_assert(g_str_equal(digest, expected_outputs_b64[i]));
+ g_assert_cmpstr(digest, ==, expected_outputs_b64[i]);
g_free(digest);
}
}
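
Because the available hash algorithms depend on the crypto backend QEMU was built against, the tests now skip anything qcrypto_hash_supports() rejects. Non-test callers want the same guard; a sketch against the qcrypto API, assuming the qcrypto_hash_digest() signature exercised by the test above:

    /* Sketch of guarding a digest computation on backend support. */
    #include "qemu/osdep.h"
    #include "crypto/hash.h"

    static char *checksum_or_null(const char *buf, size_t len)
    {
        char *digest = NULL;

        if (!qcrypto_hash_supports(QCRYPTO_HASH_ALG_SHA256)) {
            return NULL;    /* backend built without SHA-256 */
        }
        if (qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, buf, len,
                                &digest, NULL) < 0) {
            return NULL;
        }
        return digest;      /* caller g_free()s the hex string */
    }
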
diff --git a/tests/test-crypto-xts.c b/tests/test-crypto-xts.c
index 7f68b063cd..1f1412c45a 100644
--- a/tests/test-crypto-xts.c
+++ b/tests/test-crypto-xts.c
@@ -340,7 +340,7 @@ static void test_xts_aes_decrypt(const void *ctx,
static void test_xts(const void *opaque)
{
const QCryptoXTSTestData *data = opaque;
- unsigned char OUT[512], Torg[16], T[16];
+ unsigned char out[512], Torg[16], T[16];
uint64_t seq;
int j;
unsigned long len;
@@ -371,38 +371,38 @@ static void test_xts(const void *opaque)
xts_encrypt(&aesdata, &aestweak,
test_xts_aes_encrypt,
test_xts_aes_decrypt,
- T, data->PTLEN, OUT, data->PTX);
+ T, data->PTLEN, out, data->PTX);
} else {
xts_encrypt(&aesdata, &aestweak,
test_xts_aes_encrypt,
test_xts_aes_decrypt,
- T, len, OUT, data->PTX);
+ T, len, out, data->PTX);
xts_encrypt(&aesdata, &aestweak,
test_xts_aes_encrypt,
test_xts_aes_decrypt,
- T, len, &OUT[len], &data->PTX[len]);
+ T, len, &out[len], &data->PTX[len]);
}
- g_assert(memcmp(OUT, data->CTX, data->PTLEN) == 0);
+ g_assert(memcmp(out, data->CTX, data->PTLEN) == 0);
memcpy(T, Torg, sizeof(T));
if (j == 0) {
xts_decrypt(&aesdata, &aestweak,
test_xts_aes_encrypt,
test_xts_aes_decrypt,
- T, data->PTLEN, OUT, data->CTX);
+ T, data->PTLEN, out, data->CTX);
} else {
xts_decrypt(&aesdata, &aestweak,
test_xts_aes_encrypt,
test_xts_aes_decrypt,
- T, len, OUT, data->CTX);
+ T, len, out, data->CTX);
xts_decrypt(&aesdata, &aestweak,
test_xts_aes_encrypt,
test_xts_aes_decrypt,
- T, len, &OUT[len], &data->CTX[len]);
+ T, len, &out[len], &data->CTX[len]);
}
- g_assert(memcmp(OUT, data->PTX, data->PTLEN) == 0);
+ g_assert(memcmp(out, data->PTX, data->PTLEN) == 0);
}
}
diff --git a/tests/test-io-channel-socket.c b/tests/test-io-channel-socket.c
index 855306b8dd..f73e063d7d 100644
--- a/tests/test-io-channel-socket.c
+++ b/tests/test-io-channel-socket.c
@@ -383,7 +383,7 @@ static void test_io_channel_unix(bool async)
qapi_free_SocketAddress(listen_addr);
qapi_free_SocketAddress(connect_addr);
- unlink(TEST_SOCKET);
+ g_assert(g_file_test(TEST_SOCKET, G_FILE_TEST_EXISTS) == FALSE);
}
diff --git a/tests/test-qmp-input-visitor.c b/tests/test-qmp-input-visitor.c
index 3b6b39e297..1a4585c553 100644
--- a/tests/test-qmp-input-visitor.c
+++ b/tests/test-qmp-input-visitor.c
@@ -766,6 +766,8 @@ static void test_visitor_in_errors(TestInputVisitorData *data,
Error *err = NULL;
Visitor *v;
strList *q = NULL;
+ UserDefTwo *r = NULL;
+ WrapAlternate *s = NULL;
v = visitor_input_test_init(data, "{ 'integer': false, 'boolean': 'foo', "
"'string': -42 }");
@@ -778,6 +780,16 @@ static void test_visitor_in_errors(TestInputVisitorData *data,
visit_type_strList(v, NULL, &q, &err);
error_free_or_abort(&err);
assert(!q);
+
+ v = visitor_input_test_init(data, "{ 'str':'hi' }");
+ visit_type_UserDefTwo(v, NULL, &r, &err);
+ error_free_or_abort(&err);
+ assert(!r);
+
+ v = visitor_input_test_init(data, "{ }");
+ visit_type_WrapAlternate(v, NULL, &s, &err);
+ error_free_or_abort(&err);
+ assert(!s);
}
static void test_visitor_in_wrong_type(TestInputVisitorData *data,
diff --git a/tests/vhost-user-test.c b/tests/vhost-user-test.c
index 8b2164b99d..421d432f44 100644
--- a/tests/vhost-user-test.c
+++ b/tests/vhost-user-test.c
@@ -127,25 +127,12 @@ typedef struct TestServer {
int fds_num;
int fds[VHOST_MEMORY_MAX_NREGIONS];
VhostUserMemory memory;
- GMutex data_mutex;
- GCond data_cond;
+ CompatGMutex data_mutex;
+ CompatGCond data_cond;
int log_fd;
uint64_t rings;
} TestServer;
-#if !GLIB_CHECK_VERSION(2, 32, 0)
-static gboolean g_cond_wait_until(CompatGCond cond, CompatGMutex mutex,
- gint64 end_time)
-{
- gboolean ret = FALSE;
- end_time -= g_get_monotonic_time();
- GTimeVal time = { end_time / G_TIME_SPAN_SECOND,
- end_time % G_TIME_SPAN_SECOND };
- ret = g_cond_timed_wait(cond, mutex, &time);
- return ret;
-}
-#endif
-
static const char *tmpfs;
static const char *root;
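
The local g_cond_wait_until() fallback can go because glib-compat.h's CompatGMutex/CompatGCond already provide it on pre-2.32 glib, so the test simply uses the normal API. A standalone illustration of that API with plain GMutex/GCond (the Compat types are drop-in replacements when older glib must be supported):

    /* build with: gcc wait_until.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>

    static GMutex lock;
    static GCond cond;
    static gboolean ready;

    static gpointer worker(gpointer data)
    {
        g_mutex_lock(&lock);
        ready = TRUE;
        g_cond_broadcast(&cond);
        g_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        GThread *t = g_thread_new("worker", worker, NULL);
        gint64 deadline = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;

        g_mutex_lock(&lock);
        while (!ready) {
            /* Returns FALSE once the deadline passes without a signal. */
            if (!g_cond_wait_until(&cond, &lock, deadline)) {
                break;
            }
        }
        g_mutex_unlock(&lock);
        g_thread_join(t);
        return ready ? 0 : 1;
    }
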
diff --git a/trace-events b/trace-events
index 9d76de8574..476705996b 100644
--- a/trace-events
+++ b/trace-events
@@ -156,3 +156,19 @@ memory_region_tb_write(int cpu_index, uint64_t addr, uint64_t value, unsigned si
#
# Targets: TCG(all)
disable vcpu tcg guest_mem_before(TCGv vaddr, uint8_t info) "info=%d", "vaddr=0x%016"PRIx64" info=%d"
+
+# @num: System call number.
+# @arg*: System call argument value.
+#
+# Start executing a guest system call in syscall emulation mode.
+#
+# Targets: TCG(all)
+disable vcpu guest_user_syscall(uint64_t num, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, uint64_t arg6, uint64_t arg7, uint64_t arg8) "num=0x%016"PRIx64" arg1=0x%016"PRIx64" arg2=0x%016"PRIx64" arg3=0x%016"PRIx64" arg4=0x%016"PRIx64" arg5=0x%016"PRIx64" arg6=0x%016"PRIx64" arg7=0x%016"PRIx64" arg8=0x%016"PRIx64
+
+# @num: System call number.
+# @ret: System call result value.
+#
+# Finish executing a guest system call in syscall emulation mode.
+#
+# Targets: TCG(all)
+disable vcpu guest_user_syscall_ret(uint64_t num, uint64_t ret) "num=0x%016"PRIx64" ret=0x%016"PRIx64
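
These are "vcpu" events, so the generated helpers take the CPUState as their first argument. The *-user syscall paths touched elsewhere in this series are expected to emit them roughly as in the sketch below (dispatch_syscall() is a hypothetical wrapper, not an actual QEMU function):

    /* Sketch only: emitting the new vcpu events around a guest syscall in a
     * *-user front end.  The trace_* helpers are generated from trace-events. */
    static abi_long dispatch_syscall(CPUState *cpu, CPUArchState *env, int num,
                                     abi_long a1, abi_long a2, abi_long a3,
                                     abi_long a4, abi_long a5, abi_long a6,
                                     abi_long a7, abi_long a8)
    {
        abi_long ret;

        trace_guest_user_syscall(cpu, num, a1, a2, a3, a4, a5, a6, a7, a8);
        ret = do_syscall(env, num, a1, a2, a3, a4, a5, a6, a7, a8);
        trace_guest_user_syscall_ret(cpu, num, ret);
        return ret;
    }
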
diff --git a/trace/control.c b/trace/control.c
index e1556a3570..86de8b9983 100644
--- a/trace/control.c
+++ b/trace/control.c
@@ -21,11 +21,33 @@
#endif
#include "qapi/error.h"
#include "qemu/error-report.h"
+#include "qemu/config-file.h"
#include "monitor/monitor.h"
int trace_events_enabled_count;
bool trace_events_dstate[TRACE_EVENT_COUNT];
+QemuOptsList qemu_trace_opts = {
+ .name = "trace",
+ .implied_opt_name = "enable",
+ .head = QTAILQ_HEAD_INITIALIZER(qemu_trace_opts.head),
+ .desc = {
+ {
+ .name = "enable",
+ .type = QEMU_OPT_STRING,
+ },
+ {
+ .name = "events",
+ .type = QEMU_OPT_STRING,
+ },
+ {
+ .name = "file",
+ .type = QEMU_OPT_STRING,
+ },
+ { /* end of list */ }
+ },
+};
+
+
TraceEvent *trace_event_name(const char *name)
{
assert(name != NULL);
@@ -142,7 +164,7 @@ void trace_enable_events(const char *line_buf)
}
}
-void trace_init_events(const char *fname)
+static void trace_init_events(const char *fname)
{
Location loc;
FILE *fp;
@@ -217,3 +239,21 @@ bool trace_init_backends(void)
return true;
}
+
+char *trace_opt_parse(const char *optarg)
+{
+ char *trace_file;
+ QemuOpts *opts = qemu_opts_parse_noisily(qemu_find_opts("trace"),
+ optarg, true);
+ if (!opts) {
+ exit(1);
+ }
+ if (qemu_opt_get(opts, "enable")) {
+ trace_enable_events(qemu_opt_get(opts, "enable"));
+ }
+ trace_init_events(qemu_opt_get(opts, "events"));
+ trace_file = g_strdup(qemu_opt_get(opts, "file"));
+ qemu_opts_del(opts);
+
+ return trace_file;
+}
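
trace_opt_parse() centralises handling of the option group declared above; a front end registers the group, parses each -trace argument, and hands the returned file name to trace_init_file() once the backends are up. A rough sketch of that wiring under those assumptions (not a verbatim copy of vl.c):

    /* Rough sketch of wiring up -trace in a front end; assumes the
     * trace_opt_parse()/trace_init_file() interfaces from trace/control.h
     * and the usual qemu option-group helpers. */
    #include "qemu/osdep.h"
    #include "qemu/config-file.h"
    #include "trace/control.h"

    static char *trace_file;

    static void handle_args(int argc, char **argv)
    {
        qemu_add_opts(&qemu_trace_opts);          /* register the "trace" group */

        for (int i = 1; i < argc - 1; i++) {
            if (!strcmp(argv[i], "-trace") || !strcmp(argv[i], "--trace")) {
                g_free(trace_file);
                trace_file = trace_opt_parse(argv[++i]);
            }
        }

        if (!trace_init_backends()) {
            exit(1);
        }
        trace_init_file(trace_file);              /* NULL keeps the default */
    }
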
diff --git a/trace/control.h b/trace/control.h
index e2ba6d4de1..a2dd3eaedf 100644
--- a/trace/control.h
+++ b/trace/control.h
@@ -160,17 +160,6 @@ static void trace_event_set_state_dynamic(TraceEvent *ev, bool state);
bool trace_init_backends(void);
/**
- * trace_init_events:
- * @events: Name of file with events to be enabled at startup; may be NULL.
- * Corresponds to commandline option "-trace events=...".
- *
- * Read the list of enabled tracing events.
- *
- * Returns: Whether the backends could be successfully initialized.
- */
-void trace_init_events(const char *file);
-
-/**
* trace_init_file:
* @file: Name of trace output file; may be NULL.
* Corresponds to commandline option "-trace file=...".
@@ -197,6 +186,20 @@ void trace_list_events(void);
*/
void trace_enable_events(const char *line_buf);
+/**
+ * Definition of QEMU options describing trace subsystem configuration
+ */
+extern QemuOptsList qemu_trace_opts;
+
+/**
+ * trace_opt_parse:
+ * @optarg: The string argument passed to the --trace command line option
+ *
+ * Initialize tracing subsystem.
+ *
+ * Returns the filename to save trace to. It must be freed with g_free().
+ */
+char *trace_opt_parse(const char *optarg);
#include "trace/control-internal.h"
diff --git a/ui/vnc.c b/ui/vnc.c
index 95e4db763b..18c0b56c3a 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -3205,29 +3205,24 @@ int vnc_display_pw_expire(const char *id, time_t expires)
return 0;
}
-char *vnc_display_local_addr(const char *id)
+static void vnc_display_print_local_addr(VncDisplay *vs)
{
- VncDisplay *vs = vnc_display_find(id);
SocketAddress *addr;
- char *ret;
Error *err = NULL;
- assert(vs);
-
addr = qio_channel_socket_get_local_address(vs->lsock, &err);
if (!addr) {
- return NULL;
+ return;
}
if (addr->type != SOCKET_ADDRESS_KIND_INET) {
qapi_free_SocketAddress(addr);
- return NULL;
+ return;
}
- ret = g_strdup_printf("%s:%s", addr->u.inet.data->host,
- addr->u.inet.data->port);
+ error_printf_unless_qmp("VNC server running on %s:%s\n",
+ addr->u.inet.data->host,
+ addr->u.inet.data->port);
qapi_free_SocketAddress(addr);
-
- return ret;
}
static QemuOptsList qemu_vnc_opts = {
@@ -3513,6 +3508,7 @@ void vnc_display_open(const char *id, Error **errp)
const char *vnc;
char *h;
const char *credid;
+ int show_vnc_port = 0;
bool sasl = false;
#ifdef CONFIG_VNC_SASL
int saslErr;
@@ -3592,6 +3588,7 @@ void vnc_display_open(const char *id, Error **errp)
if (to) {
inet->has_to = true;
inet->to = to + 5900;
+ show_vnc_port = 1;
}
inet->ipv4 = ipv4;
inet->has_ipv4 = has_ipv4;
@@ -3836,6 +3833,10 @@ void vnc_display_open(const char *id, Error **errp)
}
}
+ if (show_vnc_port) {
+ vnc_display_print_local_addr(vs);
+ }
+
qapi_free_SocketAddress(saddr);
qapi_free_SocketAddress(wsaddr);
return;
diff --git a/user-exec.c b/user-exec.c
index 50e95a68de..95f9f97c5c 100644
--- a/user-exec.c
+++ b/user-exec.c
@@ -117,14 +117,7 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
#if defined(__i386__)
-#if defined(__APPLE__)
-#include <sys/ucontext.h>
-
-#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext->ss.eip))
-#define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
-#define ERROR_sig(context) ((context)->uc_mcontext->es.err)
-#define MASK_sig(context) ((context)->uc_sigmask)
-#elif defined(__NetBSD__)
+#if defined(__NetBSD__)
#include <ucontext.h>
#define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
@@ -274,44 +267,6 @@ int cpu_signal_handler(int host_signum, void *pinfo,
#define TRAP_sig(context) ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */
-#ifdef __APPLE__
-#include <sys/ucontext.h>
-typedef struct ucontext SIGCONTEXT;
-/* All Registers access - only for local access */
-#define REG_sig(reg_name, context) \
- ((context)->uc_mcontext->ss.reg_name)
-#define FLOATREG_sig(reg_name, context) \
- ((context)->uc_mcontext->fs.reg_name)
-#define EXCEPREG_sig(reg_name, context) \
- ((context)->uc_mcontext->es.reg_name)
-#define VECREG_sig(reg_name, context) \
- ((context)->uc_mcontext->vs.reg_name)
-/* Gpr Registers access */
-#define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
-/* Program counter */
-#define IAR_sig(context) REG_sig(srr0, context)
-/* Machine State Register (Supervisor) */
-#define MSR_sig(context) REG_sig(srr1, context)
-#define CTR_sig(context) REG_sig(ctr, context)
-/* Link register */
-#define XER_sig(context) REG_sig(xer, context)
-/* User's integer exception register */
-#define LR_sig(context) REG_sig(lr, context)
-/* Condition register */
-#define CR_sig(context) REG_sig(cr, context)
-/* Float Registers access */
-#define FLOAT_sig(reg_num, context) \
- FLOATREG_sig(fpregs[reg_num], context)
-#define FPSCR_sig(context) \
- ((double)FLOATREG_sig(fpscr, context))
-/* Exception Registers access */
-/* Fault registers for coredump */
-#define DAR_sig(context) EXCEPREG_sig(dar, context)
-#define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
-/* number of powerpc exception taken */
-#define TRAP_sig(context) EXCEPREG_sig(exception, context)
-#endif /* __APPLE__ */
-
int cpu_signal_handler(int host_signum, void *pinfo,
void *puc)
{
@@ -494,24 +449,6 @@ int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
is_write, &uc->uc_sigmask);
}
-#elif defined(__mc68000)
-
-int cpu_signal_handler(int host_signum, void *pinfo,
- void *puc)
-{
- siginfo_t *info = pinfo;
- struct ucontext *uc = puc;
- unsigned long pc;
- int is_write;
-
- pc = uc->uc_mcontext.gregs[16];
- /* XXX: compute is_write */
- is_write = 0;
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- is_write,
- &uc->uc_sigmask);
-}
-
#elif defined(__ia64)
#ifndef __ISR_VALID
@@ -616,48 +553,6 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write, &uc->uc_sigmask);
}
-#elif defined(__hppa__)
-
-int cpu_signal_handler(int host_signum, void *pinfo,
- void *puc)
-{
- siginfo_t *info = pinfo;
- struct ucontext *uc = puc;
- unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
- uint32_t insn = *(uint32_t *)pc;
- int is_write = 0;
-
- /* XXX: need kernel patch to get write flag faster. */
- switch (insn >> 26) {
- case 0x1a: /* STW */
- case 0x19: /* STH */
- case 0x18: /* STB */
- case 0x1b: /* STWM */
- is_write = 1;
- break;
-
- case 0x09: /* CSTWX, FSTWX, FSTWS */
- case 0x0b: /* CSTDX, FSTDX, FSTDS */
- /* Distinguish from coprocessor load ... */
- is_write = (insn >> 9) & 1;
- break;
-
- case 0x03:
- switch ((insn >> 6) & 15) {
- case 0xa: /* STWS */
- case 0x9: /* STHS */
- case 0x8: /* STBS */
- case 0xe: /* STWAS */
- case 0xc: /* STBYS */
- is_write = 1;
- }
- break;
- }
-
- return handle_cpu_signal(pc, (unsigned long)info->si_addr,
- is_write, &uc->uc_sigmask);
-}
-
#else
#error host CPU specific signal handler needed
diff --git a/util/Makefile.objs b/util/Makefile.objs
index 45f8794864..96cb1e0e58 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -34,3 +34,4 @@ util-obj-y += base64.o
util-obj-y += log.o
util-obj-y += qdist.o
util-obj-y += qht.o
+util-obj-y += range.o
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index 0d6cd1f4ef..cc2b043907 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -997,6 +997,24 @@ int socket_listen(SocketAddress *addr, Error **errp)
return fd;
}
+void socket_listen_cleanup(int fd, Error **errp)
+{
+ SocketAddress *addr;
+
+ addr = socket_local_address(fd, errp);
+
+ if (addr->type == SOCKET_ADDRESS_KIND_UNIX
+ && addr->u.q_unix.data->path) {
+ if (unlink(addr->u.q_unix.data->path) < 0 && errno != ENOENT) {
+ error_setg_errno(errp, errno,
+ "Failed to unlink socket %s",
+ addr->u.q_unix.data->path);
+ }
+ }
+
+ g_free(addr);
+}
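
A sketch, not part of this patch, of pairing the new helper with closing the descriptor so that a UNIX-domain listener's filesystem path is removed again. It assumes the matching socket_listen_cleanup() declaration is added to include/qemu/sockets.h elsewhere in this series; teardown_listener() is a hypothetical helper.

#include "qemu/osdep.h"
#include "qemu/sockets.h"
#include "qapi/error.h"

static void teardown_listener(int fd, Error **errp)
{
    /* Looks up the local address and unlink()s it for unix sockets only;
     * inet and fd listeners are left alone. */
    socket_listen_cleanup(fd, errp);
    closesocket(fd);
}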
+
int socket_dgram(SocketAddress *remote, SocketAddress *local, Error **errp)
{
int fd;
@@ -1151,3 +1169,39 @@ void qapi_copy_SocketAddress(SocketAddress **p_dest,
qmp_input_visitor_cleanup(qiv);
qobject_decref(obj);
}
+
+char *socket_address_to_string(struct SocketAddress *addr, Error **errp)
+{
+ char *buf;
+ InetSocketAddress *inet;
+ char host_port[INET6_ADDRSTRLEN + 5 + 4];
+
+ switch (addr->type) {
+ case SOCKET_ADDRESS_KIND_INET:
+ inet = addr->u.inet.data;
+ if (strchr(inet->host, ':') == NULL) {
+ snprintf(host_port, sizeof(host_port), "%s:%s", inet->host,
+ inet->port);
+ buf = g_strdup(host_port);
+ } else {
+ snprintf(host_port, sizeof(host_port), "[%s]:%s", inet->host,
+ inet->port);
+ buf = g_strdup(host_port);
+ }
+ break;
+
+ case SOCKET_ADDRESS_KIND_UNIX:
+ buf = g_strdup(addr->u.q_unix.data->path);
+ break;
+
+ case SOCKET_ADDRESS_KIND_FD:
+ buf = g_strdup(addr->u.fd.data->str);
+ break;
+
+ default:
+ error_setg(errp, "socket family %d unsupported",
+ addr->type);
+ return NULL;
+ }
+ return buf;
+}
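
A sketch, not part of this patch, of the intended use: turning a SocketAddress into a printable endpoint string. The declaration is assumed to be added to include/qemu/sockets.h elsewhere in this series, and print_endpoint() is a hypothetical helper.

#include "qemu/osdep.h"
#include "qemu/sockets.h"
#include "qapi/error.h"

static void print_endpoint(SocketAddress *addr)
{
    Error *err = NULL;
    char *str = socket_address_to_string(addr, &err);

    if (!str) {
        error_free(err);            /* unsupported address family */
        return;
    }
    /* "host:port", "[v6host]:port", a unix path, or the fd string */
    printf("listening on %s\n", str);
    g_free(str);
}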
diff --git a/util/range.c b/util/range.c
new file mode 100644
index 0000000000..e90c988dbf
--- /dev/null
+++ b/util/range.c
@@ -0,0 +1,76 @@
+/*
+ * QEMU 64-bit address ranges
+ *
+ * Copyright (c) 2015-2016 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/range.h"
+
+/*
+ * Operations on 64 bit address ranges.
+ * Notes:
+ * - ranges must not wrap around 0, but can include the last byte ~0x0LL.
+ * - this cannot represent a full 0 to ~0x0LL range.
+ */
+
+/* Return -1 if @a lies entirely below @b, 1 if entirely above, and
+ * 0 if they touch or overlap. */
+static inline int range_compare(Range *a, Range *b)
+{
+ /* Zero a->end is 2**64, and therefore not less than any b->begin */
+ if (a->end && a->end < b->begin) {
+ return -1;
+ }
+ if (b->end && a->begin > b->end) {
+ return 1;
+ }
+ return 0;
+}
+
+/* Insert @data into @list of ranges; caller no longer owns @data */
+GList *range_list_insert(GList *list, Range *data)
+{
+ GList *l;
+
+ /* Range lists require no empty ranges */
+ assert(data->begin < data->end || (data->begin && !data->end));
+
+ /* Skip all list elements strictly less than data */
+ for (l = list; l && range_compare(l->data, data) < 0; l = l->next) {
+ }
+
+ if (!l || range_compare(l->data, data) > 0) {
+ /* Rest of the list (if any) is strictly greater than @data */
+ return g_list_insert_before(list, l, data);
+ }
+
+ /* Current list element overlaps @data, merge the two */
+ range_extend(l->data, data);
+ g_free(data);
+
+ /* Merge any subsequent list elements that now also overlap */
+ while (l->next && range_compare(l->data, l->next->data) == 0) {
+ GList *new_l;
+
+ range_extend(l->data, l->next->data);
+ g_free(l->next->data);
+ new_l = g_list_delete_link(list, l->next);
+ assert(new_l == list);
+ }
+
+ return list;
+}
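
A usage sketch, not part of this patch: building a sorted, merged range list. Field semantics follow the comments above (->end is one past the last byte, with 0 standing in for 2^64); add_range() is a hypothetical wrapper, and the range_list_insert() prototype is assumed to live in include/qemu/range.h.

#include "qemu/osdep.h"
#include "qemu/range.h"

static GList *add_range(GList *list, uint64_t begin, uint64_t end)
{
    Range *r = g_new0(Range, 1);

    r->begin = begin;
    r->end = end;
    return range_list_insert(list, r);   /* the list takes ownership of r */
}

/* add_range(NULL, 0x1000, 0x2000) followed by add_range(list, 0x1800, 0x3000)
 * leaves a single element covering [0x1000, 0x3000), because the second
 * range touches/overlaps the first and range_extend() merges them. */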
diff --git a/vl.c b/vl.c
index c85833a63c..9bb7f4cd70 100644
--- a/vl.c
+++ b/vl.c
@@ -262,26 +262,6 @@ static QemuOptsList qemu_sandbox_opts = {
},
};
-static QemuOptsList qemu_trace_opts = {
- .name = "trace",
- .implied_opt_name = "enable",
- .head = QTAILQ_HEAD_INITIALIZER(qemu_trace_opts.head),
- .desc = {
- {
- .name = "enable",
- .type = QEMU_OPT_STRING,
- },
- {
- .name = "events",
- .type = QEMU_OPT_STRING,
- },{
- .name = "file",
- .type = QEMU_OPT_STRING,
- },
- { /* end of list */ }
- },
-};
-
static QemuOptsList qemu_option_rom_opts = {
.name = "option-rom",
.implied_opt_name = "romfile",
@@ -1234,8 +1214,10 @@ static void smp_parse(QemuOpts *opts)
} else if (cores == 0) {
threads = threads > 0 ? threads : 1;
cores = cpus / (sockets * threads);
+ cores = cores > 0 ? cores : 1;
} else if (threads == 0) {
threads = cpus / (cores * sockets);
+ threads = threads > 0 ? threads : 1;
} else if (sockets * cores * threads < cpus) {
error_report("cpu topology: "
"sockets (%u) * cores (%u) * threads (%u) < "
@@ -1524,6 +1506,7 @@ MachineInfoList *qmp_query_machines(Error **errp)
info->name = g_strdup(mc->name);
info->cpu_max = !mc->max_cpus ? 1 : mc->max_cpus;
+ info->hotpluggable_cpus = !!mc->query_hotpluggable_cpus;
entry = g_malloc0(sizeof(*entry));
entry->value = info;
@@ -2947,7 +2930,6 @@ int main(int argc, char **argv, char **envp)
const char *qtest_log = NULL;
const char *pid_file = NULL;
const char *incoming = NULL;
- int show_vnc_port = 0;
bool defconfig = true;
bool userconfig = true;
bool nographic = false;
@@ -3863,23 +3845,9 @@ int main(int argc, char **argv, char **envp)
xen_mode = XEN_ATTACH;
break;
case QEMU_OPTION_trace:
- {
- opts = qemu_opts_parse_noisily(qemu_find_opts("trace"),
- optarg, true);
- if (!opts) {
- exit(1);
- }
- if (qemu_opt_get(opts, "enable")) {
- trace_enable_events(qemu_opt_get(opts, "enable"));
- }
- trace_init_events(qemu_opt_get(opts, "events"));
- if (trace_file) {
- g_free(trace_file);
- }
- trace_file = g_strdup(qemu_opt_get(opts, "file"));
- qemu_opts_del(opts);
+ g_free(trace_file);
+ trace_file = trace_opt_parse(optarg);
break;
- }
case QEMU_OPTION_readconfig:
{
int ret = qemu_read_config_file(optarg);
@@ -4216,7 +4184,6 @@ int main(int argc, char **argv, char **envp)
display_type = DT_COCOA;
#elif defined(CONFIG_VNC)
vnc_parse("localhost:0,to=99,id=default", &error_abort);
- show_vnc_port = 1;
#else
display_type = DT_NONE;
#endif
@@ -4565,11 +4532,6 @@ int main(int argc, char **argv, char **envp)
qemu_opts_foreach(qemu_find_opts("vnc"),
vnc_init_func, NULL, NULL);
#endif
- if (show_vnc_port) {
- char *ret = vnc_display_local_addr("default");
- printf("VNC server running on '%s'\n", ret);
- g_free(ret);
- }
if (using_spice) {
qemu_spice_display_init();