-rw-r--r--.gitignore11
-rw-r--r--.travis.yml24
-rw-r--r--HACKING18
-rw-r--r--MAINTAINERS32
-rw-r--r--Makefile97
-rw-r--r--Makefile.objs2
-rw-r--r--Makefile.target5
-rw-r--r--README1
-rw-r--r--aio-posix.c392
-rw-r--r--aio-win32.c115
-rw-r--r--arch_init.c50
-rw-r--r--async.c66
-rw-r--r--backends/baum.c31
-rw-r--r--backends/cryptodev-builtin.c69
-rw-r--r--backends/cryptodev.c34
-rw-r--r--backends/hostmem.c26
-rw-r--r--block.c2
-rw-r--r--block/Makefile.objs6
-rw-r--r--block/blkdebug.c86
-rw-r--r--block/blkverify.c201
-rw-r--r--block/curl.c8
-rw-r--r--block/file-posix.c (renamed from block/raw-posix.c)0
-rw-r--r--block/file-win32.c (renamed from block/raw-win32.c)0
-rw-r--r--block/gluster.c4
-rw-r--r--block/io.c41
-rw-r--r--block/iscsi.c3
-rw-r--r--block/linux-aio.c19
-rw-r--r--block/nbd-client.c8
-rw-r--r--block/nfs.c7
-rw-r--r--block/qcow.c8
-rw-r--r--block/quorum.c410
-rw-r--r--block/raw-format.c (renamed from block/raw_bsd.c)2
-rw-r--r--block/sheepdog.c26
-rw-r--r--block/ssh.c4
-rw-r--r--block/trace-events4
-rw-r--r--block/vdi.c8
-rw-r--r--block/vhdx.c17
-rw-r--r--block/vmdk.c9
-rw-r--r--block/vpc.c11
-rw-r--r--block/vvfat.c20
-rw-r--r--block/win32-aio.c4
-rwxr-xr-xconfigure43
-rw-r--r--contrib/libvhost-user/Makefile.objs1
-rw-r--r--contrib/libvhost-user/libvhost-user.c1499
-rw-r--r--contrib/libvhost-user/libvhost-user.h435
-rw-r--r--cpus.c79
-rw-r--r--cputlb.c21
-rw-r--r--default-configs/hppa-linux-user.mak1
-rw-r--r--default-configs/m68k-softmmu.mak2
-rw-r--r--default-configs/nios2-linux-user.mak1
-rw-r--r--default-configs/nios2-softmmu.mak6
-rw-r--r--default-configs/sparc64-softmmu.mak2
-rw-r--r--disas.c2
-rw-r--r--disas/Makefile.objs2
-rw-r--r--disas/cris.c2
-rw-r--r--disas/hppa.c2832
-rw-r--r--disas/i386.c12
-rw-r--r--disas/nios2.c3534
-rw-r--r--disas/ppc.c10
-rw-r--r--docs/colo-proxy.txt4
-rw-r--r--docs/lockcnt.txt277
-rw-r--r--docs/multiple-iothreads.txt13
-rw-r--r--docs/pcie.txt12
-rw-r--r--docs/qapi-code-gen.txt180
-rw-r--r--docs/qemu-ga-ref.texi78
-rw-r--r--docs/qemu-qmp-ref.texi78
-rw-r--r--docs/qemu_logo.pdfbin0 -> 9117 bytes
-rw-r--r--docs/qmp-commands.txt3824
-rw-r--r--docs/qmp-events.txt731
-rw-r--r--docs/qmp-intro.txt3
-rw-r--r--docs/replay.txt14
-rw-r--r--docs/specs/fw_cfg.txt36
-rw-r--r--docs/specs/pci-ids.txt3
-rw-r--r--docs/specs/vhost-user.txt16
-rw-r--r--docs/usb-storage.txt2
-rw-r--r--docs/usb2.txt2
-rw-r--r--exec.c43
-rw-r--r--fpu/softfloat-specialize.h20
-rw-r--r--gdbstub.c18
-rw-r--r--hax-stub.c34
-rw-r--r--hmp.c10
-rw-r--r--hw/9pfs/9p.c111
-rw-r--r--hw/9pfs/9p.h26
-rw-r--r--hw/9pfs/virtio-9p-device.c46
-rw-r--r--hw/9pfs/virtio-9p.h10
-rw-r--r--hw/Makefile.objs6
-rw-r--r--hw/acpi/Makefile.objs19
-rw-r--r--hw/acpi/acpi-stub.c29
-rw-r--r--hw/acpi/cpu.c6
-rw-r--r--hw/acpi/cpu_hotplug.c4
-rw-r--r--hw/acpi/ich9.c3
-rw-r--r--hw/acpi/ipmi-stub.c (renamed from stubs/ipmi.c)0
-rw-r--r--hw/acpi/memory_hotplug.c420
-rw-r--r--hw/acpi/memory_hotplug_acpi_table.c262
-rw-r--r--hw/acpi/piix4.c3
-rw-r--r--hw/arm/aspeed.c111
-rw-r--r--hw/arm/aspeed_soc.c95
-rw-r--r--hw/arm/imx25_pdk.c2
-rw-r--r--hw/arm/pxa2xx.c13
-rw-r--r--hw/arm/tosa.c11
-rw-r--r--hw/arm/virt-acpi-build.c168
-rw-r--r--hw/arm/virt.c788
-rw-r--r--hw/arm/xlnx-zynqmp.c2
-rw-r--r--hw/arm/z2.c11
-rw-r--r--hw/audio/ac97.c11
-rw-r--r--hw/audio/es1370.c20
-rw-r--r--hw/audio/marvell_88w8618.c18
-rw-r--r--hw/audio/pl041.c25
-rw-r--r--hw/audio/wm8750.c4
-rw-r--r--hw/block/m25p80.c85
-rw-r--r--hw/block/virtio-blk.c20
-rw-r--r--hw/char/cadence_uart.c14
-rw-r--r--hw/char/exynos4210_uart.c16
-rw-r--r--hw/char/serial.c10
-rw-r--r--hw/char/virtio-serial-bus.c3
-rw-r--r--hw/core/Makefile.objs4
-rw-r--r--hw/core/generic-loader.c4
-rw-r--r--hw/core/loader.c18
-rw-r--r--hw/core/null-machine.c27
-rw-r--r--hw/core/qdev-properties.c2
-rw-r--r--hw/core/reset.c72
-rw-r--r--hw/display/cirrus_vga.c11
-rw-r--r--hw/display/framebuffer.c2
-rw-r--r--hw/display/milkymist-tmu2.c2
-rw-r--r--hw/display/ssd0303.c4
-rw-r--r--hw/display/virtio-gpu-3d.c13
-rw-r--r--hw/display/virtio-gpu.c82
-rw-r--r--hw/display/xlnx_dp.c4
-rw-r--r--hw/gpio/max7310.c4
-rw-r--r--hw/i2c/core.c37
-rw-r--r--hw/i2c/i2c-ddc.c4
-rw-r--r--hw/i2c/imx_i2c.c2
-rw-r--r--hw/i2c/smbus.c13
-rw-r--r--hw/i386/acpi-build.c218
-rw-r--r--hw/i386/amd_iommu.c2
-rw-r--r--hw/i386/amd_iommu.h4
-rw-r--r--hw/i386/intel_iommu.c114
-rw-r--r--hw/i386/intel_iommu_internal.h13
-rw-r--r--hw/i386/kvm/apic.c1
-rw-r--r--hw/i386/kvmvapic.c7
-rw-r--r--hw/i386/pc.c57
-rw-r--r--hw/i386/pc_piix.c15
-rw-r--r--hw/i386/pc_q35.c13
-rw-r--r--hw/i386/pci-assign-load-rom.c16
-rw-r--r--hw/i386/x86-iommu.c17
-rw-r--r--hw/input/lm832x.c4
-rw-r--r--hw/input/ps2.c10
-rw-r--r--hw/intc/Makefile.objs1
-rw-r--r--hw/intc/apic_common.c3
-rw-r--r--hw/intc/arm_gic_common.c6
-rw-r--r--hw/intc/arm_gic_kvm.c17
-rw-r--r--hw/intc/arm_gicv3.c5
-rw-r--r--hw/intc/arm_gicv3_common.c34
-rw-r--r--hw/intc/arm_gicv3_cpuif.c1316
-rw-r--r--hw/intc/arm_gicv3_its_kvm.c20
-rw-r--r--hw/intc/arm_gicv3_kvm.c19
-rw-r--r--hw/intc/gicv3_internal.h79
-rw-r--r--hw/intc/ioapic.c22
-rw-r--r--hw/intc/ioapic_common.c3
-rw-r--r--hw/intc/nios2_iic.c103
-rw-r--r--hw/intc/s390_flic_kvm.c12
-rw-r--r--hw/intc/trace-events40
-rw-r--r--hw/isa/isa-bus.c1
-rw-r--r--hw/lm32/lm32_hwsetup.h2
-rw-r--r--hw/m68k/mcf5208.c25
-rw-r--r--hw/misc/aspeed_scu.c4
-rw-r--r--hw/misc/aspeed_sdmc.c3
-rw-r--r--hw/misc/ivshmem.c14
-rw-r--r--hw/misc/tmp105.c3
-rw-r--r--hw/misc/vmport.c2
-rw-r--r--hw/net/cadence_gem.c2
-rw-r--r--hw/net/dp8393x.c95
-rw-r--r--hw/net/e1000e.c5
-rw-r--r--hw/net/fsl_etsec/rings.c19
-rw-r--r--hw/net/mcf_fec.c71
-rw-r--r--hw/net/rtl8139.c34
-rw-r--r--hw/net/spapr_llan.c4
-rw-r--r--hw/net/vhost_net.c19
-rw-r--r--hw/net/virtio-net.c45
-rw-r--r--hw/net/vmxnet3.c26
-rw-r--r--hw/nios2/10m50_devboard.c126
-rw-r--r--hw/nios2/Makefile.objs1
-rw-r--r--hw/nios2/boot.c223
-rw-r--r--hw/nios2/boot.h11
-rw-r--r--hw/nios2/cpu_pic.c70
-rw-r--r--hw/nvram/eeprom93xx.c8
-rw-r--r--hw/nvram/fw_cfg.c118
-rw-r--r--hw/pci-bridge/ioh3420.c6
-rw-r--r--hw/pci-bridge/xio3130_downstream.c6
-rw-r--r--hw/pci-bridge/xio3130_upstream.c6
-rw-r--r--hw/pci/msix.c8
-rw-r--r--hw/pci/pci.c70
-rw-r--r--hw/pci/pcie.c17
-rw-r--r--hw/pci/pcie_aer.c19
-rw-r--r--hw/pci/shpc.c7
-rw-r--r--hw/ppc/pnv_xscom.c2
-rw-r--r--hw/ppc/ppce500_spin.c4
-rw-r--r--hw/ppc/spapr.c2
-rw-r--r--hw/ppc/spapr_drc.c2
-rw-r--r--hw/ppc/spapr_hcall.c2
-rw-r--r--hw/s390x/s390-pci-bus.c361
-rw-r--r--hw/s390x/s390-pci-bus.h50
-rw-r--r--hw/s390x/s390-pci-inst.c75
-rw-r--r--hw/s390x/s390-pci-inst.h2
-rw-r--r--hw/s390x/s390-virtio-ccw.c19
-rw-r--r--hw/s390x/virtio-ccw.c6
-rw-r--r--hw/scsi/megasas.c13
-rw-r--r--hw/scsi/scsi-bus.c8
-rw-r--r--hw/scsi/scsi-disk.c3
-rw-r--r--hw/scsi/vhost-scsi.c25
-rw-r--r--hw/scsi/virtio-scsi.c40
-rw-r--r--hw/scsi/vmw_pvscsi.c2
-rw-r--r--hw/sh4/sh7750.c2
-rw-r--r--hw/smbios/Makefile.objs12
-rw-r--r--hw/smbios/smbios-stub.c31
-rw-r--r--hw/smbios/smbios.c2
-rw-r--r--hw/smbios/smbios_type_38-stub.c (renamed from stubs/smbios_type_38.c)0
-rw-r--r--hw/sparc64/Makefile.objs2
-rw-r--r--hw/sparc64/niagara.c177
-rw-r--r--hw/sparc64/sparc64.c378
-rw-r--r--hw/sparc64/sun4u.c379
-rw-r--r--hw/ssi/aspeed_smc.c342
-rw-r--r--hw/ssi/imx_spi.c11
-rw-r--r--hw/timer/Makefile.objs3
-rw-r--r--hw/timer/altera_timer.c237
-rw-r--r--hw/timer/ds1338.c10
-rw-r--r--hw/timer/mc146818rtc.c12
-rw-r--r--hw/timer/sun4v-rtc.c102
-rw-r--r--hw/timer/twl92230.c12
-rw-r--r--hw/usb/bus.c19
-rw-r--r--hw/usb/ccid-card-emulated.c2
-rw-r--r--hw/usb/dev-mtp.c4
-rw-r--r--hw/usb/hcd-xhci.c2
-rw-r--r--hw/usb/redirect.c26
-rw-r--r--hw/vfio/pci-quirks.c2
-rw-r--r--hw/vfio/pci.c4
-rw-r--r--hw/virtio/Makefile.objs6
-rw-r--r--hw/virtio/trace-events2
-rw-r--r--hw/virtio/vhost-backend.c99
-rw-r--r--hw/virtio/vhost-stub.c (renamed from stubs/vhost.c)0
-rw-r--r--hw/virtio/vhost-user.c34
-rw-r--r--hw/virtio/vhost.c175
-rw-r--r--hw/virtio/virtio-balloon.c7
-rw-r--r--hw/virtio/virtio-bus.c9
-rw-r--r--hw/virtio/virtio-crypto-pci.c6
-rw-r--r--hw/virtio/virtio-crypto.c43
-rw-r--r--hw/virtio/virtio-mmio.c97
-rw-r--r--hw/virtio/virtio-pci.c32
-rw-r--r--hw/virtio/virtio-pci.h4
-rw-r--r--hw/virtio/virtio.c192
-rw-r--r--include/block/aio.h91
-rw-r--r--include/block/block.h2
-rw-r--r--include/block/block_int.h5
-rw-r--r--include/disas/bfd.h6
-rw-r--r--include/elf.h2
-rw-r--r--include/exec/exec-all.h14
-rw-r--r--include/exec/memory.h14
-rw-r--r--include/exec/ram_addr.h46
-rw-r--r--include/exec/ramlist.h72
-rw-r--r--include/glib-compat.h2
-rw-r--r--include/hw/acpi/acpi-defs.h45
-rw-r--r--include/hw/acpi/acpi_dev_interface.h2
-rw-r--r--include/hw/acpi/memory_hotplug.h12
-rw-r--r--include/hw/acpi/pc-hotplug.h23
-rw-r--r--include/hw/arm/aspeed_soc.h4
-rw-r--r--include/hw/arm/virt-acpi-build.h47
-rw-r--r--include/hw/arm/virt.h44
-rw-r--r--include/hw/boards.h2
-rw-r--r--include/hw/compat.h11
-rw-r--r--include/hw/dma/xlnx_dpdma.h3
-rw-r--r--include/hw/hw.h6
-rw-r--r--include/hw/i2c/i2c.h16
-rw-r--r--include/hw/i386/pc.h9
-rw-r--r--include/hw/i386/x86-iommu.h1
-rw-r--r--include/hw/intc/arm_gic_common.h2
-rw-r--r--include/hw/intc/arm_gicv3_common.h21
-rw-r--r--include/hw/loader.h7
-rw-r--r--include/hw/m68k/mcf.h4
-rw-r--r--include/hw/m68k/mcf_fec.h13
-rw-r--r--include/hw/misc/aspeed_scu.h1
-rw-r--r--include/hw/nvram/fw_cfg.h3
-rw-r--r--include/hw/nvram/fw_cfg_keys.h3
-rw-r--r--include/hw/pci-host/q35.h2
-rw-r--r--include/hw/pci/pcie.h14
-rw-r--r--include/hw/pci/pcie_aer.h4
-rw-r--r--include/hw/register.h2
-rw-r--r--include/hw/smbios/smbios.h2
-rw-r--r--include/hw/sparc/sparc64.h5
-rw-r--r--include/hw/ssi/aspeed_smc.h4
-rw-r--r--include/hw/timer/sun4v-rtc.h1
-rw-r--r--include/hw/virtio/vhost-backend.h15
-rw-r--r--include/hw/virtio/vhost.h4
-rw-r--r--include/hw/virtio/virtio-access.h31
-rw-r--r--include/hw/virtio/virtio-bus.h1
-rw-r--r--include/hw/virtio/virtio-gpu.h3
-rw-r--r--include/hw/virtio/virtio-net.h1
-rw-r--r--include/hw/virtio/virtio.h12
-rw-r--r--include/io/dns-resolver.h228
-rw-r--r--include/io/task.h154
-rw-r--r--include/migration/migration.h13
-rw-r--r--include/migration/vmstate.h46
-rw-r--r--include/net/vhost_net.h2
-rw-r--r--include/qapi/dealloc-visitor.h2
-rw-r--r--include/qapi/error.h3
-rw-r--r--include/qemu/config-file.h4
-rw-r--r--include/qemu/coroutine.h6
-rw-r--r--include/qemu/event_notifier.h3
-rw-r--r--include/qemu/futex.h36
-rw-r--r--include/qemu/host-utils.h25
-rw-r--r--include/qemu/main-loop.h15
-rw-r--r--include/qemu/qht.h2
-rw-r--r--include/qemu/queue.h60
-rw-r--r--include/qemu/sockets.h2
-rw-r--r--include/qemu/thread.h112
-rw-r--r--include/qemu/xattr.h2
-rw-r--r--include/qom/cpu.h5
-rw-r--r--include/qom/object.h26
-rw-r--r--include/qom/object_interfaces.h17
-rw-r--r--include/standard-headers/linux/virtio_crypto.h481
-rw-r--r--include/standard-headers/linux/virtio_mmio.h141
-rw-r--r--include/sysemu/arch_init.h3
-rw-r--r--include/sysemu/cryptodev.h42
-rw-r--r--include/sysemu/hax.h56
-rw-r--r--include/sysemu/hostmem.h1
-rw-r--r--include/sysemu/hw_accel.h48
-rw-r--r--include/sysemu/iothread.h5
-rw-r--r--include/sysemu/kvm.h23
-rw-r--r--include/sysemu/numa.h2
-rw-r--r--include/sysemu/replay.h12
-rw-r--r--include/sysemu/reset.h10
-rw-r--r--include/sysemu/sysemu.h8
-rw-r--r--include/ui/console.h7
-rw-r--r--include/ui/gtk.h4
-rw-r--r--io/Makefile.objs1
-rw-r--r--io/channel-socket.c44
-rw-r--r--io/channel-tls.c16
-rw-r--r--io/channel-websock.c8
-rw-r--r--io/dns-resolver.c276
-rw-r--r--io/task.c62
-rw-r--r--io/trace-events1
-rw-r--r--iohandler.c10
-rw-r--r--iothread.c84
-rw-r--r--linux-headers/linux/vhost.h2
-rw-r--r--linux-user/alpha/target_syscall.h2
-rw-r--r--linux-user/elfload.c316
-rw-r--r--linux-user/errno_defs.h3
-rw-r--r--linux-user/hppa/sockbits.h97
-rw-r--r--linux-user/hppa/syscall_nr.h353
-rw-r--r--linux-user/hppa/target_cpu.h35
-rw-r--r--linux-user/hppa/target_signal.h29
-rw-r--r--linux-user/hppa/target_structs.h54
-rw-r--r--linux-user/hppa/target_syscall.h237
-rw-r--r--linux-user/hppa/termbits.h219
-rw-r--r--linux-user/ioctls.h8
-rw-r--r--linux-user/main.c334
-rw-r--r--linux-user/mips/target_syscall.h5
-rw-r--r--linux-user/mips64/target_syscall.h5
-rw-r--r--linux-user/nios2/syscall_nr.h329
-rw-r--r--linux-user/nios2/target_cpu.h39
-rw-r--r--linux-user/nios2/target_signal.h26
-rw-r--r--linux-user/nios2/target_structs.h58
-rw-r--r--linux-user/nios2/target_syscall.h37
-rw-r--r--linux-user/nios2/termbits.h220
-rw-r--r--linux-user/qemu.h3
-rw-r--r--linux-user/signal.c428
-rw-r--r--linux-user/socket.h2
-rw-r--r--linux-user/syscall.c12
-rw-r--r--linux-user/syscall_defs.h160
-rw-r--r--linux-user/syscall_types.h6
-rw-r--r--memory.c9
-rw-r--r--migration/Makefile.objs3
-rw-r--r--migration/migration.c61
-rw-r--r--migration/ram.c18
-rw-r--r--migration/savevm.c41
-rw-r--r--migration/socket.c11
-rw-r--r--migration/tls.c19
-rw-r--r--migration/trace-events12
-rw-r--r--migration/vmstate.c203
-rw-r--r--monitor.c4
-rw-r--r--nbd/common.c8
-rw-r--r--nbd/nbd-internal.h3
-rw-r--r--nbd/server.c9
-rw-r--r--net/Makefile.objs1
-rw-r--r--net/checksum.c21
-rw-r--r--net/filter-replay.c92
-rw-r--r--net/tap.c8
-rw-r--r--numa.c53
-rw-r--r--qapi-schema.json1358
-rw-r--r--qapi/block-core.json700
-rw-r--r--qapi/block.json66
-rw-r--r--qapi/common.json38
-rw-r--r--qapi/crypto.json5
-rw-r--r--qapi/event.json248
-rw-r--r--qapi/introspect.json5
-rw-r--r--qapi/rocker.json61
-rw-r--r--qapi/trace.json17
-rw-r--r--qdev-monitor.c9
-rw-r--r--qemu-char.c33
-rw-r--r--qemu-doc.texi19
-rw-r--r--qemu-img.c29
-rw-r--r--qemu-options-wrapper.h2
-rw-r--r--qemu-options.hx66
-rw-r--r--qga/main.c4
-rw-r--r--qga/qapi-schema.json11
-rw-r--r--qom/cpu.c14
-rw-r--r--qom/object.c10
-rw-r--r--qom/object_interfaces.c78
-rw-r--r--qtest.c2
-rw-r--r--replay/Makefile.objs1
-rw-r--r--replay/replay-events.c11
-rw-r--r--replay/replay-internal.h10
-rw-r--r--replay/replay-net.c102
-rw-r--r--replay/replay.c2
-rw-r--r--rules.mak12
-rw-r--r--scripts/hxtool11
-rw-r--r--scripts/qapi.py583
-rwxr-xr-xscripts/qapi2texi.py271
-rwxr-xr-xscripts/texi2pod.pl54
-rwxr-xr-xscripts/update-linux-headers.sh2
-rw-r--r--stubs/Makefile.objs19
-rw-r--r--stubs/cpus.c11
-rw-r--r--stubs/fdset-add-fd.c8
-rw-r--r--stubs/fdset-find-fd.c8
-rw-r--r--stubs/fdset-get-fd.c8
-rw-r--r--stubs/fdset-remove-fd.c7
-rw-r--r--stubs/fdset.c22
-rw-r--r--stubs/get-next-serial.c4
-rw-r--r--stubs/iohandler.c8
-rw-r--r--stubs/kvm.c8
-rw-r--r--stubs/migr-blocker.c3
-rw-r--r--stubs/migration-colo.c46
-rw-r--r--stubs/mon-is-qmp.c10
-rw-r--r--stubs/monitor-init.c7
-rw-r--r--stubs/monitor.c (renamed from stubs/get-fd.c)6
-rw-r--r--stubs/pc_madt_cpu_entry.c2
-rw-r--r--stubs/replay-user.c33
-rw-r--r--stubs/reset.c14
-rw-r--r--stubs/set-fd-handler.c1
-rw-r--r--target/alpha/cpu.c2
-rw-r--r--target/alpha/helper.h4
-rw-r--r--target/alpha/int_helper.c15
-rw-r--r--target/alpha/machine.c6
-rw-r--r--target/alpha/sys_helper.c2
-rw-r--r--target/alpha/translate.c73
-rw-r--r--target/arm/cpu.c33
-rw-r--r--target/arm/cpu.h15
-rw-r--r--target/arm/cpu64.c8
-rw-r--r--target/arm/helper-a64.c20
-rw-r--r--target/arm/helper-a64.h4
-rw-r--r--target/arm/helper.c72
-rw-r--r--target/arm/helper.h1
-rw-r--r--target/arm/machine.c14
-rw-r--r--target/arm/op_helper.c9
-rw-r--r--target/arm/psci.c25
-rw-r--r--target/arm/translate-a64.c104
-rw-r--r--target/arm/translate.c43
-rw-r--r--target/cris/cpu.c3
-rw-r--r--target/cris/cpu.h9
-rw-r--r--target/cris/helper.h1
-rw-r--r--target/cris/op_helper.c5
-rw-r--r--target/cris/translate.c2
-rw-r--r--target/hppa/Makefile.objs1
-rw-r--r--target/hppa/cpu-qom.h52
-rw-r--r--target/hppa/cpu.c164
-rw-r--r--target/hppa/cpu.h144
-rw-r--r--target/hppa/gdbstub.c111
-rw-r--r--target/hppa/helper.c137
-rw-r--r--target/hppa/helper.h66
-rw-r--r--target/hppa/op_helper.c570
-rw-r--r--target/hppa/translate.c3946
-rw-r--r--target/i386/Makefile.objs7
-rw-r--r--target/i386/cc_helper.c3
-rw-r--r--target/i386/cpu-qom.h2
-rw-r--r--target/i386/cpu.c54
-rw-r--r--target/i386/cpu.h8
-rw-r--r--target/i386/fpu_helper.c2
-rw-r--r--target/i386/hax-all.c1155
-rw-r--r--target/i386/hax-darwin.c316
-rw-r--r--target/i386/hax-darwin.h63
-rw-r--r--target/i386/hax-i386.h94
-rw-r--r--target/i386/hax-interface.h361
-rw-r--r--target/i386/hax-mem.c289
-rw-r--r--target/i386/hax-windows.c479
-rw-r--r--target/i386/hax-windows.h89
-rw-r--r--target/i386/helper.c9
-rw-r--r--target/i386/helper.h2
-rw-r--r--target/i386/hyperv.c7
-rw-r--r--target/i386/int_helper.c11
-rw-r--r--target/i386/kvm.c35
-rw-r--r--target/i386/machine.c28
-rw-r--r--target/i386/misc_helper.c2
-rw-r--r--target/i386/ops_sse.h26
-rw-r--r--target/i386/ops_sse_header.h1
-rw-r--r--target/i386/svm_helper.c2
-rw-r--r--target/i386/translate.c99
-rw-r--r--target/lm32/cpu.c3
-rw-r--r--target/lm32/cpu.h3
-rw-r--r--target/m68k/cpu.c3
-rw-r--r--target/m68k/cpu.h8
-rw-r--r--target/m68k/helper.c52
-rw-r--r--target/m68k/helper.h23
-rw-r--r--target/m68k/op_helper.c498
-rw-r--r--target/m68k/qregs.def2
-rw-r--r--target/m68k/translate.c1939
-rw-r--r--target/microblaze/cpu.c3
-rw-r--r--target/microblaze/cpu.h3
-rw-r--r--target/microblaze/helper.h1
-rw-r--r--target/microblaze/mmu.c2
-rw-r--r--target/microblaze/op_helper.c5
-rw-r--r--target/microblaze/translate.c2
-rw-r--r--target/mips/cpu.c3
-rw-r--r--target/mips/cpu.h5
-rw-r--r--target/mips/helper.c6
-rw-r--r--target/mips/helper.h7
-rw-r--r--target/mips/kvm.c5
-rw-r--r--target/mips/machine.c14
-rw-r--r--target/mips/op_helper.c30
-rw-r--r--target/mips/translate.c35
-rw-r--r--target/moxie/cpu.c4
-rw-r--r--target/moxie/cpu.h3
-rw-r--r--target/nios2/Makefile.objs4
-rw-r--r--target/nios2/cpu.c237
-rw-r--r--target/nios2/cpu.h272
-rw-r--r--target/nios2/helper.c313
-rw-r--r--target/nios2/helper.h27
-rw-r--r--target/nios2/mmu.c296
-rw-r--r--target/nios2/mmu.h50
-rw-r--r--target/nios2/monitor.c35
-rw-r--r--target/nios2/op_helper.c47
-rw-r--r--target/nios2/translate.c958
-rw-r--r--target/openrisc/cpu.c9
-rw-r--r--target/openrisc/cpu.h3
-rw-r--r--target/openrisc/helper.h2
-rw-r--r--target/openrisc/int_helper.c19
-rw-r--r--target/openrisc/interrupt.c2
-rw-r--r--target/openrisc/interrupt_helper.c2
-rw-r--r--target/openrisc/sys_helper.c2
-rw-r--r--target/openrisc/translate.c6
-rw-r--r--target/ppc/helper.h7
-rw-r--r--target/ppc/helper_regs.h4
-rw-r--r--target/ppc/int_helper.c38
-rw-r--r--target/ppc/kvm.c7
-rw-r--r--target/ppc/kvm_ppc.h12
-rw-r--r--target/ppc/machine.c12
-rw-r--r--target/ppc/misc_helper.c4
-rw-r--r--target/ppc/mmu-hash64.c2
-rw-r--r--target/ppc/mmu_helper.c32
-rw-r--r--target/ppc/translate.c61
-rw-r--r--target/ppc/translate_init.c6
-rw-r--r--target/s390x/cpu.c7
-rw-r--r--target/s390x/cpu.h5
-rw-r--r--target/s390x/cpu_models.c1
-rw-r--r--target/s390x/gdbstub.c3
-rw-r--r--target/s390x/helper.h1
-rw-r--r--target/s390x/int_helper.c21
-rw-r--r--target/s390x/kvm.c11
-rw-r--r--target/s390x/mem_helper.c8
-rw-r--r--target/s390x/translate.c36
-rw-r--r--target/sh4/cpu.c3
-rw-r--r--target/sh4/cpu.h3
-rw-r--r--target/sh4/helper.c2
-rw-r--r--target/sparc/asi.h1
-rw-r--r--target/sparc/cpu.c16
-rw-r--r--target/sparc/cpu.h108
-rw-r--r--target/sparc/helper.c5
-rw-r--r--target/sparc/helper.h2
-rw-r--r--target/sparc/int64_helper.c43
-rw-r--r--target/sparc/ldst_helper.c399
-rw-r--r--target/sparc/machine.c10
-rw-r--r--target/sparc/mmu_helper.c20
-rw-r--r--target/sparc/translate.c66
-rw-r--r--target/sparc/win_helper.c46
-rw-r--r--target/tilegx/cpu.c3
-rw-r--r--target/tilegx/cpu.h3
-rw-r--r--target/tilegx/helper.c15
-rw-r--r--target/tilegx/helper.h3
-rw-r--r--target/tilegx/translate.c6
-rw-r--r--target/tricore/cpu.c2
-rw-r--r--target/tricore/fpu_helper.c134
-rw-r--r--target/tricore/helper.h7
-rw-r--r--target/tricore/op_helper.c15
-rw-r--r--target/tricore/translate.c55
-rw-r--r--target/tricore/tricore-opcodes.h3
-rw-r--r--target/unicore32/cpu.c2
-rw-r--r--target/unicore32/helper.c12
-rw-r--r--target/unicore32/helper.h3
-rw-r--r--target/unicore32/translate.c6
-rw-r--r--target/xtensa/helper.h2
-rw-r--r--target/xtensa/op_helper.c15
-rw-r--r--target/xtensa/translate.c4
-rw-r--r--tcg-runtime.c40
-rw-r--r--tcg/README41
-rw-r--r--tcg/aarch64/tcg-target.h10
-rw-r--r--tcg/aarch64/tcg-target.inc.c157
-rw-r--r--tcg/arm/tcg-target.h41
-rw-r--r--tcg/arm/tcg-target.inc.c121
-rw-r--r--tcg/i386/tcg-target.h17
-rw-r--r--tcg/i386/tcg-target.inc.c727
-rw-r--r--tcg/ia64/tcg-target.h10
-rw-r--r--tcg/ia64/tcg-target.inc.c28
-rw-r--r--tcg/mips/tcg-target.h70
-rw-r--r--tcg/mips/tcg-target.inc.c1252
-rw-r--r--tcg/optimize.c94
-rw-r--r--tcg/ppc/tcg-target.h13
-rw-r--r--tcg/ppc/tcg-target.inc.c117
-rw-r--r--tcg/s390/tcg-target.h128
-rw-r--r--tcg/s390/tcg-target.inc.c248
-rw-r--r--tcg/sparc/tcg-target.h10
-rw-r--r--tcg/sparc/tcg-target.inc.c28
-rw-r--r--tcg/tcg-op.c692
-rw-r--r--tcg/tcg-op.h42
-rw-r--r--tcg/tcg-opc.h10
-rw-r--r--tcg/tcg-runtime.h9
-rw-r--r--tcg/tcg.c173
-rw-r--r--tcg/tcg.h14
-rw-r--r--tcg/tci/tcg-target.h10
-rw-r--r--tcg/tci/tcg-target.inc.c25
-rw-r--r--tests/.gitignore1
-rw-r--r--tests/Makefile.include25
-rw-r--r--tests/acpi-test-data/pc/DSDTbin6008 -> 5098 bytes
-rw-r--r--tests/acpi-test-data/pc/DSDT.bridgebin7867 -> 6957 bytes
-rw-r--r--tests/acpi-test-data/pc/DSDT.cphpbin6471 -> 5561 bytes
-rw-r--r--tests/acpi-test-data/pc/DSDT.ipmikcsbin6080 -> 5170 bytes
-rw-r--r--tests/acpi-test-data/pc/DSDT.memhpbin0 -> 6463 bytes
-rw-r--r--tests/acpi-test-data/pc/SRAT.memhpbin0 -> 224 bytes
-rw-r--r--tests/acpi-test-data/q35/DSDTbin8770 -> 7860 bytes
-rw-r--r--tests/acpi-test-data/q35/DSDT.bridgebin8787 -> 7877 bytes
-rw-r--r--tests/acpi-test-data/q35/DSDT.cphpbin9233 -> 8323 bytes
-rw-r--r--tests/acpi-test-data/q35/DSDT.ipmibtbin8845 -> 7935 bytes
-rw-r--r--tests/acpi-test-data/q35/DSDT.memhpbin0 -> 9225 bytes
-rw-r--r--tests/acpi-test-data/q35/SRAT.memhpbin0 -> 224 bytes
-rw-r--r--tests/bios-tables-test.c24
-rw-r--r--tests/device-introspect-test.c60
-rw-r--r--tests/libqtest.c12
-rw-r--r--tests/m25p80-test.c133
-rw-r--r--tests/qapi-schema/alternate-any.err2
-rw-r--r--tests/qapi-schema/alternate-any.json4
-rw-r--r--tests/qapi-schema/alternate-array.err2
-rw-r--r--tests/qapi-schema/alternate-array.json7
-rw-r--r--tests/qapi-schema/alternate-base.err2
-rw-r--r--tests/qapi-schema/alternate-base.json7
-rw-r--r--tests/qapi-schema/alternate-clash.err2
-rw-r--r--tests/qapi-schema/alternate-clash.json4
-rw-r--r--tests/qapi-schema/alternate-conflict-dict.err2
-rw-r--r--tests/qapi-schema/alternate-conflict-dict.json10
-rw-r--r--tests/qapi-schema/alternate-conflict-string.err2
-rw-r--r--tests/qapi-schema/alternate-conflict-string.json7
-rw-r--r--tests/qapi-schema/alternate-empty.err2
-rw-r--r--tests/qapi-schema/alternate-empty.json4
-rw-r--r--tests/qapi-schema/alternate-nested.err2
-rw-r--r--tests/qapi-schema/alternate-nested.json7
-rw-r--r--tests/qapi-schema/alternate-unknown.err2
-rw-r--r--tests/qapi-schema/alternate-unknown.json4
-rw-r--r--tests/qapi-schema/args-alternate.err2
-rw-r--r--tests/qapi-schema/args-alternate.json8
-rw-r--r--tests/qapi-schema/args-any.err2
-rw-r--r--tests/qapi-schema/args-any.json4
-rw-r--r--tests/qapi-schema/args-array-empty.err2
-rw-r--r--tests/qapi-schema/args-array-empty.json4
-rw-r--r--tests/qapi-schema/args-array-unknown.err2
-rw-r--r--tests/qapi-schema/args-array-unknown.json4
-rw-r--r--tests/qapi-schema/args-bad-boxed.err2
-rw-r--r--tests/qapi-schema/args-bad-boxed.json4
-rw-r--r--tests/qapi-schema/args-boxed-anon.err2
-rw-r--r--tests/qapi-schema/args-boxed-anon.json4
-rw-r--r--tests/qapi-schema/args-boxed-empty.err2
-rw-r--r--tests/qapi-schema/args-boxed-empty.json8
-rw-r--r--tests/qapi-schema/args-boxed-string.err2
-rw-r--r--tests/qapi-schema/args-boxed-string.json4
-rw-r--r--tests/qapi-schema/args-int.err2
-rw-r--r--tests/qapi-schema/args-int.json4
-rw-r--r--tests/qapi-schema/args-invalid.err2
-rw-r--r--tests/qapi-schema/args-invalid.json3
-rw-r--r--tests/qapi-schema/args-member-array-bad.err2
-rw-r--r--tests/qapi-schema/args-member-array-bad.json4
-rw-r--r--tests/qapi-schema/args-member-case.err2
-rw-r--r--tests/qapi-schema/args-member-case.json4
-rw-r--r--tests/qapi-schema/args-member-unknown.err2
-rw-r--r--tests/qapi-schema/args-member-unknown.json4
-rw-r--r--tests/qapi-schema/args-name-clash.err2
-rw-r--r--tests/qapi-schema/args-name-clash.json4
-rw-r--r--tests/qapi-schema/args-union.err2
-rw-r--r--tests/qapi-schema/args-union.json7
-rw-r--r--tests/qapi-schema/args-unknown.err2
-rw-r--r--tests/qapi-schema/args-unknown.json4
-rw-r--r--tests/qapi-schema/bad-base.err2
-rw-r--r--tests/qapi-schema/bad-base.json7
-rw-r--r--tests/qapi-schema/bad-data.err2
-rw-r--r--tests/qapi-schema/bad-data.json4
-rw-r--r--tests/qapi-schema/bad-ident.err2
-rw-r--r--tests/qapi-schema/bad-ident.json4
-rw-r--r--tests/qapi-schema/bad-type-bool.err2
-rw-r--r--tests/qapi-schema/bad-type-bool.json4
-rw-r--r--tests/qapi-schema/bad-type-dict.err2
-rw-r--r--tests/qapi-schema/bad-type-dict.json4
-rw-r--r--tests/qapi-schema/base-cycle-direct.err2
-rw-r--r--tests/qapi-schema/base-cycle-direct.json4
-rw-r--r--tests/qapi-schema/base-cycle-indirect.err2
-rw-r--r--tests/qapi-schema/base-cycle-indirect.json7
-rw-r--r--tests/qapi-schema/command-int.err2
-rw-r--r--tests/qapi-schema/command-int.json4
-rw-r--r--tests/qapi-schema/comments.json4
-rw-r--r--tests/qapi-schema/comments.out1
-rw-r--r--tests/qapi-schema/doc-bad-args.err1
-rw-r--r--tests/qapi-schema/doc-bad-args.exit1
-rw-r--r--tests/qapi-schema/doc-bad-args.json8
-rw-r--r--tests/qapi-schema/doc-bad-args.out0
-rw-r--r--tests/qapi-schema/doc-bad-symbol.err1
-rw-r--r--tests/qapi-schema/doc-bad-symbol.exit1
-rw-r--r--tests/qapi-schema/doc-bad-symbol.json6
-rw-r--r--tests/qapi-schema/doc-bad-symbol.out0
-rw-r--r--tests/qapi-schema/doc-duplicated-arg.err1
-rw-r--r--tests/qapi-schema/doc-duplicated-arg.exit1
-rw-r--r--tests/qapi-schema/doc-duplicated-arg.json7
-rw-r--r--tests/qapi-schema/doc-duplicated-arg.out0
-rw-r--r--tests/qapi-schema/doc-duplicated-return.err1
-rw-r--r--tests/qapi-schema/doc-duplicated-return.exit1
-rw-r--r--tests/qapi-schema/doc-duplicated-return.json8
-rw-r--r--tests/qapi-schema/doc-duplicated-return.out0
-rw-r--r--tests/qapi-schema/doc-duplicated-since.err1
-rw-r--r--tests/qapi-schema/doc-duplicated-since.exit1
-rw-r--r--tests/qapi-schema/doc-duplicated-since.json8
-rw-r--r--tests/qapi-schema/doc-duplicated-since.out0
-rw-r--r--tests/qapi-schema/doc-empty-arg.err1
-rw-r--r--tests/qapi-schema/doc-empty-arg.exit1
-rw-r--r--tests/qapi-schema/doc-empty-arg.json6
-rw-r--r--tests/qapi-schema/doc-empty-arg.out0
-rw-r--r--tests/qapi-schema/doc-empty-section.err1
-rw-r--r--tests/qapi-schema/doc-empty-section.exit1
-rw-r--r--tests/qapi-schema/doc-empty-section.json8
-rw-r--r--tests/qapi-schema/doc-empty-section.out0
-rw-r--r--tests/qapi-schema/doc-empty-symbol.err1
-rw-r--r--tests/qapi-schema/doc-empty-symbol.exit1
-rw-r--r--tests/qapi-schema/doc-empty-symbol.json5
-rw-r--r--tests/qapi-schema/doc-empty-symbol.out0
-rw-r--r--tests/qapi-schema/doc-interleaved-section.err1
-rw-r--r--tests/qapi-schema/doc-interleaved-section.exit1
-rw-r--r--tests/qapi-schema/doc-interleaved-section.json21
-rw-r--r--tests/qapi-schema/doc-interleaved-section.out0
-rw-r--r--tests/qapi-schema/doc-invalid-end.err1
-rw-r--r--tests/qapi-schema/doc-invalid-end.exit1
-rw-r--r--tests/qapi-schema/doc-invalid-end.json5
-rw-r--r--tests/qapi-schema/doc-invalid-end.out0
-rw-r--r--tests/qapi-schema/doc-invalid-end2.err1
-rw-r--r--tests/qapi-schema/doc-invalid-end2.exit1
-rw-r--r--tests/qapi-schema/doc-invalid-end2.json5
-rw-r--r--tests/qapi-schema/doc-invalid-end2.out0
-rw-r--r--tests/qapi-schema/doc-invalid-return.err1
-rw-r--r--tests/qapi-schema/doc-invalid-return.exit1
-rw-r--r--tests/qapi-schema/doc-invalid-return.json7
-rw-r--r--tests/qapi-schema/doc-invalid-return.out0
-rw-r--r--tests/qapi-schema/doc-invalid-section.err1
-rw-r--r--tests/qapi-schema/doc-invalid-section.exit1
-rw-r--r--tests/qapi-schema/doc-invalid-section.json6
-rw-r--r--tests/qapi-schema/doc-invalid-section.out0
-rw-r--r--tests/qapi-schema/doc-invalid-start.err1
-rw-r--r--tests/qapi-schema/doc-invalid-start.exit1
-rw-r--r--tests/qapi-schema/doc-invalid-start.json5
-rw-r--r--tests/qapi-schema/doc-invalid-start.out0
-rw-r--r--tests/qapi-schema/doc-missing-colon.err1
-rw-r--r--tests/qapi-schema/doc-missing-colon.exit1
-rw-r--r--tests/qapi-schema/doc-missing-colon.json5
-rw-r--r--tests/qapi-schema/doc-missing-colon.out0
-rw-r--r--tests/qapi-schema/doc-missing-expr.err1
-rw-r--r--tests/qapi-schema/doc-missing-expr.exit1
-rw-r--r--tests/qapi-schema/doc-missing-expr.json5
-rw-r--r--tests/qapi-schema/doc-missing-expr.out0
-rw-r--r--tests/qapi-schema/doc-missing-space.err1
-rw-r--r--tests/qapi-schema/doc-missing-space.exit1
-rw-r--r--tests/qapi-schema/doc-missing-space.json6
-rw-r--r--tests/qapi-schema/doc-missing-space.out0
-rw-r--r--tests/qapi-schema/doc-optional.err1
-rw-r--r--tests/qapi-schema/doc-optional.exit1
-rw-r--r--tests/qapi-schema/doc-optional.json7
-rw-r--r--tests/qapi-schema/doc-optional.out0
-rw-r--r--tests/qapi-schema/double-type.err2
-rw-r--r--tests/qapi-schema/double-type.json4
-rw-r--r--tests/qapi-schema/enum-bad-name.err2
-rw-r--r--tests/qapi-schema/enum-bad-name.json4
-rw-r--r--tests/qapi-schema/enum-bad-prefix.err2
-rw-r--r--tests/qapi-schema/enum-bad-prefix.json4
-rw-r--r--tests/qapi-schema/enum-clash-member.err2
-rw-r--r--tests/qapi-schema/enum-clash-member.json4
-rw-r--r--tests/qapi-schema/enum-dict-member.err2
-rw-r--r--tests/qapi-schema/enum-dict-member.json4
-rw-r--r--tests/qapi-schema/enum-member-case.err2
-rw-r--r--tests/qapi-schema/enum-member-case.json7
-rw-r--r--tests/qapi-schema/enum-missing-data.err2
-rw-r--r--tests/qapi-schema/enum-missing-data.json4
-rw-r--r--tests/qapi-schema/enum-wrong-data.err2
-rw-r--r--tests/qapi-schema/enum-wrong-data.json4
-rw-r--r--tests/qapi-schema/event-boxed-empty.err2
-rw-r--r--tests/qapi-schema/event-boxed-empty.json4
-rw-r--r--tests/qapi-schema/event-case.json4
-rw-r--r--tests/qapi-schema/event-case.out1
-rw-r--r--tests/qapi-schema/event-nest-struct.err2
-rw-r--r--tests/qapi-schema/event-nest-struct.json4
-rw-r--r--tests/qapi-schema/flat-union-array-branch.err2
-rw-r--r--tests/qapi-schema/flat-union-array-branch.json12
-rw-r--r--tests/qapi-schema/flat-union-bad-base.err2
-rw-r--r--tests/qapi-schema/flat-union-bad-base.json13
-rw-r--r--tests/qapi-schema/flat-union-bad-discriminator.err2
-rw-r--r--tests/qapi-schema/flat-union-bad-discriminator.json16
-rw-r--r--tests/qapi-schema/flat-union-base-any.err2
-rw-r--r--tests/qapi-schema/flat-union-base-any.json13
-rw-r--r--tests/qapi-schema/flat-union-base-union.err2
-rw-r--r--tests/qapi-schema/flat-union-base-union.json16
-rw-r--r--tests/qapi-schema/flat-union-clash-member.err2
-rw-r--r--tests/qapi-schema/flat-union-clash-member.json16
-rw-r--r--tests/qapi-schema/flat-union-empty.err2
-rw-r--r--tests/qapi-schema/flat-union-empty.json10
-rw-r--r--tests/qapi-schema/flat-union-incomplete-branch.err2
-rw-r--r--tests/qapi-schema/flat-union-incomplete-branch.json10
-rw-r--r--tests/qapi-schema/flat-union-inline.err2
-rw-r--r--tests/qapi-schema/flat-union-inline.json10
-rw-r--r--tests/qapi-schema/flat-union-int-branch.err2
-rw-r--r--tests/qapi-schema/flat-union-int-branch.json13
-rw-r--r--tests/qapi-schema/flat-union-invalid-branch-key.err2
-rw-r--r--tests/qapi-schema/flat-union-invalid-branch-key.json15
-rw-r--r--tests/qapi-schema/flat-union-invalid-discriminator.err2
-rw-r--r--tests/qapi-schema/flat-union-invalid-discriminator.json15
-rw-r--r--tests/qapi-schema/flat-union-no-base.err2
-rw-r--r--tests/qapi-schema/flat-union-no-base.json13
-rw-r--r--tests/qapi-schema/flat-union-optional-discriminator.err2
-rw-r--r--tests/qapi-schema/flat-union-optional-discriminator.json13
-rw-r--r--tests/qapi-schema/flat-union-string-discriminator.err2
-rw-r--r--tests/qapi-schema/flat-union-string-discriminator.json15
-rw-r--r--tests/qapi-schema/ident-with-escape.json4
-rw-r--r--tests/qapi-schema/ident-with-escape.out1
-rw-r--r--tests/qapi-schema/include-relpath-sub.json3
-rw-r--r--tests/qapi-schema/include-relpath.out1
-rw-r--r--tests/qapi-schema/include-repetition.out1
-rw-r--r--tests/qapi-schema/include-simple-sub.json3
-rw-r--r--tests/qapi-schema/include-simple.out1
-rw-r--r--tests/qapi-schema/indented-expr.json6
-rw-r--r--tests/qapi-schema/indented-expr.out2
-rw-r--r--tests/qapi-schema/missing-type.err2
-rw-r--r--tests/qapi-schema/missing-type.json4
-rw-r--r--tests/qapi-schema/nested-struct-data.err2
-rw-r--r--tests/qapi-schema/nested-struct-data.json4
-rw-r--r--tests/qapi-schema/qapi-schema-test.json213
-rw-r--r--tests/qapi-schema/qapi-schema-test.out130
-rw-r--r--tests/qapi-schema/redefined-builtin.err2
-rw-r--r--tests/qapi-schema/redefined-builtin.json4
-rw-r--r--tests/qapi-schema/redefined-command.err2
-rw-r--r--tests/qapi-schema/redefined-command.json7
-rw-r--r--tests/qapi-schema/redefined-event.err2
-rw-r--r--tests/qapi-schema/redefined-event.json7
-rw-r--r--tests/qapi-schema/redefined-type.err2
-rw-r--r--tests/qapi-schema/redefined-type.json7
-rw-r--r--tests/qapi-schema/reserved-command-q.err2
-rw-r--r--tests/qapi-schema/reserved-command-q.json7
-rw-r--r--tests/qapi-schema/reserved-enum-q.err2
-rw-r--r--tests/qapi-schema/reserved-enum-q.json4
-rw-r--r--tests/qapi-schema/reserved-member-has.err2
-rw-r--r--tests/qapi-schema/reserved-member-has.json4
-rw-r--r--tests/qapi-schema/reserved-member-q.err2
-rw-r--r--tests/qapi-schema/reserved-member-q.json4
-rw-r--r--tests/qapi-schema/reserved-member-u.err2
-rw-r--r--tests/qapi-schema/reserved-member-u.json4
-rw-r--r--tests/qapi-schema/reserved-member-underscore.err2
-rw-r--r--tests/qapi-schema/reserved-member-underscore.json4
-rw-r--r--tests/qapi-schema/reserved-type-kind.err2
-rw-r--r--tests/qapi-schema/reserved-type-kind.json4
-rw-r--r--tests/qapi-schema/reserved-type-list.err2
-rw-r--r--tests/qapi-schema/reserved-type-list.json4
-rw-r--r--tests/qapi-schema/returns-alternate.err2
-rw-r--r--tests/qapi-schema/returns-alternate.json7
-rw-r--r--tests/qapi-schema/returns-array-bad.err2
-rw-r--r--tests/qapi-schema/returns-array-bad.json4
-rw-r--r--tests/qapi-schema/returns-dict.err2
-rw-r--r--tests/qapi-schema/returns-dict.json4
-rw-r--r--tests/qapi-schema/returns-unknown.err2
-rw-r--r--tests/qapi-schema/returns-unknown.json4
-rw-r--r--tests/qapi-schema/returns-whitelist.err2
-rw-r--r--tests/qapi-schema/returns-whitelist.json16
-rw-r--r--tests/qapi-schema/struct-base-clash-deep.err2
-rw-r--r--tests/qapi-schema/struct-base-clash-deep.json10
-rw-r--r--tests/qapi-schema/struct-base-clash.err2
-rw-r--r--tests/qapi-schema/struct-base-clash.json7
-rw-r--r--tests/qapi-schema/struct-data-invalid.err2
-rw-r--r--tests/qapi-schema/struct-data-invalid.json3
-rw-r--r--tests/qapi-schema/struct-member-invalid.err2
-rw-r--r--tests/qapi-schema/struct-member-invalid.json3
-rw-r--r--tests/qapi-schema/test-qapi.py14
-rw-r--r--tests/qapi-schema/type-bypass-bad-gen.err2
-rw-r--r--tests/qapi-schema/type-bypass-bad-gen.json4
-rw-r--r--tests/qapi-schema/unicode-str.err2
-rw-r--r--tests/qapi-schema/unicode-str.json4
-rw-r--r--tests/qapi-schema/union-base-no-discriminator.err2
-rw-r--r--tests/qapi-schema/union-base-no-discriminator.json12
-rw-r--r--tests/qapi-schema/union-branch-case.err2
-rw-r--r--tests/qapi-schema/union-branch-case.json4
-rw-r--r--tests/qapi-schema/union-clash-branches.err2
-rw-r--r--tests/qapi-schema/union-clash-branches.json4
-rw-r--r--tests/qapi-schema/union-empty.err2
-rw-r--r--tests/qapi-schema/union-empty.json4
-rw-r--r--tests/qapi-schema/union-invalid-base.err2
-rw-r--r--tests/qapi-schema/union-invalid-base.json10
-rw-r--r--tests/qapi-schema/union-optional-branch.err2
-rw-r--r--tests/qapi-schema/union-optional-branch.json4
-rw-r--r--tests/qapi-schema/union-unknown.err2
-rw-r--r--tests/qapi-schema/union-unknown.json4
-rw-r--r--tests/qapi-schema/unknown-escape.err2
-rw-r--r--tests/qapi-schema/unknown-escape.json4
-rw-r--r--tests/qapi-schema/unknown-expr-key.err2
-rw-r--r--tests/qapi-schema/unknown-expr-key.json4
-rw-r--r--tests/qemu-iotests/071.out8
-rw-r--r--tests/test-aio.c4
-rw-r--r--tests/test-bitcnt.c140
-rw-r--r--tests/test-io-channel-socket.c5
-rw-r--r--tests/test-io-channel-tls.c5
-rw-r--r--tests/test-io-task.c31
-rw-r--r--tests/test-vmstate.c147
-rw-r--r--tests/vhost-user-bridge.c1183
-rw-r--r--tests/virtio-9p-test.c478
-rw-r--r--trace-events14
-rw-r--r--trace/control-target.c11
-rw-r--r--trace/control.c19
-rw-r--r--trace/control.h8
-rw-r--r--translate-all.c2
-rw-r--r--ui/console.c11
-rw-r--r--ui/curses.c1
-rw-r--r--ui/egl-helpers.c16
-rw-r--r--ui/gtk.c18
-rw-r--r--ui/input-keymap.c3
-rw-r--r--ui/sdl.c25
-rw-r--r--ui/sdl2.c13
-rw-r--r--ui/vnc-auth-vencrypt.c7
-rw-r--r--ui/vnc-ws.c14
-rw-r--r--ui/vnc.c8
-rw-r--r--user-exec-stub.c34
-rw-r--r--util/Makefile.objs1
-rw-r--r--util/bitmap.c8
-rw-r--r--util/event_notifier-posix.c9
-rw-r--r--util/event_notifier-win32.c12
-rw-r--r--util/lockcnt.c397
-rw-r--r--util/mmap-alloc.c17
-rw-r--r--util/oslib-win32.c2
-rw-r--r--util/qemu-coroutine.c7
-rw-r--r--util/qemu-sockets.c11
-rw-r--r--util/qemu-thread-posix.c47
-rw-r--r--util/qemu-thread-win32.c6
-rw-r--r--util/trace-events10
-rw-r--r--util/uri.c4
-rw-r--r--vl.c95
-rw-r--r--xen-mapcache.c3
947 files changed, 46297 insertions, 12459 deletions
diff --git a/.gitignore b/.gitignore
index e43c3044dc..78f180a020 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,6 +40,7 @@
/qmp-marshal.c
/qemu-doc.html
/qemu-doc.info
+/qemu-doc.txt
/qemu-img
/qemu-nbd
/qemu-options.def
@@ -60,7 +61,6 @@
*.a
*.aux
*.cp
-*.dvi
*.exe
*.msi
*.dll
@@ -105,6 +105,15 @@
/pc-bios/optionrom/kvmvapic.img
/pc-bios/s390-ccw/s390-ccw.elf
/pc-bios/s390-ccw/s390-ccw.img
+/docs/qemu-ga-ref.html
+/docs/qemu-ga-ref.txt
+/docs/qemu-qmp-ref.html
+/docs/qemu-qmp-ref.txt
+docs/qemu-ga-ref.info*
+docs/qemu-qmp-ref.info*
+/qemu-ga-qapi.texi
+/qemu-qapi.texi
+*.tps
.stgit-*
cscope.*
tags
diff --git a/.travis.yml b/.travis.yml
index 9916178bf3..d83e2d493b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,6 @@ python:
- "2.4"
compiler:
- gcc
- - clang
cache: ccache
addons:
apt:
@@ -68,6 +67,9 @@ script:
- make -j3 && ${TEST_CMD}
matrix:
include:
+ # Test with CLang for compile portability
+ - env: CONFIG=""
+ compiler: clang
# gprof/gcov are GCC features
- env: CONFIG="--enable-gprof --enable-gcov --disable-pie"
compiler: gcc
@@ -101,6 +103,26 @@ matrix:
- sudo apt-get build-dep -qq qemu
- wget -O - http://people.linaro.org/~alex.bennee/qemu-submodule-git-seed.tar.xz | tar -xvJ
- git submodule update --init --recursive
+ # Trusty build with latest stable clang
+ - env: CONFIG=""
+ sudo: required
+ addons:
+ dist: trusty
+ language: generic
+ compiler: none
+ env:
+ - COMPILER_NAME=clang CXX=clang++-3.9 CC=clang-3.9
+ - CONFIG="--cc=clang-3.9 --cxx=clang++-3.9"
+ before_install:
+ - wget -nv -O - http://llvm.org/apt/llvm-snapshot.gpg.key | sudo apt-key add -
+ - sudo apt-add-repository -y 'deb http://llvm.org/apt/trusty llvm-toolchain-trusty-3.9 main'
+ - sudo apt-get update -qq
+ - sudo apt-get install -qq -y clang-3.9
+ - sudo apt-get build-dep -qq qemu
+ - wget -O - http://people.linaro.org/~alex.bennee/qemu-submodule-git-seed.tar.xz | tar -xvJ
+ - git submodule update --init --recursive
+ before_script:
+ - ./configure ${CONFIG} || cat config.log
# Using newer GCC with sanitizers
- addons:
apt:
diff --git a/HACKING b/HACKING
index 20a910168d..4125c97d8d 100644
--- a/HACKING
+++ b/HACKING
@@ -1,10 +1,28 @@
1. Preprocessor
+1.1. Variadic macros
+
For variadic macros, stick with this C99-like syntax:
#define DPRINTF(fmt, ...) \
do { printf("IRQ: " fmt, ## __VA_ARGS__); } while (0)
+1.2. Include directives
+
+Order include directives as follows:
+
+#include "qemu/osdep.h" /* Always first... */
+#include <...> /* then system headers... */
+#include "..." /* and finally QEMU headers. */
+
+The "qemu/osdep.h" header contains preprocessor macros that affect the behavior
+of core system headers like <stdint.h>. It must be the first include so that
+core system headers included by external libraries get the preprocessor macros
+that QEMU depends on.
+
+Do not include "qemu/osdep.h" from header files since the .c file will have
+already included it.
+
2. C types
It should be common sense to use the right type, but we have collected
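The include-ordering rule introduced in the HACKING hunk above works out as follows in a typical QEMU .c file. This is only a sketch: placing "qemu/osdep.h" first is the actual requirement, while the other headers named here are illustrative assumptions rather than part of the change.

/* Sketch of the HACKING 1.2 include order; only "qemu/osdep.h" first is required. */
#include "qemu/osdep.h"        /* always first: sets feature-test macros for system headers */

#include <sys/types.h>         /* system and library headers next */
#include <glib.h>

#include "qemu-common.h"       /* QEMU's own headers last */
#include "qemu/error-report.h" /* another QEMU header, shown as an example */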
diff --git a/MAINTAINERS b/MAINTAINERS
index 585cd5abd7..a428cb2e23 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -132,6 +132,12 @@ F: include/hw/cris/
F: tests/tcg/cris/
F: disas/cris.c
+HPPA (PA-RISC)
+M: Richard Henderson <rth@twiddle.net>
+S: Maintained
+F: target/hppa/
+F: disas/hppa.c
+
LM32
M: Michael Walle <michael@walle.cc>
S: Maintained
@@ -181,6 +187,14 @@ F: disas/moxie.c
F: hw/moxie/
F: default-configs/moxie-softmmu.mak
+NiosII
+M: Chris Wulff <crwulff@gmail.com>
+M: Marek Vasut <marex@denx.de>
+S: Maintained
+F: target/nios2/
+F: hw/nios2/
+F: disas/nios2.c
+
OpenRISC
M: Jia Liu <proljc@gmail.com>
S: Maintained
@@ -508,7 +522,6 @@ M: Shannon Zhao <shannon.zhao@linaro.org>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/virt-acpi-build.c
-F: include/hw/arm/virt-acpi-build.h
STM32F205
M: Alistair Francis <alistair@alistair23.me>
@@ -726,6 +739,13 @@ S: Maintained
F: hw/sparc64/sun4u.c
F: pc-bios/openbios-sparc64
+Sun4v
+M: Artyom Tarasenko <atar4qemu@gmail.com>
+S: Maintained
+F: hw/sparc64/sun4v.c
+F: hw/timer/sun4v-rtc.c
+F: include/hw/timer/sun4v-rtc.h
+
Leon3
M: Fabien Chouteau <chouteau@adacore.com>
S: Maintained
@@ -807,6 +827,7 @@ M: Eduardo Habkost <ehabkost@redhat.com>
M: Marcel Apfelbaum <marcel@redhat.com>
S: Supported
F: hw/core/machine.c
+F: hw/core/null-machine.c
F: include/hw/boards.h
Xtensa Machines
@@ -885,7 +906,6 @@ F: hw/acpi/*
F: hw/smbios/*
F: hw/i386/acpi-build.[hc]
F: hw/arm/virt-acpi-build.c
-F: include/hw/arm/virt-acpi-build.h
ppc4xx
M: Alexander Graf <agraf@suse.de>
@@ -1408,6 +1428,7 @@ F: scripts/checkpatch.pl
Migration
M: Juan Quintela <quintela@redhat.com>
M: Amit Shah <amit.shah@redhat.com>
+M: Dr. David Alan Gilbert <dgilbert@redhat.com>
S: Maintained
F: include/migration/
F: migration/
@@ -1496,6 +1517,7 @@ M: Riku Voipio <riku.voipio@iki.fi>
S: Maintained
F: thunk.c
F: user-exec.c
+F: user-exec-stub.c
BSD user
S: Orphan
@@ -1720,9 +1742,9 @@ L: qemu-block@nongnu.org
S: Supported
F: block/linux-aio.c
F: include/block/raw-aio.h
-F: block/raw-posix.c
-F: block/raw-win32.c
-F: block/raw_bsd.c
+F: block/raw-format.c
+F: block/file-posix.c
+F: block/file-win32.c
F: block/win32-aio.c
qcow2
diff --git a/Makefile b/Makefile
index 214cbad35d..c166d2d3ea 100644
--- a/Makefile
+++ b/Makefile
@@ -80,8 +80,8 @@ GENERATED_HEADERS += module_block.h
Makefile: ;
configure: ;
-.PHONY: all clean cscope distclean dvi html info install install-doc \
- pdf recurse-all speed test dist msi FORCE
+.PHONY: all clean cscope distclean html info install install-doc \
+ pdf txt recurse-all speed test dist msi FORCE
$(call set-vpath, $(SRC_PATH))
@@ -90,7 +90,9 @@ LIBS+=-lz $(LIBS_TOOLS)
HELPERS-$(CONFIG_LINUX) = qemu-bridge-helper$(EXESUF)
ifdef BUILD_DOCS
-DOCS=qemu-doc.html qemu.1 qemu-img.1 qemu-nbd.8 qemu-ga.8
+DOCS=qemu-doc.html qemu-doc.txt qemu.1 qemu-img.1 qemu-nbd.8 qemu-ga.8
+DOCS+=docs/qemu-qmp-ref.html docs/qemu-qmp-ref.txt docs/qemu-qmp-ref.7
+DOCS+=docs/qemu-ga-ref.html docs/qemu-ga-ref.txt docs/qemu-ga-ref.7
ifdef CONFIG_VIRTFS
DOCS+=fsdev/virtfs-proxy-helper.1
endif
@@ -149,6 +151,7 @@ dummy := $(call unnest-vars,, \
qga-obj-y \
ivshmem-client-obj-y \
ivshmem-server-obj-y \
+ libvhost-user-obj-y \
qga-vss-dll-obj-y \
block-obj-y \
block-obj-m \
@@ -264,6 +267,7 @@ qemu-ga$(EXESUF): QEMU_CFLAGS += -I qga/qapi-generated
gen-out-type = $(subst .,-,$(suffix $@))
qapi-py = $(SRC_PATH)/scripts/qapi.py $(SRC_PATH)/scripts/ordereddict.py
+qapi-py += $(SRC_PATH)/scripts/qapi2texi.py
qga/qapi-generated/qga-qapi-types.c qga/qapi-generated/qga-qapi-types.h :\
$(SRC_PATH)/qga/qapi-schema.json $(SRC_PATH)/scripts/qapi-types.py $(qapi-py)
@@ -386,12 +390,17 @@ distclean: clean
rm -f config-all-devices.mak config-all-disas.mak config.status
rm -f po/*.mo tests/qemu-iotests/common.env
rm -f roms/seabios/config.mak roms/vgabios/config.mak
- rm -f qemu-doc.info qemu-doc.aux qemu-doc.cp qemu-doc.cps qemu-doc.dvi
+ rm -f qemu-doc.info qemu-doc.aux qemu-doc.cp qemu-doc.cps
rm -f qemu-doc.fn qemu-doc.fns qemu-doc.info qemu-doc.ky qemu-doc.kys
rm -f qemu-doc.log qemu-doc.pdf qemu-doc.pg qemu-doc.toc qemu-doc.tp
- rm -f qemu-doc.vr
+ rm -f qemu-doc.vr qemu-doc.txt
rm -f config.log
rm -f linux-headers/asm
+ rm -f qemu-ga-qapi.texi qemu-qapi.texi
+ rm -f docs/qemu-qmp-ref.7 docs/qemu-ga-ref.7
+ rm -f docs/qemu-qmp-ref.txt docs/qemu-ga-ref.txt
+ rm -f docs/qemu-qmp-ref.pdf docs/qemu-ga-ref.pdf
+ rm -f docs/qemu-qmp-ref.html docs/qemu-ga-ref.html
for d in $(TARGET_DIRS); do \
rm -rf $$d || exit 1 ; \
done
@@ -428,10 +437,14 @@ endif
install-doc: $(DOCS)
$(INSTALL_DIR) "$(DESTDIR)$(qemu_docdir)"
$(INSTALL_DATA) qemu-doc.html "$(DESTDIR)$(qemu_docdir)"
- $(INSTALL_DATA) $(SRC_PATH)/docs/qmp-commands.txt "$(DESTDIR)$(qemu_docdir)"
+ $(INSTALL_DATA) qemu-doc.txt "$(DESTDIR)$(qemu_docdir)"
+ $(INSTALL_DATA) docs/qemu-qmp-ref.html "$(DESTDIR)$(qemu_docdir)"
+ $(INSTALL_DATA) docs/qemu-qmp-ref.txt "$(DESTDIR)$(qemu_docdir)"
ifdef CONFIG_POSIX
$(INSTALL_DIR) "$(DESTDIR)$(mandir)/man1"
$(INSTALL_DATA) qemu.1 "$(DESTDIR)$(mandir)/man1"
+ $(INSTALL_DIR) "$(DESTDIR)$(mandir)/man7"
+ $(INSTALL_DATA) docs/qemu-qmp-ref.7 "$(DESTDIR)$(mandir)/man7"
ifneq ($(TOOLS),)
$(INSTALL_DATA) qemu-img.1 "$(DESTDIR)$(mandir)/man1"
$(INSTALL_DIR) "$(DESTDIR)$(mandir)/man8"
@@ -439,6 +452,9 @@ ifneq ($(TOOLS),)
endif
ifneq (,$(findstring qemu-ga,$(TOOLS)))
$(INSTALL_DATA) qemu-ga.8 "$(DESTDIR)$(mandir)/man8"
+ $(INSTALL_DATA) docs/qemu-ga-ref.html "$(DESTDIR)$(qemu_docdir)"
+ $(INSTALL_DATA) docs/qemu-ga-ref.txt "$(DESTDIR)$(qemu_docdir)"
+ $(INSTALL_DATA) docs/qemu-ga-ref.7 "$(DESTDIR)$(mandir)/man7"
endif
endif
ifdef CONFIG_VIRTFS
@@ -526,21 +542,23 @@ ui/console-gl.o: $(SRC_PATH)/ui/console-gl.c \
ui/shader/texture-blit-vert.h ui/shader/texture-blit-frag.h
# documentation
-MAKEINFO=makeinfo
-MAKEINFOFLAGS=--no-headers --no-split --number-sections
-TEXIFLAG=$(if $(V),,--quiet)
-%.dvi: %.texi
- $(call quiet-command,texi2dvi $(TEXIFLAG) -I . $<,"GEN","$@")
+MAKEINFO=makeinfo -D 'VERSION $(VERSION)'
+MAKEINFOFLAGS=--no-split --number-sections
+TEXIFLAG=$(if $(V),,--quiet) --command='@set VERSION $(VERSION)'
%.html: %.texi
- $(call quiet-command,LC_ALL=C $(MAKEINFO) $(MAKEINFOFLAGS) --html $< -o $@, \
- "GEN","$@")
+ $(call quiet-command,LC_ALL=C $(MAKEINFO) $(MAKEINFOFLAGS) --no-headers \
+ --html $< -o $@,"GEN","$@")
%.info: %.texi
- $(call quiet-command,$(MAKEINFO) $< -o $@,"GEN","$@")
+ $(call quiet-command,$(MAKEINFO) $(MAKEINFOFLAGS) $< -o $@,"GEN","$@")
+
+%.txt: %.texi
+ $(call quiet-command,LC_ALL=C $(MAKEINFO) $(MAKEINFOFLAGS) --no-headers \
+ --plaintext $< -o $@,"GEN","$@")
%.pdf: %.texi
- $(call quiet-command,texi2pdf $(TEXIFLAG) -I . $<,"GEN","$@")
+ $(call quiet-command,texi2pdf $(TEXIFLAG) -I $(SRC_PATH) -I . $< -o $@,"GEN","$@")
qemu-options.texi: $(SRC_PATH)/qemu-options.hx $(SRC_PATH)/scripts/hxtool
$(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -t < $< > $@,"GEN","$@")
@@ -554,47 +572,36 @@ qemu-monitor-info.texi: $(SRC_PATH)/hmp-commands-info.hx $(SRC_PATH)/scripts/hxt
qemu-img-cmds.texi: $(SRC_PATH)/qemu-img-cmds.hx $(SRC_PATH)/scripts/hxtool
$(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -t < $< > $@,"GEN","$@")
+qemu-qapi.texi: $(qapi-modules) $(qapi-py)
+ $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi2texi.py $< > $@,"GEN" "$@")
+
+qemu-ga-qapi.texi: $(SRC_PATH)/qga/qapi-schema.json $(qapi-py)
+ $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi2texi.py $< > $@,"GEN","$@")
+
qemu.1: qemu-doc.texi qemu-options.texi qemu-monitor.texi qemu-monitor-info.texi
- $(call quiet-command, \
- perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< qemu.pod && \
- $(POD2MAN) --section=1 --center=" " --release=" " qemu.pod > $@, \
- "GEN","$@")
qemu.1: qemu-option-trace.texi
-
qemu-img.1: qemu-img.texi qemu-option-trace.texi qemu-img-cmds.texi
- $(call quiet-command, \
- perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< qemu-img.pod && \
- $(POD2MAN) --section=1 --center=" " --release=" " qemu-img.pod > $@, \
- "GEN","$@")
-
fsdev/virtfs-proxy-helper.1: fsdev/virtfs-proxy-helper.texi
- $(call quiet-command, \
- perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< fsdev/virtfs-proxy-helper.pod && \
- $(POD2MAN) --section=1 --center=" " --release=" " fsdev/virtfs-proxy-helper.pod > $@, \
- "GEN","$@")
-
qemu-nbd.8: qemu-nbd.texi qemu-option-trace.texi
- $(call quiet-command, \
- perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< qemu-nbd.pod && \
- $(POD2MAN) --section=8 --center=" " --release=" " qemu-nbd.pod > $@, \
- "GEN","$@")
-
qemu-ga.8: qemu-ga.texi
- $(call quiet-command, \
- perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< qemu-ga.pod && \
- $(POD2MAN) --section=8 --center=" " --release=" " qemu-ga.pod > $@, \
- "GEN","$@")
-dvi: qemu-doc.dvi
-html: qemu-doc.html
-info: qemu-doc.info
-pdf: qemu-doc.pdf
+html: qemu-doc.html docs/qemu-qmp-ref.html docs/qemu-ga-ref.html
+info: qemu-doc.info docs/qemu-qmp-ref.info docs/qemu-ga-ref.info
+pdf: qemu-doc.pdf docs/qemu-qmp-ref.pdf docs/qemu-ga-ref.pdf
+txt: qemu-doc.txt docs/qemu-qmp-ref.txt docs/qemu-ga-ref.txt
-qemu-doc.dvi qemu-doc.html qemu-doc.info qemu-doc.pdf: \
+qemu-doc.html qemu-doc.info qemu-doc.pdf qemu-doc.txt: \
qemu-img.texi qemu-nbd.texi qemu-options.texi qemu-option-trace.texi \
qemu-monitor.texi qemu-img-cmds.texi qemu-ga.texi \
qemu-monitor-info.texi
+docs/qemu-ga-ref.dvi docs/qemu-ga-ref.html docs/qemu-ga-ref.info docs/qemu-ga-ref.pdf docs/qemu-ga-ref.txt docs/qemu-ga-ref.7: \
+docs/qemu-ga-ref.texi qemu-ga-qapi.texi
+
+docs/qemu-qmp-ref.dvi docs/qemu-qmp-ref.html docs/qemu-qmp-ref.info docs/qemu-qmp-ref.pdf docs/qemu-qmp-ref.txt docs/qemu-qmp-ref.7: \
+docs/qemu-qmp-ref.texi qemu-qapi.texi
+
+
ifdef CONFIG_WIN32
INSTALLER = qemu-setup-$(VERSION)$(EXESUF)
@@ -687,7 +694,7 @@ help:
@echo ' docker - Help about targets running tests inside Docker containers'
@echo ''
@echo 'Documentation targets:'
- @echo ' dvi html info pdf'
+ @echo ' html info pdf txt'
@echo ' - Build documentation in specified format'
@echo ''
ifdef CONFIG_WIN32
diff --git a/Makefile.objs b/Makefile.objs
index 51c36a4d54..01cef866e4 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -115,7 +115,7 @@ qga-vss-dll-obj-y = qga/
# contrib
ivshmem-client-obj-y = contrib/ivshmem-client/
ivshmem-server-obj-y = contrib/ivshmem-server/
-
+libvhost-user-obj-y = contrib/libvhost-user/
######################################################################
trace-events-y = trace-events
diff --git a/Makefile.target b/Makefile.target
index 8ae82cb311..fa2b151caa 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -97,6 +97,7 @@ obj-y += target/$(TARGET_BASE_ARCH)/
obj-y += disas.o
obj-y += tcg-runtime.o
obj-$(call notempty,$(TARGET_XML_FILES)) += gdbstub-xml.o
+obj-$(call lnot,$(CONFIG_HAX)) += hax-stub.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-$(CONFIG_LIBDECNUMBER) += libdecnumber/decContext.o
@@ -115,7 +116,7 @@ QEMU_CFLAGS+=-I$(SRC_PATH)/linux-user/$(TARGET_ABI_DIR) \
-I$(SRC_PATH)/linux-user
obj-y += linux-user/
-obj-y += gdbstub.o thunk.o user-exec.o
+obj-y += gdbstub.o thunk.o user-exec.o user-exec-stub.o
endif #CONFIG_LINUX_USER
@@ -128,7 +129,7 @@ QEMU_CFLAGS+=-I$(SRC_PATH)/bsd-user -I$(SRC_PATH)/bsd-user/$(TARGET_ABI_DIR) \
-I$(SRC_PATH)/bsd-user/$(HOST_VARIANT_DIR)
obj-y += bsd-user/
-obj-y += gdbstub.o user-exec.o
+obj-y += gdbstub.o user-exec.o user-exec-stub.o
endif #CONFIG_BSD_USER
diff --git a/README b/README
index bd8060a3ee..cb60d05bee 100644
--- a/README
+++ b/README
@@ -45,6 +45,7 @@ of other UNIX targets. The simple steps to build QEMU are:
Additional information can also be found online via the QEMU website:
http://qemu-project.org/Hosts/Linux
+ http://qemu-project.org/Hosts/Mac
http://qemu-project.org/Hosts/W32
diff --git a/aio-posix.c b/aio-posix.c
index e13b9ab2b0..9453d83743 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -16,8 +16,10 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
-#include "qemu/queue.h"
+#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
+#include "qemu/cutils.h"
+#include "trace.h"
#ifdef CONFIG_EPOLL_CREATE1
#include <sys/epoll.h>
#endif
@@ -27,6 +29,9 @@ struct AioHandler
GPollFD pfd;
IOHandler *io_read;
IOHandler *io_write;
+ AioPollFn *io_poll;
+ IOHandler *io_poll_begin;
+ IOHandler *io_poll_end;
int deleted;
void *opaque;
bool is_external;
@@ -61,7 +66,7 @@ static bool aio_epoll_try_enable(AioContext *ctx)
AioHandler *node;
struct epoll_event event;
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
int r;
if (node->deleted || !node->pfd.events) {
continue;
@@ -200,47 +205,61 @@ void aio_set_fd_handler(AioContext *ctx,
bool is_external,
IOHandler *io_read,
IOHandler *io_write,
+ AioPollFn *io_poll,
void *opaque)
{
AioHandler *node;
bool is_new = false;
bool deleted = false;
+ qemu_lockcnt_lock(&ctx->list_lock);
+
node = find_aio_handler(ctx, fd);
/* Are we deleting the fd handler? */
- if (!io_read && !io_write) {
+ if (!io_read && !io_write && !io_poll) {
if (node == NULL) {
+ qemu_lockcnt_unlock(&ctx->list_lock);
return;
}
g_source_remove_poll(&ctx->source, &node->pfd);
/* If the lock is held, just mark the node as deleted */
- if (ctx->walking_handlers) {
+ if (qemu_lockcnt_count(&ctx->list_lock)) {
node->deleted = 1;
node->pfd.revents = 0;
} else {
/* Otherwise, delete it for real. We can't just mark it as
- * deleted because deleted nodes are only cleaned up after
- * releasing the walking_handlers lock.
+ * deleted because deleted nodes are only cleaned up while
+ * no one is walking the handlers list.
*/
QLIST_REMOVE(node, node);
deleted = true;
}
+
+ if (!node->io_poll) {
+ ctx->poll_disable_cnt--;
+ }
} else {
if (node == NULL) {
/* Alloc and insert if it's not already there */
node = g_new0(AioHandler, 1);
node->pfd.fd = fd;
- QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
+ QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
g_source_add_poll(&ctx->source, &node->pfd);
is_new = true;
+
+ ctx->poll_disable_cnt += !io_poll;
+ } else {
+ ctx->poll_disable_cnt += !io_poll - !node->io_poll;
}
+
/* Update handler with latest information */
node->io_read = io_read;
node->io_write = io_write;
+ node->io_poll = io_poll;
node->opaque = opaque;
node->is_external = is_external;
@@ -249,71 +268,132 @@ void aio_set_fd_handler(AioContext *ctx,
}
aio_epoll_update(ctx, node, is_new);
+ qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
+
if (deleted) {
g_free(node);
}
}
+void aio_set_fd_poll(AioContext *ctx, int fd,
+ IOHandler *io_poll_begin,
+ IOHandler *io_poll_end)
+{
+ AioHandler *node = find_aio_handler(ctx, fd);
+
+ if (!node) {
+ return;
+ }
+
+ node->io_poll_begin = io_poll_begin;
+ node->io_poll_end = io_poll_end;
+}
+
void aio_set_event_notifier(AioContext *ctx,
EventNotifier *notifier,
bool is_external,
- EventNotifierHandler *io_read)
+ EventNotifierHandler *io_read,
+ AioPollFn *io_poll)
+{
+ aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
+ (IOHandler *)io_read, NULL, io_poll, notifier);
+}
+
+void aio_set_event_notifier_poll(AioContext *ctx,
+ EventNotifier *notifier,
+ EventNotifierHandler *io_poll_begin,
+ EventNotifierHandler *io_poll_end)
+{
+ aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
+ (IOHandler *)io_poll_begin,
+ (IOHandler *)io_poll_end);
+}
+
+static void poll_set_started(AioContext *ctx, bool started)
{
- aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
- is_external, (IOHandler *)io_read, NULL, notifier);
+ AioHandler *node;
+
+ if (started == ctx->poll_started) {
+ return;
+ }
+
+ ctx->poll_started = started;
+
+ qemu_lockcnt_inc(&ctx->list_lock);
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
+ IOHandler *fn;
+
+ if (node->deleted) {
+ continue;
+ }
+
+ if (started) {
+ fn = node->io_poll_begin;
+ } else {
+ fn = node->io_poll_end;
+ }
+
+ if (fn) {
+ fn(node->opaque);
+ }
+ }
+ qemu_lockcnt_dec(&ctx->list_lock);
}
+
bool aio_prepare(AioContext *ctx)
{
+ /* Poll mode cannot be used with glib's event loop, disable it. */
+ poll_set_started(ctx, false);
+
return false;
}
bool aio_pending(AioContext *ctx)
{
AioHandler *node;
+ bool result = false;
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ /*
+ * We have to walk very carefully in case aio_set_fd_handler is
+ * called while we're walking.
+ */
+ qemu_lockcnt_inc(&ctx->list_lock);
+
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
int revents;
revents = node->pfd.revents & node->pfd.events;
if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
aio_node_check(ctx, node->is_external)) {
- return true;
+ result = true;
+ break;
}
if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
aio_node_check(ctx, node->is_external)) {
- return true;
+ result = true;
+ break;
}
}
+ qemu_lockcnt_dec(&ctx->list_lock);
- return false;
+ return result;
}
-bool aio_dispatch(AioContext *ctx)
+static bool aio_dispatch_handlers(AioContext *ctx)
{
- AioHandler *node;
+ AioHandler *node, *tmp;
bool progress = false;
/*
- * If there are callbacks left that have been queued, we need to call them.
- * Do not call select in this case, because it is possible that the caller
- * does not need a complete flush (as is the case for aio_poll loops).
- */
- if (aio_bh_poll(ctx)) {
- progress = true;
- }
-
- /*
* We have to walk very carefully in case aio_set_fd_handler is
* called while we're walking.
*/
- node = QLIST_FIRST(&ctx->aio_handlers);
- while (node) {
- AioHandler *tmp;
- int revents;
+ qemu_lockcnt_inc(&ctx->list_lock);
- ctx->walking_handlers++;
+ QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
+ int revents;
revents = node->pfd.revents & node->pfd.events;
node->pfd.revents = 0;
@@ -337,15 +417,36 @@ bool aio_dispatch(AioContext *ctx)
progress = true;
}
- tmp = node;
- node = QLIST_NEXT(node, node);
+ if (node->deleted) {
+ if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
+ QLIST_REMOVE(node, node);
+ g_free(node);
+ qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
+ }
+ }
+ }
- ctx->walking_handlers--;
+ qemu_lockcnt_dec(&ctx->list_lock);
+ return progress;
+}
- if (!ctx->walking_handlers && tmp->deleted) {
- QLIST_REMOVE(tmp, node);
- g_free(tmp);
- }
+/*
+ * Note that dispatch_fds == false has the side-effect of postponing the
+ * freeing of deleted handlers.
+ */
+bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
+{
+ bool progress;
+
+ /*
+ * If there are callbacks left that have been queued, we need to call them.
+ * Do not call select in this case, because it is possible that the caller
+ * does not need a complete flush (as is the case for aio_poll loops).
+ */
+ progress = aio_bh_poll(ctx);
+
+ if (dispatch_fds) {
+ progress |= aio_dispatch_handlers(ctx);
}
/* Run our timers */
@@ -400,12 +501,100 @@ static void add_pollfd(AioHandler *node)
npfd++;
}
+static bool run_poll_handlers_once(AioContext *ctx)
+{
+ bool progress = false;
+ AioHandler *node;
+
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
+ if (!node->deleted && node->io_poll &&
+ node->io_poll(node->opaque)) {
+ progress = true;
+ }
+
+ /* Caller handles freeing deleted nodes. Don't do it here. */
+ }
+
+ return progress;
+}
+
+/* run_poll_handlers:
+ * @ctx: the AioContext
+ * @max_ns: maximum time to poll for, in nanoseconds
+ *
+ * Busy-poll the attached handlers until one of them makes progress or
+ * @max_ns nanoseconds have elapsed.
+ *
+ * Note that ctx->notify_me must be non-zero so this function can detect
+ * aio_notify().
+ *
+ * Note that the caller must have incremented ctx->list_lock.
+ *
+ * Returns: true if progress was made, false otherwise
+ */
+static bool run_poll_handlers(AioContext *ctx, int64_t max_ns)
+{
+ bool progress;
+ int64_t end_time;
+
+ assert(ctx->notify_me);
+ assert(qemu_lockcnt_count(&ctx->list_lock) > 0);
+ assert(ctx->poll_disable_cnt == 0);
+
+ trace_run_poll_handlers_begin(ctx, max_ns);
+
+ end_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + max_ns;
+
+ do {
+ progress = run_poll_handlers_once(ctx);
+ } while (!progress && qemu_clock_get_ns(QEMU_CLOCK_REALTIME) < end_time);
+
+ trace_run_poll_handlers_end(ctx, progress);
+
+ return progress;
+}
+
+/* try_poll_mode:
+ * @ctx: the AioContext
+ * @blocking: busy polling is only attempted when blocking is true
+ *
+ * ctx->notify_me must be non-zero so this function can detect aio_notify().
+ *
+ * Note that the caller must have incremented ctx->list_lock.
+ *
+ * Returns: true if progress was made, false otherwise
+ */
+static bool try_poll_mode(AioContext *ctx, bool blocking)
+{
+ if (blocking && ctx->poll_max_ns && ctx->poll_disable_cnt == 0) {
+ /* See qemu_soonest_timeout() uint64_t hack */
+ int64_t max_ns = MIN((uint64_t)aio_compute_timeout(ctx),
+ (uint64_t)ctx->poll_ns);
+
+ if (max_ns) {
+ poll_set_started(ctx, true);
+
+ if (run_poll_handlers(ctx, max_ns)) {
+ return true;
+ }
+ }
+ }
+
+ poll_set_started(ctx, false);
+
+ /* Even if we don't run busy polling, try polling once in case it can make
+ * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
+ */
+ return run_poll_handlers_once(ctx);
+}
+
bool aio_poll(AioContext *ctx, bool blocking)
{
AioHandler *node;
- int i, ret;
+ int i;
+ int ret = 0;
bool progress;
int64_t timeout;
+ int64_t start = 0;
aio_context_acquire(ctx);
progress = false;
@@ -421,43 +610,93 @@ bool aio_poll(AioContext *ctx, bool blocking)
atomic_add(&ctx->notify_me, 2);
}
- ctx->walking_handlers++;
+ qemu_lockcnt_inc(&ctx->list_lock);
+
+ if (ctx->poll_max_ns) {
+ start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ }
- assert(npfd == 0);
+ if (try_poll_mode(ctx, blocking)) {
+ progress = true;
+ } else {
+ assert(npfd == 0);
- /* fill pollfds */
+ /* fill pollfds */
- if (!aio_epoll_enabled(ctx)) {
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
- if (!node->deleted && node->pfd.events
- && aio_node_check(ctx, node->is_external)) {
- add_pollfd(node);
+ if (!aio_epoll_enabled(ctx)) {
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
+ if (!node->deleted && node->pfd.events
+ && aio_node_check(ctx, node->is_external)) {
+ add_pollfd(node);
+ }
}
}
- }
- timeout = blocking ? aio_compute_timeout(ctx) : 0;
+ timeout = blocking ? aio_compute_timeout(ctx) : 0;
- /* wait until next event */
- if (timeout) {
- aio_context_release(ctx);
- }
- if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
- AioHandler epoll_handler;
-
- epoll_handler.pfd.fd = ctx->epollfd;
- epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
- npfd = 0;
- add_pollfd(&epoll_handler);
- ret = aio_epoll(ctx, pollfds, npfd, timeout);
- } else {
- ret = qemu_poll_ns(pollfds, npfd, timeout);
+ /* wait until next event */
+ if (timeout) {
+ aio_context_release(ctx);
+ }
+ if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
+ AioHandler epoll_handler;
+
+ epoll_handler.pfd.fd = ctx->epollfd;
+ epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
+ npfd = 0;
+ add_pollfd(&epoll_handler);
+ ret = aio_epoll(ctx, pollfds, npfd, timeout);
+ } else {
+ ret = qemu_poll_ns(pollfds, npfd, timeout);
+ }
+ if (timeout) {
+ aio_context_acquire(ctx);
+ }
}
+
if (blocking) {
atomic_sub(&ctx->notify_me, 2);
}
- if (timeout) {
- aio_context_acquire(ctx);
+
+ /* Adjust polling time */
+ if (ctx->poll_max_ns) {
+ int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;
+
+ if (block_ns <= ctx->poll_ns) {
+ /* This is the sweet spot, no adjustment needed */
+ } else if (block_ns > ctx->poll_max_ns) {
+ /* We'd have to poll for too long, poll less */
+ int64_t old = ctx->poll_ns;
+
+ if (ctx->poll_shrink) {
+ ctx->poll_ns /= ctx->poll_shrink;
+ } else {
+ ctx->poll_ns = 0;
+ }
+
+ trace_poll_shrink(ctx, old, ctx->poll_ns);
+ } else if (ctx->poll_ns < ctx->poll_max_ns &&
+ block_ns < ctx->poll_max_ns) {
+ /* There is room to grow, poll longer */
+ int64_t old = ctx->poll_ns;
+ int64_t grow = ctx->poll_grow;
+
+ if (grow == 0) {
+ grow = 2;
+ }
+
+ if (ctx->poll_ns) {
+ ctx->poll_ns *= grow;
+ } else {
+ ctx->poll_ns = 4000; /* start polling at 4 microseconds */
+ }
+
+ if (ctx->poll_ns > ctx->poll_max_ns) {
+ ctx->poll_ns = ctx->poll_max_ns;
+ }
+
+ trace_poll_grow(ctx, old, ctx->poll_ns);
+ }
}
aio_notify_accept(ctx);
@@ -470,10 +709,10 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
npfd = 0;
- ctx->walking_handlers--;
+ qemu_lockcnt_dec(&ctx->list_lock);
/* Run dispatch even if there were no readable fds to run timers */
- if (aio_dispatch(ctx)) {
+ if (aio_dispatch(ctx, ret > 0)) {
progress = true;
}
@@ -484,6 +723,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
void aio_context_setup(AioContext *ctx)
{
+ /* TODO remove this in final patch submission */
+ if (getenv("QEMU_AIO_POLL_MAX_NS")) {
+ fprintf(stderr, "The QEMU_AIO_POLL_MAX_NS environment variable has "
+ "been replaced with -object iothread,poll-max-ns=NUM\n");
+ exit(1);
+ }
+
#ifdef CONFIG_EPOLL_CREATE1
assert(!ctx->epollfd);
ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
@@ -495,3 +741,17 @@ void aio_context_setup(AioContext *ctx)
}
#endif
}
+
+void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
+ int64_t grow, int64_t shrink, Error **errp)
+{
+    /* No thread synchronization here; it doesn't matter if an incorrect value
+ * is used once.
+ */
+ ctx->poll_max_ns = max_ns;
+ ctx->poll_ns = 0;
+ ctx->poll_grow = grow;
+ ctx->poll_shrink = shrink;
+
+ aio_notify(ctx);
+}
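
The new logic in aio_poll() above is self-tuning: every iteration measures how long it blocked (block_ns) and adjusts the busy-poll window ctx->poll_ns between zero and poll-max-ns. The standalone sketch below models just that adjustment step; the struct and function names are hypothetical, only the decision tree mirrors the hunk.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the AioContext fields used above. */
    struct poll_state {
        int64_t poll_ns;       /* current busy-poll window */
        int64_t poll_max_ns;   /* poll-max-ns property */
        int64_t poll_grow;     /* 0 means "use the default factor of 2" */
        int64_t poll_shrink;   /* 0 means "reset the window to 0" */
    };

    /* One adjustment step; block_ns is how long the last aio_poll()
     * iteration took from start to wakeup. */
    static void adjust_poll_window(struct poll_state *s, int64_t block_ns)
    {
        if (block_ns <= s->poll_ns) {
            /* Sweet spot: the event arrived inside the polling window. */
        } else if (block_ns > s->poll_max_ns) {
            /* Polling that long is not worth it: shrink (or disable). */
            s->poll_ns = s->poll_shrink ? s->poll_ns / s->poll_shrink : 0;
        } else if (s->poll_ns < s->poll_max_ns && block_ns < s->poll_max_ns) {
            /* There is room to grow the window, capped at poll_max_ns. */
            int64_t grow = s->poll_grow ? s->poll_grow : 2;
            s->poll_ns = s->poll_ns ? s->poll_ns * grow : 4000;
            if (s->poll_ns > s->poll_max_ns) {
                s->poll_ns = s->poll_max_ns;
            }
        }
    }

    int main(void)
    {
        struct poll_state s = { .poll_max_ns = 32000 };
        int64_t samples[] = { 100000, 2000, 8000, 100000 };
        for (int i = 0; i < 4; i++) {
            adjust_poll_window(&s, samples[i]);
            printf("block_ns=%-7lld -> poll_ns=%lld\n",
                   (long long)samples[i], (long long)s.poll_ns);
        }
        return 0;
    }

A handler that keeps completing inside the window keeps polling enabled; one long blocking wait collapses the window (with poll_shrink at its default of 0) and the context falls back to ppoll(2)/epoll_wait(2).
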
diff --git a/aio-win32.c b/aio-win32.c
index c8c249e260..900524c9c2 100644
--- a/aio-win32.c
+++ b/aio-win32.c
@@ -20,6 +20,8 @@
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
+#include "qapi/error.h"
+#include "qemu/rcu_queue.h"
struct AioHandler {
EventNotifier *e;
@@ -38,11 +40,13 @@ void aio_set_fd_handler(AioContext *ctx,
bool is_external,
IOHandler *io_read,
IOHandler *io_write,
+ AioPollFn *io_poll,
void *opaque)
{
/* fd is a SOCKET in our case */
AioHandler *node;
+ qemu_lockcnt_lock(&ctx->list_lock);
QLIST_FOREACH(node, &ctx->aio_handlers, node) {
if (node->pfd.fd == fd && !node->deleted) {
break;
@@ -52,14 +56,14 @@ void aio_set_fd_handler(AioContext *ctx,
/* Are we deleting the fd handler? */
if (!io_read && !io_write) {
if (node) {
- /* If the lock is held, just mark the node as deleted */
- if (ctx->walking_handlers) {
+ /* If aio_poll is in progress, just mark the node as deleted */
+ if (qemu_lockcnt_count(&ctx->list_lock)) {
node->deleted = 1;
node->pfd.revents = 0;
} else {
/* Otherwise, delete it for real. We can't just mark it as
* deleted because deleted nodes are only cleaned up after
- * releasing the walking_handlers lock.
+ * releasing the list_lock.
*/
QLIST_REMOVE(node, node);
g_free(node);
@@ -72,7 +76,7 @@ void aio_set_fd_handler(AioContext *ctx,
/* Alloc and insert if it's not already there */
node = g_new0(AioHandler, 1);
node->pfd.fd = fd;
- QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
+ QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
}
node->pfd.events = 0;
@@ -97,16 +101,26 @@ void aio_set_fd_handler(AioContext *ctx,
FD_CONNECT | FD_WRITE | FD_OOB);
}
+ qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
}
+void aio_set_fd_poll(AioContext *ctx, int fd,
+ IOHandler *io_poll_begin,
+ IOHandler *io_poll_end)
+{
+ /* Not implemented */
+}
+
void aio_set_event_notifier(AioContext *ctx,
EventNotifier *e,
bool is_external,
- EventNotifierHandler *io_notify)
+ EventNotifierHandler *io_notify,
+ AioPollFn *io_poll)
{
AioHandler *node;
+ qemu_lockcnt_lock(&ctx->list_lock);
QLIST_FOREACH(node, &ctx->aio_handlers, node) {
if (node->e == e && !node->deleted) {
break;
@@ -118,14 +132,14 @@ void aio_set_event_notifier(AioContext *ctx,
if (node) {
g_source_remove_poll(&ctx->source, &node->pfd);
- /* If the lock is held, just mark the node as deleted */
- if (ctx->walking_handlers) {
+            /* If aio_poll is in progress, just mark the node as deleted */
+ if (qemu_lockcnt_count(&ctx->list_lock)) {
node->deleted = 1;
node->pfd.revents = 0;
} else {
/* Otherwise, delete it for real. We can't just mark it as
* deleted because deleted nodes are only cleaned up after
- * releasing the walking_handlers lock.
+ * releasing the list_lock.
*/
QLIST_REMOVE(node, node);
g_free(node);
@@ -139,7 +153,7 @@ void aio_set_event_notifier(AioContext *ctx,
node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
node->pfd.events = G_IO_IN;
node->is_external = is_external;
- QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
+ QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
g_source_add_poll(&ctx->source, &node->pfd);
}
@@ -147,9 +161,18 @@ void aio_set_event_notifier(AioContext *ctx,
node->io_notify = io_notify;
}
+ qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
}
+void aio_set_event_notifier_poll(AioContext *ctx,
+ EventNotifier *notifier,
+ EventNotifierHandler *io_poll_begin,
+ EventNotifierHandler *io_poll_end)
+{
+ /* Not implemented */
+}
+
bool aio_prepare(AioContext *ctx)
{
static struct timeval tv0;
@@ -157,10 +180,16 @@ bool aio_prepare(AioContext *ctx)
bool have_select_revents = false;
fd_set rfds, wfds;
+ /*
+ * We have to walk very carefully in case aio_set_fd_handler is
+ * called while we're walking.
+ */
+ qemu_lockcnt_inc(&ctx->list_lock);
+
/* fill fd sets */
FD_ZERO(&rfds);
FD_ZERO(&wfds);
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (node->io_read) {
FD_SET ((SOCKET)node->pfd.fd, &rfds);
}
@@ -170,7 +199,7 @@ bool aio_prepare(AioContext *ctx)
}
if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
node->pfd.revents = 0;
if (FD_ISSET(node->pfd.fd, &rfds)) {
node->pfd.revents |= G_IO_IN;
@@ -184,45 +213,55 @@ bool aio_prepare(AioContext *ctx)
}
}
+ qemu_lockcnt_dec(&ctx->list_lock);
return have_select_revents;
}
bool aio_pending(AioContext *ctx)
{
AioHandler *node;
+ bool result = false;
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ /*
+ * We have to walk very carefully in case aio_set_fd_handler is
+ * called while we're walking.
+ */
+ qemu_lockcnt_inc(&ctx->list_lock);
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (node->pfd.revents && node->io_notify) {
- return true;
+ result = true;
+ break;
}
if ((node->pfd.revents & G_IO_IN) && node->io_read) {
- return true;
+ result = true;
+ break;
}
if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
- return true;
+ result = true;
+ break;
}
}
- return false;
+ qemu_lockcnt_dec(&ctx->list_lock);
+ return result;
}
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
AioHandler *node;
bool progress = false;
+ AioHandler *tmp;
+
+ qemu_lockcnt_inc(&ctx->list_lock);
/*
* We have to walk very carefully in case aio_set_fd_handler is
* called while we're walking.
*/
- node = QLIST_FIRST(&ctx->aio_handlers);
- while (node) {
- AioHandler *tmp;
+ QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
int revents = node->pfd.revents;
- ctx->walking_handlers++;
-
if (!node->deleted &&
(revents || event_notifier_get_handle(node->e) == event) &&
node->io_notify) {
@@ -257,26 +296,27 @@ static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
}
}
- tmp = node;
- node = QLIST_NEXT(node, node);
-
- ctx->walking_handlers--;
-
- if (!ctx->walking_handlers && tmp->deleted) {
- QLIST_REMOVE(tmp, node);
- g_free(tmp);
+ if (node->deleted) {
+ if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
+ QLIST_REMOVE(node, node);
+ g_free(node);
+ qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
+ }
}
}
+ qemu_lockcnt_dec(&ctx->list_lock);
return progress;
}
-bool aio_dispatch(AioContext *ctx)
+bool aio_dispatch(AioContext *ctx, bool dispatch_fds)
{
bool progress;
progress = aio_bh_poll(ctx);
- progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+ if (dispatch_fds) {
+ progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
+ }
progress |= timerlistgroup_run_timers(&ctx->tlg);
return progress;
}
@@ -303,20 +343,19 @@ bool aio_poll(AioContext *ctx, bool blocking)
atomic_add(&ctx->notify_me, 2);
}
+ qemu_lockcnt_inc(&ctx->list_lock);
have_select_revents = aio_prepare(ctx);
- ctx->walking_handlers++;
-
/* fill fd sets */
count = 0;
- QLIST_FOREACH(node, &ctx->aio_handlers, node) {
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!node->deleted && node->io_notify
&& aio_node_check(ctx, node->is_external)) {
events[count++] = event_notifier_get_handle(node->e);
}
}
- ctx->walking_handlers--;
+ qemu_lockcnt_dec(&ctx->list_lock);
first = true;
/* ctx->notifier is always registered. */
@@ -374,3 +413,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
void aio_context_setup(AioContext *ctx)
{
}
+
+void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
+ int64_t grow, int64_t shrink, Error **errp)
+{
+ error_setg(errp, "AioContext polling is not implemented on Windows");
+}
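
Both aio-posix.c and aio-win32.c now protect ctx->aio_handlers with ctx->list_lock instead of the walking_handlers counter: walkers take a reference with qemu_lockcnt_inc()/qemu_lockcnt_dec(), writers take the lock, and a node is freed on the spot only if no walker holds a reference; otherwise it is merely marked deleted. The toy program below illustrates that deferred-deletion rule with a plain mutex and counter rather than the real QemuLockCnt API; it deliberately leaves out the RCU list iterators and the qemu_lockcnt_dec_if_lock() cleanup that the dispatch loops above perform.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model only: a reader count plus a mutex standing in for
     * QemuLockCnt.  The node layout and visit() callback are hypothetical. */
    static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int walkers;                     /* like qemu_lockcnt_count() */

    struct handler {
        bool deleted;
        struct handler *next;
    };
    static struct handler *handlers;

    static void walk(void (*visit)(struct handler *))
    {
        pthread_mutex_lock(&list_mutex);
        walkers++;                          /* qemu_lockcnt_inc() */
        pthread_mutex_unlock(&list_mutex);

        for (struct handler *h = handlers; h; h = h->next) {
            if (!h->deleted) {
                visit(h);
            }
        }

        pthread_mutex_lock(&list_mutex);
        walkers--;                          /* qemu_lockcnt_dec() */
        pthread_mutex_unlock(&list_mutex);
    }

    static void remove_handler(struct handler *h)
    {
        pthread_mutex_lock(&list_mutex);    /* qemu_lockcnt_lock() */
        if (walkers) {
            h->deleted = true;              /* defer: a walk is in progress */
        } else {
            struct handler **p = &handlers; /* no walkers: free for real */
            while (*p && *p != h) {
                p = &(*p)->next;
            }
            if (*p) {
                *p = h->next;
            }
            free(h);
        }
        pthread_mutex_unlock(&list_mutex);  /* qemu_lockcnt_unlock() */
    }

    static void show(struct handler *h) { printf("handler %p\n", (void *)h); }

    int main(void)
    {
        struct handler *a = calloc(1, sizeof(*a));
        struct handler *b = calloc(1, sizeof(*b));
        a->next = b;
        handlers = a;

        walk(show);
        remove_handler(b);                  /* no walkers: freed immediately */
        walk(show);
        remove_handler(a);
        return 0;
    }

In the real code the last walker also reaps nodes that were only marked deleted, which is what the qemu_lockcnt_dec_if_lock()/qemu_lockcnt_inc_and_unlock() sequence in aio_dispatch_handlers() is for.
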
diff --git a/arch_init.c b/arch_init.c
index 5cc58b2c35..0810116144 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -28,7 +28,6 @@
#include "sysemu/arch_init.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
-#include "hw/smbios/smbios.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
@@ -64,6 +63,8 @@ int graphic_depth = 32;
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
+#elif defined(TARGET_NIOS2)
+#define QEMU_ARCH QEMU_ARCH_NIOS2
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
@@ -84,33 +85,6 @@ int graphic_depth = 32;
const uint32_t arch_type = QEMU_ARCH;
-static struct defconfig_file {
- const char *filename;
- /* Indicates it is an user config file (disabled by -no-user-config) */
- bool userconfig;
-} default_config_files[] = {
- { CONFIG_QEMU_CONFDIR "/qemu.conf", true },
- { NULL }, /* end of list */
-};
-
-int qemu_read_default_config_files(bool userconfig)
-{
- int ret;
- struct defconfig_file *f;
-
- for (f = default_config_files; f->filename; f++) {
- if (!userconfig && f->userconfig) {
- continue;
- }
- ret = qemu_read_config_file(f->filename);
- if (ret < 0 && ret != -ENOENT) {
- return ret;
- }
- }
-
- return 0;
-}
-
struct soundhw {
const char *name;
const char *descr;
@@ -235,26 +209,6 @@ void audio_init(void)
}
}
-void do_acpitable_option(const QemuOpts *opts)
-{
-#ifdef TARGET_I386
- Error *err = NULL;
-
- acpi_table_add(opts, &err);
- if (err) {
- error_reportf_err(err, "Wrong acpi table provided: ");
- exit(1);
- }
-#endif
-}
-
-void do_smbios_option(QemuOpts *opts)
-{
-#ifdef TARGET_I386
- smbios_entry_add(opts);
-#endif
-}
-
int kvm_available(void)
{
#ifdef CONFIG_KVM
diff --git a/async.c b/async.c
index b2de360c23..0d218ab0e0 100644
--- a/async.c
+++ b/async.c
@@ -53,14 +53,14 @@ void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
.cb = cb,
.opaque = opaque,
};
- qemu_mutex_lock(&ctx->bh_lock);
+ qemu_lockcnt_lock(&ctx->list_lock);
bh->next = ctx->first_bh;
bh->scheduled = 1;
bh->deleted = 1;
/* Make sure that the members are ready before putting bh into list */
smp_wmb();
ctx->first_bh = bh;
- qemu_mutex_unlock(&ctx->bh_lock);
+ qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
}
@@ -73,12 +73,12 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
.cb = cb,
.opaque = opaque,
};
- qemu_mutex_lock(&ctx->bh_lock);
+ qemu_lockcnt_lock(&ctx->list_lock);
bh->next = ctx->first_bh;
/* Make sure that the members are ready before putting bh into list */
smp_wmb();
ctx->first_bh = bh;
- qemu_mutex_unlock(&ctx->bh_lock);
+ qemu_lockcnt_unlock(&ctx->list_lock);
return bh;
}
@@ -92,14 +92,13 @@ int aio_bh_poll(AioContext *ctx)
{
QEMUBH *bh, **bhp, *next;
int ret;
+ bool deleted = false;
- ctx->walking_bh++;
+ qemu_lockcnt_inc(&ctx->list_lock);
ret = 0;
- for (bh = ctx->first_bh; bh; bh = next) {
- /* Make sure that fetching bh happens before accessing its members */
- smp_read_barrier_depends();
- next = bh->next;
+ for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
+ next = atomic_rcu_read(&bh->next);
/* The atomic_xchg is paired with the one in qemu_bh_schedule. The
* implicit memory barrier ensures that the callback sees all writes
* done by the scheduling thread. It also ensures that the scheduling
@@ -114,13 +113,18 @@ int aio_bh_poll(AioContext *ctx)
bh->idle = 0;
aio_bh_call(bh);
}
+ if (bh->deleted) {
+ deleted = true;
+ }
}
- ctx->walking_bh--;
-
/* remove deleted bhs */
- if (!ctx->walking_bh) {
- qemu_mutex_lock(&ctx->bh_lock);
+ if (!deleted) {
+ qemu_lockcnt_dec(&ctx->list_lock);
+ return ret;
+ }
+
+ if (qemu_lockcnt_dec_and_lock(&ctx->list_lock)) {
bhp = &ctx->first_bh;
while (*bhp) {
bh = *bhp;
@@ -131,9 +135,8 @@ int aio_bh_poll(AioContext *ctx)
bhp = &bh->next;
}
}
- qemu_mutex_unlock(&ctx->bh_lock);
+ qemu_lockcnt_unlock(&ctx->list_lock);
}
-
return ret;
}
@@ -187,7 +190,8 @@ aio_compute_timeout(AioContext *ctx)
int timeout = -1;
QEMUBH *bh;
- for (bh = ctx->first_bh; bh; bh = bh->next) {
+ for (bh = atomic_rcu_read(&ctx->first_bh); bh;
+ bh = atomic_rcu_read(&bh->next)) {
if (bh->scheduled) {
if (bh->idle) {
/* idle bottom halves will be polled at least
@@ -251,7 +255,7 @@ aio_ctx_dispatch(GSource *source,
AioContext *ctx = (AioContext *) source;
assert(callback == NULL);
- aio_dispatch(ctx);
+ aio_dispatch(ctx, true);
return true;
}
@@ -270,7 +274,8 @@ aio_ctx_finalize(GSource *source)
}
#endif
- qemu_mutex_lock(&ctx->bh_lock);
+ qemu_lockcnt_lock(&ctx->list_lock);
+ assert(!qemu_lockcnt_count(&ctx->list_lock));
while (ctx->first_bh) {
QEMUBH *next = ctx->first_bh->next;
@@ -280,12 +285,12 @@ aio_ctx_finalize(GSource *source)
g_free(ctx->first_bh);
ctx->first_bh = next;
}
- qemu_mutex_unlock(&ctx->bh_lock);
+ qemu_lockcnt_unlock(&ctx->list_lock);
- aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
+ aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
event_notifier_cleanup(&ctx->notifier);
qemu_rec_mutex_destroy(&ctx->lock);
- qemu_mutex_destroy(&ctx->bh_lock);
+ qemu_lockcnt_destroy(&ctx->list_lock);
timerlistgroup_deinit(&ctx->tlg);
}
@@ -349,6 +354,15 @@ static void event_notifier_dummy_cb(EventNotifier *e)
{
}
+/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
+static bool event_notifier_poll(void *opaque)
+{
+ EventNotifier *e = opaque;
+ AioContext *ctx = container_of(e, AioContext, notifier);
+
+ return atomic_read(&ctx->notified);
+}
+
AioContext *aio_context_new(Error **errp)
{
int ret;
@@ -363,18 +377,24 @@ AioContext *aio_context_new(Error **errp)
goto fail;
}
g_source_set_can_recurse(&ctx->source, true);
+ qemu_lockcnt_init(&ctx->list_lock);
aio_set_event_notifier(ctx, &ctx->notifier,
false,
(EventNotifierHandler *)
- event_notifier_dummy_cb);
+ event_notifier_dummy_cb,
+ event_notifier_poll);
#ifdef CONFIG_LINUX_AIO
ctx->linux_aio = NULL;
#endif
ctx->thread_pool = NULL;
- qemu_mutex_init(&ctx->bh_lock);
qemu_rec_mutex_init(&ctx->lock);
timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);
+ ctx->poll_ns = 0;
+ ctx->poll_max_ns = 0;
+ ctx->poll_grow = 0;
+ ctx->poll_shrink = 0;
+
return ctx;
fail:
g_source_destroy(&ctx->source);
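
event_notifier_poll() above is the template for an AioPollFn: it must not block, and it returns true only when progress was made (here, when aio_notify() has already set ctx->notified), which lets aio_poll() skip ppoll(2)/epoll_wait(2) altogether. Below is a sketch of how a backend might plug into the extended aio_set_event_notifier() signature from this series; it assumes QEMU's headers, and MyState plus the my_*() helpers are hypothetical.

    /* Fragment: assumes QEMU's AioContext/EventNotifier APIs as extended
     * by this series.  MyState and the my_*() helpers are hypothetical. */
    typedef struct MyState {
        EventNotifier e;
        AioContext *ctx;
    } MyState;

    static bool my_completions_pending(MyState *s);   /* cheap, non-blocking */
    static void my_process_completions(MyState *s);

    /* Ordinary completion handler, called when the notifier fires. */
    static void my_read(EventNotifier *e)
    {
        MyState *s = container_of(e, MyState, e);
        my_process_completions(s);
    }

    /* AioPollFn: must not block; return true only if progress was made,
     * so aio_poll() can skip the ppoll()/epoll_wait() syscall. */
    static bool my_poll(void *opaque)
    {
        EventNotifier *e = opaque;          /* opaque is the notifier here */
        MyState *s = container_of(e, MyState, e);

        if (!my_completions_pending(s)) {
            return false;
        }
        my_process_completions(s);
        return true;
    }

    static void my_attach(MyState *s, AioContext *ctx)
    {
        s->ctx = ctx;
        aio_set_event_notifier(ctx, &s->e, false, my_read, my_poll);
    }

The linux-aio.c hunk later in this patch registers qemu_laio_poll_cb() in exactly this way, peeking at the completion ring instead of reading the eventfd.
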
diff --git a/backends/baum.c b/backends/baum.c
index b92369d840..b045ef49c5 100644
--- a/backends/baum.c
+++ b/backends/baum.c
@@ -27,12 +27,10 @@
#include "sysemu/char.h"
#include "qemu/timer.h"
#include "hw/usb.h"
+#include "ui/console.h"
#include <brlapi.h>
#include <brlapi_constants.h>
#include <brlapi_keycodes.h>
-#ifdef CONFIG_SDL
-#include <SDL_syswm.h>
-#endif
#if 0
#define DPRINTF(fmt, ...) \
@@ -227,12 +225,8 @@ static const uint8_t nabcc_translation[2][256] = {
/* The guest OS has started discussing with us, finish initializing BrlAPI */
static int baum_deferred_init(BaumDriverState *baum)
{
-#if defined(CONFIG_SDL)
-#if SDL_COMPILEDVERSION < SDL_VERSIONNUM(2, 0, 0)
- SDL_SysWMinfo info;
-#endif
-#endif
- int tty;
+ int tty = BRLAPI_TTY_DEFAULT;
+ QemuConsole *con;
if (baum->deferred_init) {
return 1;
@@ -243,21 +237,12 @@ static int baum_deferred_init(BaumDriverState *baum)
return 0;
}
-#if defined(CONFIG_SDL)
-#if SDL_COMPILEDVERSION < SDL_VERSIONNUM(2, 0, 0)
- memset(&info, 0, sizeof(info));
- SDL_VERSION(&info.version);
- if (SDL_GetWMInfo(&info)) {
- tty = info.info.x11.wmwindow;
- } else {
-#endif
-#endif
- tty = BRLAPI_TTY_DEFAULT;
-#if defined(CONFIG_SDL)
-#if SDL_COMPILEDVERSION < SDL_VERSIONNUM(2, 0, 0)
+ con = qemu_console_lookup_by_index(0);
+ if (con && qemu_console_is_graphic(con)) {
+ tty = qemu_console_get_window_id(con);
+ if (tty == -1)
+ tty = BRLAPI_TTY_DEFAULT;
}
-#endif
-#endif
if (brlapi__enterTtyMode(baum->brlapi, tty, NULL) == -1) {
brlapi_perror("baum: brlapi__enterTtyMode");
diff --git a/backends/cryptodev-builtin.c b/backends/cryptodev-builtin.c
index eda954b2a2..82a068e792 100644
--- a/backends/cryptodev-builtin.c
+++ b/backends/cryptodev-builtin.c
@@ -94,6 +94,8 @@ static void cryptodev_builtin_init(
backend->conf.max_size = LONG_MAX - sizeof(CryptoDevBackendSymOpInfo);
backend->conf.max_cipher_key_len = CRYPTODEV_BUITLIN_MAX_CIPHER_KEY_LEN;
backend->conf.max_auth_key_len = CRYPTODEV_BUITLIN_MAX_AUTH_KEY_LEN;
+
+ cryptodev_backend_set_ready(backend, true);
}
static int
@@ -111,23 +113,42 @@ cryptodev_builtin_get_unused_session_index(
return -1;
}
+#define AES_KEYSIZE_128 16
+#define AES_KEYSIZE_192 24
+#define AES_KEYSIZE_256 32
+#define AES_KEYSIZE_128_XTS AES_KEYSIZE_256
+#define AES_KEYSIZE_256_XTS 64
+
static int
-cryptodev_builtin_get_aes_algo(uint32_t key_len, Error **errp)
+cryptodev_builtin_get_aes_algo(uint32_t key_len, int mode, Error **errp)
{
int algo;
- if (key_len == 128 / 8) {
+ if (key_len == AES_KEYSIZE_128) {
algo = QCRYPTO_CIPHER_ALG_AES_128;
- } else if (key_len == 192 / 8) {
+ } else if (key_len == AES_KEYSIZE_192) {
algo = QCRYPTO_CIPHER_ALG_AES_192;
- } else if (key_len == 256 / 8) {
- algo = QCRYPTO_CIPHER_ALG_AES_256;
+ } else if (key_len == AES_KEYSIZE_256) { /* equals AES_KEYSIZE_128_XTS */
+ if (mode == QCRYPTO_CIPHER_MODE_XTS) {
+ algo = QCRYPTO_CIPHER_ALG_AES_128;
+ } else {
+ algo = QCRYPTO_CIPHER_ALG_AES_256;
+ }
+ } else if (key_len == AES_KEYSIZE_256_XTS) {
+ if (mode == QCRYPTO_CIPHER_MODE_XTS) {
+ algo = QCRYPTO_CIPHER_ALG_AES_256;
+ } else {
+ goto err;
+ }
} else {
- error_setg(errp, "Unsupported key length :%u", key_len);
- return -1;
+ goto err;
}
return algo;
+
+err:
+ error_setg(errp, "Unsupported key length :%u", key_len);
+ return -1;
}
static int cryptodev_builtin_create_cipher_session(
@@ -155,32 +176,48 @@ static int cryptodev_builtin_create_cipher_session(
switch (sess_info->cipher_alg) {
case VIRTIO_CRYPTO_CIPHER_AES_ECB:
+ mode = QCRYPTO_CIPHER_MODE_ECB;
algo = cryptodev_builtin_get_aes_algo(sess_info->key_len,
- errp);
+ mode, errp);
if (algo < 0) {
return -1;
}
- mode = QCRYPTO_CIPHER_MODE_ECB;
break;
case VIRTIO_CRYPTO_CIPHER_AES_CBC:
+ mode = QCRYPTO_CIPHER_MODE_CBC;
algo = cryptodev_builtin_get_aes_algo(sess_info->key_len,
- errp);
+ mode, errp);
if (algo < 0) {
return -1;
}
- mode = QCRYPTO_CIPHER_MODE_CBC;
break;
case VIRTIO_CRYPTO_CIPHER_AES_CTR:
+ mode = QCRYPTO_CIPHER_MODE_CTR;
algo = cryptodev_builtin_get_aes_algo(sess_info->key_len,
- errp);
+ mode, errp);
+ if (algo < 0) {
+ return -1;
+ }
+ break;
+ case VIRTIO_CRYPTO_CIPHER_AES_XTS:
+ mode = QCRYPTO_CIPHER_MODE_XTS;
+ algo = cryptodev_builtin_get_aes_algo(sess_info->key_len,
+ mode, errp);
if (algo < 0) {
return -1;
}
- mode = QCRYPTO_CIPHER_MODE_CTR;
break;
- case VIRTIO_CRYPTO_CIPHER_DES_ECB:
- algo = QCRYPTO_CIPHER_ALG_DES_RFB;
+ case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
mode = QCRYPTO_CIPHER_MODE_ECB;
+ algo = QCRYPTO_CIPHER_ALG_3DES;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
+ mode = QCRYPTO_CIPHER_MODE_CBC;
+ algo = QCRYPTO_CIPHER_ALG_3DES;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
+ mode = QCRYPTO_CIPHER_MODE_CTR;
+ algo = QCRYPTO_CIPHER_ALG_3DES;
break;
default:
error_setg(errp, "Unsupported cipher alg :%u",
@@ -331,6 +368,8 @@ static void cryptodev_builtin_cleanup(
backend->conf.peers.ccs[i] = NULL;
}
}
+
+ cryptodev_backend_set_ready(backend, false);
}
static void
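
One detail of the cryptodev-builtin hunk above that is easy to misread: an XTS key holds two AES keys, so the key length supplied by the guest selects a half-sized AES variant in XTS mode (32 bytes gives AES-128-XTS, 64 bytes gives AES-256-XTS). The standalone sketch below reproduces that decision tree; the enum values merely stand in for the QCRYPTO_CIPHER_ALG_* constants.

    #include <stdio.h>

    enum { ALG_AES_128, ALG_AES_192, ALG_AES_256, ALG_INVALID };
    enum { MODE_ECB, MODE_CBC, MODE_CTR, MODE_XTS };

    /* Same mapping as cryptodev_builtin_get_aes_algo() above. */
    static int aes_algo_for_key(unsigned key_len, int mode)
    {
        switch (key_len) {
        case 16:                  /* AES_KEYSIZE_128 */
            return ALG_AES_128;
        case 24:                  /* AES_KEYSIZE_192 */
            return ALG_AES_192;
        case 32:                  /* AES_KEYSIZE_256 == AES_KEYSIZE_128_XTS */
            return mode == MODE_XTS ? ALG_AES_128 : ALG_AES_256;
        case 64:                  /* AES_KEYSIZE_256_XTS */
            return mode == MODE_XTS ? ALG_AES_256 : ALG_INVALID;
        default:
            return ALG_INVALID;
        }
    }

    int main(void)
    {
        printf("32-byte key, CBC -> %d (AES-256)\n", aes_algo_for_key(32, MODE_CBC));
        printf("32-byte key, XTS -> %d (AES-128)\n", aes_algo_for_key(32, MODE_XTS));
        printf("64-byte key, XTS -> %d (AES-256)\n", aes_algo_for_key(64, MODE_XTS));
        return 0;
    }
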
diff --git a/backends/cryptodev.c b/backends/cryptodev.c
index 4a49f9762f..832f056266 100644
--- a/backends/cryptodev.c
+++ b/backends/cryptodev.c
@@ -73,8 +73,6 @@ void cryptodev_backend_cleanup(
if (bc->cleanup) {
bc->cleanup(backend, errp);
}
-
- backend->ready = false;
}
int64_t cryptodev_backend_sym_create_session(
@@ -189,14 +187,39 @@ cryptodev_backend_complete(UserCreatable *uc, Error **errp)
goto out;
}
}
- backend->ready = true;
+
return;
out:
- backend->ready = false;
error_propagate(errp, local_err);
}
+void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used)
+{
+ backend->is_used = used;
+}
+
+bool cryptodev_backend_is_used(CryptoDevBackend *backend)
+{
+ return backend->is_used;
+}
+
+void cryptodev_backend_set_ready(CryptoDevBackend *backend, bool ready)
+{
+ backend->ready = ready;
+}
+
+bool cryptodev_backend_is_ready(CryptoDevBackend *backend)
+{
+ return backend->ready;
+}
+
+static bool
+cryptodev_backend_can_be_deleted(UserCreatable *uc, Error **errp)
+{
+ return !cryptodev_backend_is_used(CRYPTODEV_BACKEND(uc));
+}
+
static void cryptodev_backend_instance_init(Object *obj)
{
object_property_add(obj, "queues", "int",
@@ -209,7 +232,9 @@ static void cryptodev_backend_instance_init(Object *obj)
static void cryptodev_backend_finalize(Object *obj)
{
+ CryptoDevBackend *backend = CRYPTODEV_BACKEND(obj);
+ cryptodev_backend_cleanup(backend, NULL);
}
static void
@@ -218,6 +243,7 @@ cryptodev_backend_class_init(ObjectClass *oc, void *data)
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
ucc->complete = cryptodev_backend_complete;
+ ucc->can_be_deleted = cryptodev_backend_can_be_deleted;
QTAILQ_INIT(&crypto_clients);
}
diff --git a/backends/hostmem.c b/backends/hostmem.c
index 4256d24acb..7f5de70609 100644
--- a/backends/hostmem.c
+++ b/backends/hostmem.c
@@ -348,6 +348,24 @@ host_memory_backend_can_be_deleted(UserCreatable *uc, Error **errp)
}
}
+static char *get_id(Object *o, Error **errp)
+{
+ HostMemoryBackend *backend = MEMORY_BACKEND(o);
+
+ return g_strdup(backend->id);
+}
+
+static void set_id(Object *o, const char *str, Error **errp)
+{
+ HostMemoryBackend *backend = MEMORY_BACKEND(o);
+
+ if (backend->id) {
+ error_setg(errp, "cannot change property value");
+ return;
+ }
+ backend->id = g_strdup(str);
+}
+
static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
@@ -377,6 +395,13 @@ host_memory_backend_class_init(ObjectClass *oc, void *data)
HostMemPolicy_lookup,
host_memory_backend_get_policy,
host_memory_backend_set_policy, &error_abort);
+ object_class_property_add_str(oc, "id", get_id, set_id, &error_abort);
+}
+
+static void host_memory_backend_finalize(Object *o)
+{
+ HostMemoryBackend *backend = MEMORY_BACKEND(o);
+ g_free(backend->id);
}
static const TypeInfo host_memory_backend_info = {
@@ -387,6 +412,7 @@ static const TypeInfo host_memory_backend_info = {
.class_init = host_memory_backend_class_init,
.instance_size = sizeof(HostMemoryBackend),
.instance_init = host_memory_backend_init,
+ .instance_finalize = host_memory_backend_finalize,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
diff --git a/block.c b/block.c
index 39ddea3411..a0346c80c6 100644
--- a/block.c
+++ b/block.c
@@ -1851,7 +1851,7 @@ static BlockDriverState *bdrv_open_inherit(const char *filename,
bdrv_refresh_filename(bs);
/* Check if any unknown options were used */
- if (options && (qdict_size(options) != 0)) {
+ if (qdict_size(options) != 0) {
const QDictEntry *entry = qdict_first(options);
if (flags & BDRV_O_PROTOCOL) {
error_setg(errp, "Block protocol '%s' doesn't support the option "
diff --git a/block/Makefile.objs b/block/Makefile.objs
index 67a036a1df..0b8fd06f27 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -1,4 +1,4 @@
-block-obj-y += raw_bsd.o qcow.o vdi.o vmdk.o cloop.o bochs.o vpc.o vvfat.o dmg.o
+block-obj-y += raw-format.o qcow.o vdi.o vmdk.o cloop.o bochs.o vpc.o vvfat.o dmg.o
block-obj-y += qcow2.o qcow2-refcount.o qcow2-cluster.o qcow2-snapshot.o qcow2-cache.o
block-obj-y += qed.o qed-gencb.o qed-l2-cache.o qed-table.o qed-cluster.o
block-obj-y += qed-check.o
@@ -6,8 +6,8 @@ block-obj-y += vhdx.o vhdx-endian.o vhdx-log.o
block-obj-y += quorum.o
block-obj-y += parallels.o blkdebug.o blkverify.o blkreplay.o
block-obj-y += block-backend.o snapshot.o qapi.o
-block-obj-$(CONFIG_WIN32) += raw-win32.o win32-aio.o
-block-obj-$(CONFIG_POSIX) += raw-posix.o
+block-obj-$(CONFIG_WIN32) += file-win32.o win32-aio.o
+block-obj-$(CONFIG_POSIX) += file-posix.o
block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
block-obj-y += null.o mirror.o commit.o io.o
block-obj-y += throttle-groups.o
diff --git a/block/blkdebug.c b/block/blkdebug.c
index 4127571454..acccf85666 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -58,10 +58,6 @@ typedef struct BlkdebugSuspendedReq {
QLIST_ENTRY(BlkdebugSuspendedReq) next;
} BlkdebugSuspendedReq;
-static const AIOCBInfo blkdebug_aiocb_info = {
- .aiocb_size = sizeof(BlkdebugAIOCB),
-};
-
enum {
ACTION_INJECT_ERROR,
ACTION_SET_STATE,
@@ -77,7 +73,7 @@ typedef struct BlkdebugRule {
int error;
int immediately;
int once;
- int64_t sector;
+ int64_t offset;
} inject;
struct {
int new_state;
@@ -174,6 +170,7 @@ static int add_rule(void *opaque, QemuOpts *opts, Error **errp)
const char* event_name;
BlkdebugEvent event;
struct BlkdebugRule *rule;
+ int64_t sector;
/* Find the right event for the rule */
event_name = qemu_opt_get(opts, "event");
@@ -200,7 +197,9 @@ static int add_rule(void *opaque, QemuOpts *opts, Error **errp)
rule->options.inject.once = qemu_opt_get_bool(opts, "once", 0);
rule->options.inject.immediately =
qemu_opt_get_bool(opts, "immediately", 0);
- rule->options.inject.sector = qemu_opt_get_number(opts, "sector", -1);
+ sector = qemu_opt_get_number(opts, "sector", -1);
+ rule->options.inject.offset =
+ sector == -1 ? -1 : sector * BDRV_SECTOR_SIZE;
break;
case ACTION_SET_STATE:
@@ -408,17 +407,14 @@ out:
static void error_callback_bh(void *opaque)
{
- struct BlkdebugAIOCB *acb = opaque;
- acb->common.cb(acb->common.opaque, acb->ret);
- qemu_aio_unref(acb);
+ Coroutine *co = opaque;
+ qemu_coroutine_enter(co);
}
-static BlockAIOCB *inject_error(BlockDriverState *bs,
- BlockCompletionFunc *cb, void *opaque, BlkdebugRule *rule)
+static int inject_error(BlockDriverState *bs, BlkdebugRule *rule)
{
BDRVBlkdebugState *s = bs->opaque;
int error = rule->options.inject.error;
- struct BlkdebugAIOCB *acb;
bool immediately = rule->options.inject.immediately;
if (rule->options.inject.once) {
@@ -426,81 +422,79 @@ static BlockAIOCB *inject_error(BlockDriverState *bs,
remove_rule(rule);
}
- if (immediately) {
- return NULL;
+ if (!immediately) {
+ aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), error_callback_bh,
+ qemu_coroutine_self());
+ qemu_coroutine_yield();
}
- acb = qemu_aio_get(&blkdebug_aiocb_info, bs, cb, opaque);
- acb->ret = -error;
-
- aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), error_callback_bh, acb);
-
- return &acb->common;
+ return -error;
}
-static BlockAIOCB *blkdebug_aio_readv(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
+static int coroutine_fn
+blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugRule *rule = NULL;
QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
- if (rule->options.inject.sector == -1 ||
- (rule->options.inject.sector >= sector_num &&
- rule->options.inject.sector < sector_num + nb_sectors)) {
+ uint64_t inject_offset = rule->options.inject.offset;
+
+ if (inject_offset == -1 ||
+ (inject_offset >= offset && inject_offset < offset + bytes))
+ {
break;
}
}
if (rule && rule->options.inject.error) {
- return inject_error(bs, cb, opaque, rule);
+ return inject_error(bs, rule);
}
- return bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors,
- cb, opaque);
+ return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}
-static BlockAIOCB *blkdebug_aio_writev(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
+static int coroutine_fn
+blkdebug_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugRule *rule = NULL;
QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
- if (rule->options.inject.sector == -1 ||
- (rule->options.inject.sector >= sector_num &&
- rule->options.inject.sector < sector_num + nb_sectors)) {
+ uint64_t inject_offset = rule->options.inject.offset;
+
+ if (inject_offset == -1 ||
+ (inject_offset >= offset && inject_offset < offset + bytes))
+ {
break;
}
}
if (rule && rule->options.inject.error) {
- return inject_error(bs, cb, opaque, rule);
+ return inject_error(bs, rule);
}
- return bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors,
- cb, opaque);
+ return bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
}
-static BlockAIOCB *blkdebug_aio_flush(BlockDriverState *bs,
- BlockCompletionFunc *cb, void *opaque)
+static int blkdebug_co_flush(BlockDriverState *bs)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugRule *rule = NULL;
QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
- if (rule->options.inject.sector == -1) {
+ if (rule->options.inject.offset == -1) {
break;
}
}
if (rule && rule->options.inject.error) {
- return inject_error(bs, cb, opaque, rule);
+ return inject_error(bs, rule);
}
- return bdrv_aio_flush(bs->file->bs, cb, opaque);
+ return bdrv_co_flush(bs->file->bs);
}
@@ -752,9 +746,9 @@ static BlockDriver bdrv_blkdebug = {
.bdrv_refresh_filename = blkdebug_refresh_filename,
.bdrv_refresh_limits = blkdebug_refresh_limits,
- .bdrv_aio_readv = blkdebug_aio_readv,
- .bdrv_aio_writev = blkdebug_aio_writev,
- .bdrv_aio_flush = blkdebug_aio_flush,
+ .bdrv_co_preadv = blkdebug_co_preadv,
+ .bdrv_co_pwritev = blkdebug_co_pwritev,
+ .bdrv_co_flush_to_disk = blkdebug_co_flush,
.bdrv_debug_event = blkdebug_debug_event,
.bdrv_debug_breakpoint = blkdebug_debug_breakpoint,
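
The blkdebug conversion above drops its AIOCB machinery entirely: a delayed error injection is now implemented by scheduling a one-shot bottom half that re-enters the current coroutine and then yielding, so the request resumes on the next event-loop iteration. A condensed fragment of that shape, assuming QEMU's coroutine and AioContext APIs exactly as used in the hunk (delay_point() itself is a hypothetical name):

    /* Re-enter the coroutine that scheduled us. */
    static void wakeup_bh(void *opaque)
    {
        Coroutine *co = opaque;
        qemu_coroutine_enter(co);
    }

    /* Must be called from coroutine context, like blkdebug_co_preadv(). */
    static void coroutine_fn delay_point(BlockDriverState *bs)
    {
        /* Defer ourselves: the bottom half runs on the next event-loop
         * iteration and resumes this coroutine right where it yielded. */
        aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), wakeup_bh,
                                qemu_coroutine_self());
        qemu_coroutine_yield();
    }
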
diff --git a/block/blkverify.c b/block/blkverify.c
index 28f9af6dba..43a940c2f5 100644
--- a/block/blkverify.c
+++ b/block/blkverify.c
@@ -19,38 +19,36 @@ typedef struct {
BdrvChild *test_file;
} BDRVBlkverifyState;
-typedef struct BlkverifyAIOCB BlkverifyAIOCB;
-struct BlkverifyAIOCB {
- BlockAIOCB common;
+typedef struct BlkverifyRequest {
+ Coroutine *co;
+ BlockDriverState *bs;
/* Request metadata */
bool is_write;
- int64_t sector_num;
- int nb_sectors;
+ uint64_t offset;
+ uint64_t bytes;
+ int flags;
- int ret; /* first completed request's result */
- unsigned int done; /* completion counter */
+ int (*request_fn)(BdrvChild *, int64_t, unsigned int, QEMUIOVector *,
+ BdrvRequestFlags);
- QEMUIOVector *qiov; /* user I/O vector */
- QEMUIOVector raw_qiov; /* cloned I/O vector for raw file */
- void *buf; /* buffer for raw file I/O */
+ int ret; /* test image result */
+ int raw_ret; /* raw image result */
- void (*verify)(BlkverifyAIOCB *acb);
-};
+ unsigned int done; /* completion counter */
-static const AIOCBInfo blkverify_aiocb_info = {
- .aiocb_size = sizeof(BlkverifyAIOCB),
-};
+ QEMUIOVector *qiov; /* user I/O vector */
+ QEMUIOVector *raw_qiov; /* cloned I/O vector for raw file */
+} BlkverifyRequest;
-static void GCC_FMT_ATTR(2, 3) blkverify_err(BlkverifyAIOCB *acb,
+static void GCC_FMT_ATTR(2, 3) blkverify_err(BlkverifyRequest *r,
const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
- fprintf(stderr, "blkverify: %s sector_num=%" PRId64 " nb_sectors=%d ",
- acb->is_write ? "write" : "read", acb->sector_num,
- acb->nb_sectors);
+ fprintf(stderr, "blkverify: %s offset=%" PRId64 " bytes=%" PRId64 " ",
+ r->is_write ? "write" : "read", r->offset, r->bytes);
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
va_end(ap);
@@ -166,113 +164,106 @@ static int64_t blkverify_getlength(BlockDriverState *bs)
return bdrv_getlength(s->test_file->bs);
}
-static BlkverifyAIOCB *blkverify_aio_get(BlockDriverState *bs, bool is_write,
- int64_t sector_num, QEMUIOVector *qiov,
- int nb_sectors,
- BlockCompletionFunc *cb,
- void *opaque)
+static void coroutine_fn blkverify_do_test_req(void *opaque)
{
- BlkverifyAIOCB *acb = qemu_aio_get(&blkverify_aiocb_info, bs, cb, opaque);
-
- acb->is_write = is_write;
- acb->sector_num = sector_num;
- acb->nb_sectors = nb_sectors;
- acb->ret = -EINPROGRESS;
- acb->done = 0;
- acb->qiov = qiov;
- acb->buf = NULL;
- acb->verify = NULL;
- return acb;
+ BlkverifyRequest *r = opaque;
+ BDRVBlkverifyState *s = r->bs->opaque;
+
+ r->ret = r->request_fn(s->test_file, r->offset, r->bytes, r->qiov,
+ r->flags);
+ r->done++;
+ qemu_coroutine_enter_if_inactive(r->co);
}
-static void blkverify_aio_bh(void *opaque)
+static void coroutine_fn blkverify_do_raw_req(void *opaque)
{
- BlkverifyAIOCB *acb = opaque;
+ BlkverifyRequest *r = opaque;
- if (acb->buf) {
- qemu_iovec_destroy(&acb->raw_qiov);
- qemu_vfree(acb->buf);
- }
- acb->common.cb(acb->common.opaque, acb->ret);
- qemu_aio_unref(acb);
+ r->raw_ret = r->request_fn(r->bs->file, r->offset, r->bytes, r->raw_qiov,
+ r->flags);
+ r->done++;
+ qemu_coroutine_enter_if_inactive(r->co);
}
-static void blkverify_aio_cb(void *opaque, int ret)
+static int coroutine_fn
+blkverify_co_prwv(BlockDriverState *bs, BlkverifyRequest *r, uint64_t offset,
+ uint64_t bytes, QEMUIOVector *qiov, QEMUIOVector *raw_qiov,
+ int flags, bool is_write)
{
- BlkverifyAIOCB *acb = opaque;
-
- switch (++acb->done) {
- case 1:
- acb->ret = ret;
- break;
-
- case 2:
- if (acb->ret != ret) {
- blkverify_err(acb, "return value mismatch %d != %d", acb->ret, ret);
- }
-
- if (acb->verify) {
- acb->verify(acb);
- }
+ Coroutine *co_a, *co_b;
+
+ *r = (BlkverifyRequest) {
+ .co = qemu_coroutine_self(),
+ .bs = bs,
+ .offset = offset,
+ .bytes = bytes,
+ .qiov = qiov,
+ .raw_qiov = raw_qiov,
+ .flags = flags,
+ .is_write = is_write,
+ .request_fn = is_write ? bdrv_co_pwritev : bdrv_co_preadv,
+ };
+
+ co_a = qemu_coroutine_create(blkverify_do_test_req, r);
+ co_b = qemu_coroutine_create(blkverify_do_raw_req, r);
+
+ qemu_coroutine_enter(co_a);
+ qemu_coroutine_enter(co_b);
+
+ while (r->done < 2) {
+ qemu_coroutine_yield();
+ }
- aio_bh_schedule_oneshot(bdrv_get_aio_context(acb->common.bs),
- blkverify_aio_bh, acb);
- break;
+ if (r->ret != r->raw_ret) {
+ blkverify_err(r, "return value mismatch %d != %d", r->ret, r->raw_ret);
}
+
+ return r->ret;
}
-static void blkverify_verify_readv(BlkverifyAIOCB *acb)
+static int coroutine_fn
+blkverify_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
- ssize_t offset = qemu_iovec_compare(acb->qiov, &acb->raw_qiov);
- if (offset != -1) {
- blkverify_err(acb, "contents mismatch in sector %" PRId64,
- acb->sector_num + (int64_t)(offset / BDRV_SECTOR_SIZE));
+ BlkverifyRequest r;
+ QEMUIOVector raw_qiov;
+ void *buf;
+ ssize_t cmp_offset;
+ int ret;
+
+ buf = qemu_blockalign(bs->file->bs, qiov->size);
+ qemu_iovec_init(&raw_qiov, qiov->niov);
+ qemu_iovec_clone(&raw_qiov, qiov, buf);
+
+ ret = blkverify_co_prwv(bs, &r, offset, bytes, qiov, &raw_qiov, flags,
+ false);
+
+ cmp_offset = qemu_iovec_compare(qiov, &raw_qiov);
+ if (cmp_offset != -1) {
+ blkverify_err(&r, "contents mismatch at offset %" PRId64,
+ offset + cmp_offset);
}
-}
-static BlockAIOCB *blkverify_aio_readv(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
-{
- BDRVBlkverifyState *s = bs->opaque;
- BlkverifyAIOCB *acb = blkverify_aio_get(bs, false, sector_num, qiov,
- nb_sectors, cb, opaque);
-
- acb->verify = blkverify_verify_readv;
- acb->buf = qemu_blockalign(bs->file->bs, qiov->size);
- qemu_iovec_init(&acb->raw_qiov, acb->qiov->niov);
- qemu_iovec_clone(&acb->raw_qiov, qiov, acb->buf);
-
- bdrv_aio_readv(s->test_file, sector_num, qiov, nb_sectors,
- blkverify_aio_cb, acb);
- bdrv_aio_readv(bs->file, sector_num, &acb->raw_qiov, nb_sectors,
- blkverify_aio_cb, acb);
- return &acb->common;
+ qemu_iovec_destroy(&raw_qiov);
+ qemu_vfree(buf);
+
+ return ret;
}
-static BlockAIOCB *blkverify_aio_writev(BlockDriverState *bs,
- int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
+static int coroutine_fn
+blkverify_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
- BDRVBlkverifyState *s = bs->opaque;
- BlkverifyAIOCB *acb = blkverify_aio_get(bs, true, sector_num, qiov,
- nb_sectors, cb, opaque);
-
- bdrv_aio_writev(s->test_file, sector_num, qiov, nb_sectors,
- blkverify_aio_cb, acb);
- bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors,
- blkverify_aio_cb, acb);
- return &acb->common;
+ BlkverifyRequest r;
+ return blkverify_co_prwv(bs, &r, offset, bytes, qiov, qiov, flags, true);
}
-static BlockAIOCB *blkverify_aio_flush(BlockDriverState *bs,
- BlockCompletionFunc *cb,
- void *opaque)
+static int blkverify_co_flush(BlockDriverState *bs)
{
BDRVBlkverifyState *s = bs->opaque;
/* Only flush test file, the raw file is not important */
- return bdrv_aio_flush(s->test_file->bs, cb, opaque);
+ return bdrv_co_flush(s->test_file->bs);
}
static bool blkverify_recurse_is_first_non_filter(BlockDriverState *bs,
@@ -332,9 +323,9 @@ static BlockDriver bdrv_blkverify = {
.bdrv_getlength = blkverify_getlength,
.bdrv_refresh_filename = blkverify_refresh_filename,
- .bdrv_aio_readv = blkverify_aio_readv,
- .bdrv_aio_writev = blkverify_aio_writev,
- .bdrv_aio_flush = blkverify_aio_flush,
+ .bdrv_co_preadv = blkverify_co_preadv,
+ .bdrv_co_pwritev = blkverify_co_pwritev,
+ .bdrv_co_flush = blkverify_co_flush,
.is_filter = true,
.bdrv_recurse_is_first_non_filter = blkverify_recurse_is_first_non_filter,
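
blkverify now issues the test and raw requests as two child coroutines and joins them with a plain completion counter; qemu_coroutine_enter_if_inactive() is what makes this safe when a child finishes without ever yielding, because the parent is still active at that point. A reduced fragment of the join logic, assuming QEMU's coroutine API; Join, Child and child_entry() are hypothetical stand-ins for the structures in the hunk:

    typedef struct Join {
        Coroutine *co;      /* parent, waiting for both children */
        unsigned int done;  /* completion counter */
        int ret[2];
    } Join;

    typedef struct Child {
        Join *join;
        int idx;
    } Child;

    static void coroutine_fn child_entry(void *opaque)
    {
        Child *c = opaque;

        c->join->ret[c->idx] = 0;   /* issue the real request here */
        c->join->done++;
        /* Only re-enter the parent if it has already yielded; if the
         * request completed without yielding, the parent is still active. */
        qemu_coroutine_enter_if_inactive(c->join->co);
    }

    static int coroutine_fn run_both(void)
    {
        Join j = { .co = qemu_coroutine_self() };
        Child a = { .join = &j, .idx = 0 };
        Child b = { .join = &j, .idx = 1 };

        qemu_coroutine_enter(qemu_coroutine_create(child_entry, &a));
        qemu_coroutine_enter(qemu_coroutine_create(child_entry, &b));

        while (j.done < 2) {
            qemu_coroutine_yield();
        }
        return j.ret[0] ? j.ret[0] : j.ret[1];
    }
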
diff --git a/block/curl.c b/block/curl.c
index 0404c1b5fa..792fef8269 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -192,19 +192,19 @@ static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action,
switch (action) {
case CURL_POLL_IN:
aio_set_fd_handler(s->aio_context, fd, false,
- curl_multi_read, NULL, state);
+ curl_multi_read, NULL, NULL, state);
break;
case CURL_POLL_OUT:
aio_set_fd_handler(s->aio_context, fd, false,
- NULL, curl_multi_do, state);
+ NULL, curl_multi_do, NULL, state);
break;
case CURL_POLL_INOUT:
aio_set_fd_handler(s->aio_context, fd, false,
- curl_multi_read, curl_multi_do, state);
+ curl_multi_read, curl_multi_do, NULL, state);
break;
case CURL_POLL_REMOVE:
aio_set_fd_handler(s->aio_context, fd, false,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
break;
}
diff --git a/block/raw-posix.c b/block/file-posix.c
index 28b47d977b..28b47d977b 100644
--- a/block/raw-posix.c
+++ b/block/file-posix.c
diff --git a/block/raw-win32.c b/block/file-win32.c
index 800fabdd72..800fabdd72 100644
--- a/block/raw-win32.c
+++ b/block/file-win32.c
diff --git a/block/gluster.c b/block/gluster.c
index a0a74e49fd..1a22f2982d 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -1253,7 +1253,7 @@ static int qemu_gluster_has_zero_init(BlockDriverState *bs)
* If @start is in a trailing hole or beyond EOF, return -ENXIO.
* If we can't find out, return a negative errno other than -ENXIO.
*
- * (Shamefully copied from raw-posix.c, only miniscule adaptions.)
+ * (Shamefully copied from file-posix.c, only minuscule adaptations.)
*/
static int find_allocation(BlockDriverState *bs, off_t start,
off_t *data, off_t *hole)
@@ -1349,7 +1349,7 @@ exit:
* 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
* beyond the end of the disk image it will be clamped.
*
- * (Based on raw_co_get_block_status() from raw-posix.c.)
+ * (Based on raw_co_get_block_status() from file-posix.c.)
*/
static int64_t coroutine_fn qemu_gluster_co_get_block_status(
BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
diff --git a/block/io.c b/block/io.c
index 4f005623f7..c42b34a965 100644
--- a/block/io.c
+++ b/block/io.c
@@ -228,9 +228,7 @@ void bdrv_drained_begin(BlockDriverState *bs)
bdrv_parent_drained_begin(bs);
}
- bdrv_io_unplugged_begin(bs);
bdrv_drain_recurse(bs);
- bdrv_io_unplugged_end(bs);
}
void bdrv_drained_end(BlockDriverState *bs)
@@ -302,7 +300,6 @@ void bdrv_drain_all_begin(void)
aio_context_acquire(aio_context);
bdrv_parent_drained_begin(bs);
- bdrv_io_unplugged_begin(bs);
aio_disable_external(aio_context);
aio_context_release(aio_context);
@@ -347,7 +344,6 @@ void bdrv_drain_all_end(void)
aio_context_acquire(aio_context);
aio_enable_external(aio_context);
- bdrv_io_unplugged_end(bs);
bdrv_parent_drained_end(bs);
aio_context_release(aio_context);
}
@@ -2650,7 +2646,7 @@ void bdrv_io_plug(BlockDriverState *bs)
bdrv_io_plug(child->bs);
}
- if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
+ if (bs->io_plugged++ == 0) {
BlockDriver *drv = bs->drv;
if (drv && drv->bdrv_io_plug) {
drv->bdrv_io_plug(bs);
@@ -2663,7 +2659,7 @@ void bdrv_io_unplug(BlockDriverState *bs)
BdrvChild *child;
assert(bs->io_plugged);
- if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
+ if (--bs->io_plugged == 0) {
BlockDriver *drv = bs->drv;
if (drv && drv->bdrv_io_unplug) {
drv->bdrv_io_unplug(bs);
@@ -2674,36 +2670,3 @@ void bdrv_io_unplug(BlockDriverState *bs)
bdrv_io_unplug(child->bs);
}
}
-
-void bdrv_io_unplugged_begin(BlockDriverState *bs)
-{
- BdrvChild *child;
-
- if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
- BlockDriver *drv = bs->drv;
- if (drv && drv->bdrv_io_unplug) {
- drv->bdrv_io_unplug(bs);
- }
- }
-
- QLIST_FOREACH(child, &bs->children, next) {
- bdrv_io_unplugged_begin(child->bs);
- }
-}
-
-void bdrv_io_unplugged_end(BlockDriverState *bs)
-{
- BdrvChild *child;
-
- assert(bs->io_plug_disabled);
- QLIST_FOREACH(child, &bs->children, next) {
- bdrv_io_unplugged_end(child->bs);
- }
-
- if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
- BlockDriver *drv = bs->drv;
- if (drv && drv->bdrv_io_plug) {
- drv->bdrv_io_plug(bs);
- }
- }
-}
diff --git a/block/iscsi.c b/block/iscsi.c
index 0960929d57..6aeeb9ec4f 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -362,6 +362,7 @@ iscsi_set_events(IscsiLun *iscsilun)
false,
(ev & POLLIN) ? iscsi_process_read : NULL,
(ev & POLLOUT) ? iscsi_process_write : NULL,
+ NULL,
iscsilun);
iscsilun->events = ev;
}
@@ -1526,7 +1527,7 @@ static void iscsi_detach_aio_context(BlockDriverState *bs)
IscsiLun *iscsilun = bs->opaque;
aio_set_fd_handler(iscsilun->aio_context, iscsi_get_fd(iscsilun->iscsi),
- false, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL);
iscsilun->events = 0;
if (iscsilun->nop_timer) {
diff --git a/block/linux-aio.c b/block/linux-aio.c
index 1685ec29a3..03ab741d37 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -255,6 +255,20 @@ static void qemu_laio_completion_cb(EventNotifier *e)
}
}
+static bool qemu_laio_poll_cb(void *opaque)
+{
+ EventNotifier *e = opaque;
+ LinuxAioState *s = container_of(e, LinuxAioState, e);
+ struct io_event *events;
+
+ if (!io_getevents_peek(s->ctx, &events)) {
+ return false;
+ }
+
+ qemu_laio_process_completions_and_submit(s);
+ return true;
+}
+
static void laio_cancel(BlockAIOCB *blockacb)
{
struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
@@ -439,7 +453,7 @@ BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
- aio_set_event_notifier(old_context, &s->e, false, NULL);
+ aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
qemu_bh_delete(s->completion_bh);
}
@@ -448,7 +462,8 @@ void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
s->aio_context = new_context;
s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
aio_set_event_notifier(new_context, &s->e, false,
- qemu_laio_completion_cb);
+ qemu_laio_completion_cb,
+ qemu_laio_poll_cb);
}
LinuxAioState *laio_init(void)
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 3779c6c999..06f1532805 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -145,7 +145,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
aio_context = bdrv_get_aio_context(bs);
aio_set_fd_handler(aio_context, s->sioc->fd, false,
- nbd_reply_ready, nbd_restart_write, bs);
+ nbd_reply_ready, nbd_restart_write, NULL, bs);
if (qiov) {
qio_channel_set_cork(s->ioc, true);
rc = nbd_send_request(s->ioc, request);
@@ -161,7 +161,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
rc = nbd_send_request(s->ioc, request);
}
aio_set_fd_handler(aio_context, s->sioc->fd, false,
- nbd_reply_ready, NULL, bs);
+ nbd_reply_ready, NULL, NULL, bs);
s->send_coroutine = NULL;
qemu_co_mutex_unlock(&s->send_mutex);
return rc;
@@ -366,14 +366,14 @@ void nbd_client_detach_aio_context(BlockDriverState *bs)
{
aio_set_fd_handler(bdrv_get_aio_context(bs),
nbd_get_client_session(bs)->sioc->fd,
- false, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL);
}
void nbd_client_attach_aio_context(BlockDriverState *bs,
AioContext *new_context)
{
aio_set_fd_handler(new_context, nbd_get_client_session(bs)->sioc->fd,
- false, nbd_reply_ready, NULL, bs);
+ false, nbd_reply_ready, NULL, NULL, bs);
}
void nbd_client_close(BlockDriverState *bs)
diff --git a/block/nfs.c b/block/nfs.c
index a490660027..a564340d15 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -197,7 +197,8 @@ static void nfs_set_events(NFSClient *client)
aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
false,
(ev & POLLIN) ? nfs_process_read : NULL,
- (ev & POLLOUT) ? nfs_process_write : NULL, client);
+ (ev & POLLOUT) ? nfs_process_write : NULL,
+ NULL, client);
}
client->events = ev;
@@ -395,7 +396,7 @@ static void nfs_detach_aio_context(BlockDriverState *bs)
NFSClient *client = bs->opaque;
aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
- false, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL);
client->events = 0;
}
@@ -415,7 +416,7 @@ static void nfs_client_close(NFSClient *client)
nfs_close(client->context, client->fh);
}
aio_set_fd_handler(client->aio_context, nfs_get_fd(client->context),
- false, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL);
nfs_destroy_context(client->context);
}
memset(client, 0, sizeof(NFSClient));
diff --git a/block/qcow.c b/block/qcow.c
index 7540f43f46..fb738fc507 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -104,6 +104,7 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
unsigned int len, i, shift;
int ret;
QCowHeader header;
+ Error *local_err = NULL;
ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
@@ -252,7 +253,12 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(&s->migration_blocker, "The qcow format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
- migrate_add_blocker(s->migration_blocker);
+ ret = migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(s->migration_blocker);
+ goto fail;
+ }
qemu_co_mutex_init(&s->lock);
return 0;
diff --git a/block/quorum.c b/block/quorum.c
index d122299352..86e2072dce 100644
--- a/block/quorum.c
+++ b/block/quorum.c
@@ -97,7 +97,7 @@ typedef struct QuorumAIOCB QuorumAIOCB;
* $children_count QuorumChildRequest.
*/
typedef struct QuorumChildRequest {
- BlockAIOCB *aiocb;
+ BlockDriverState *bs;
QEMUIOVector qiov;
uint8_t *buf;
int ret;
@@ -110,11 +110,12 @@ typedef struct QuorumChildRequest {
* used to do operations on each children and track overall progress.
*/
struct QuorumAIOCB {
- BlockAIOCB common;
+ BlockDriverState *bs;
+ Coroutine *co;
/* Request metadata */
- uint64_t sector_num;
- int nb_sectors;
+ uint64_t offset;
+ uint64_t bytes;
QEMUIOVector *qiov; /* calling IOV */
@@ -133,32 +134,15 @@ struct QuorumAIOCB {
int children_read; /* how many children have been read from */
};
-static bool quorum_vote(QuorumAIOCB *acb);
-
-static void quorum_aio_cancel(BlockAIOCB *blockacb)
-{
- QuorumAIOCB *acb = container_of(blockacb, QuorumAIOCB, common);
- BDRVQuorumState *s = acb->common.bs->opaque;
- int i;
-
- /* cancel all callbacks */
- for (i = 0; i < s->num_children; i++) {
- if (acb->qcrs[i].aiocb) {
- bdrv_aio_cancel_async(acb->qcrs[i].aiocb);
- }
- }
-}
-
-static AIOCBInfo quorum_aiocb_info = {
- .aiocb_size = sizeof(QuorumAIOCB),
- .cancel_async = quorum_aio_cancel,
-};
+typedef struct QuorumCo {
+ QuorumAIOCB *acb;
+ int idx;
+} QuorumCo;
static void quorum_aio_finalize(QuorumAIOCB *acb)
{
- acb->common.cb(acb->common.opaque, acb->vote_ret);
g_free(acb->qcrs);
- qemu_aio_unref(acb);
+ g_free(acb);
}
static bool quorum_sha256_compare(QuorumVoteValue *a, QuorumVoteValue *b)
@@ -171,30 +155,26 @@ static bool quorum_64bits_compare(QuorumVoteValue *a, QuorumVoteValue *b)
return a->l == b->l;
}
-static QuorumAIOCB *quorum_aio_get(BDRVQuorumState *s,
- BlockDriverState *bs,
+static QuorumAIOCB *quorum_aio_get(BlockDriverState *bs,
QEMUIOVector *qiov,
- uint64_t sector_num,
- int nb_sectors,
- BlockCompletionFunc *cb,
- void *opaque)
+ uint64_t offset,
+ uint64_t bytes)
{
- QuorumAIOCB *acb = qemu_aio_get(&quorum_aiocb_info, bs, cb, opaque);
+ BDRVQuorumState *s = bs->opaque;
+ QuorumAIOCB *acb = g_new(QuorumAIOCB, 1);
int i;
- acb->common.bs->opaque = s;
- acb->sector_num = sector_num;
- acb->nb_sectors = nb_sectors;
- acb->qiov = qiov;
- acb->qcrs = g_new0(QuorumChildRequest, s->num_children);
- acb->count = 0;
- acb->success_count = 0;
- acb->rewrite_count = 0;
- acb->votes.compare = quorum_sha256_compare;
- QLIST_INIT(&acb->votes.vote_list);
- acb->is_read = false;
- acb->vote_ret = 0;
+ *acb = (QuorumAIOCB) {
+ .co = qemu_coroutine_self(),
+ .bs = bs,
+ .offset = offset,
+ .bytes = bytes,
+ .qiov = qiov,
+ .votes.compare = quorum_sha256_compare,
+ .votes.vote_list = QLIST_HEAD_INITIALIZER(acb.votes.vote_list),
+ };
+ acb->qcrs = g_new0(QuorumChildRequest, s->num_children);
for (i = 0; i < s->num_children; i++) {
acb->qcrs[i].buf = NULL;
acb->qcrs[i].ret = 0;
@@ -204,30 +184,37 @@ static QuorumAIOCB *quorum_aio_get(BDRVQuorumState *s,
return acb;
}
-static void quorum_report_bad(QuorumOpType type, uint64_t sector_num,
- int nb_sectors, char *node_name, int ret)
+static void quorum_report_bad(QuorumOpType type, uint64_t offset,
+ uint64_t bytes, char *node_name, int ret)
{
const char *msg = NULL;
+ int64_t start_sector = offset / BDRV_SECTOR_SIZE;
+ int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
+
if (ret < 0) {
msg = strerror(-ret);
}
- qapi_event_send_quorum_report_bad(type, !!msg, msg, node_name,
- sector_num, nb_sectors, &error_abort);
+ qapi_event_send_quorum_report_bad(type, !!msg, msg, node_name, start_sector,
+ end_sector - start_sector, &error_abort);
}
static void quorum_report_failure(QuorumAIOCB *acb)
{
- const char *reference = bdrv_get_device_or_node_name(acb->common.bs);
- qapi_event_send_quorum_failure(reference, acb->sector_num,
- acb->nb_sectors, &error_abort);
+ const char *reference = bdrv_get_device_or_node_name(acb->bs);
+ int64_t start_sector = acb->offset / BDRV_SECTOR_SIZE;
+ int64_t end_sector = DIV_ROUND_UP(acb->offset + acb->bytes,
+ BDRV_SECTOR_SIZE);
+
+ qapi_event_send_quorum_failure(reference, start_sector,
+ end_sector - start_sector, &error_abort);
}
static int quorum_vote_error(QuorumAIOCB *acb);
static bool quorum_has_too_much_io_failed(QuorumAIOCB *acb)
{
- BDRVQuorumState *s = acb->common.bs->opaque;
+ BDRVQuorumState *s = acb->bs->opaque;
if (acb->success_count < s->threshold) {
acb->vote_ret = quorum_vote_error(acb);
@@ -238,22 +225,7 @@ static bool quorum_has_too_much_io_failed(QuorumAIOCB *acb)
return false;
}
-static void quorum_rewrite_aio_cb(void *opaque, int ret)
-{
- QuorumAIOCB *acb = opaque;
-
- /* one less rewrite to do */
- acb->rewrite_count--;
-
- /* wait until all rewrite callbacks have completed */
- if (acb->rewrite_count) {
- return;
- }
-
- quorum_aio_finalize(acb);
-}
-
-static BlockAIOCB *read_fifo_child(QuorumAIOCB *acb);
+static int read_fifo_child(QuorumAIOCB *acb);
static void quorum_copy_qiov(QEMUIOVector *dest, QEMUIOVector *source)
{
@@ -272,70 +244,7 @@ static void quorum_report_bad_acb(QuorumChildRequest *sacb, int ret)
{
QuorumAIOCB *acb = sacb->parent;
QuorumOpType type = acb->is_read ? QUORUM_OP_TYPE_READ : QUORUM_OP_TYPE_WRITE;
- quorum_report_bad(type, acb->sector_num, acb->nb_sectors,
- sacb->aiocb->bs->node_name, ret);
-}
-
-static void quorum_fifo_aio_cb(void *opaque, int ret)
-{
- QuorumChildRequest *sacb = opaque;
- QuorumAIOCB *acb = sacb->parent;
- BDRVQuorumState *s = acb->common.bs->opaque;
-
- assert(acb->is_read && s->read_pattern == QUORUM_READ_PATTERN_FIFO);
-
- if (ret < 0) {
- quorum_report_bad_acb(sacb, ret);
-
- /* We try to read next child in FIFO order if we fail to read */
- if (acb->children_read < s->num_children) {
- read_fifo_child(acb);
- return;
- }
- }
-
- acb->vote_ret = ret;
-
- /* FIXME: rewrite failed children if acb->children_read > 1? */
- quorum_aio_finalize(acb);
-}
-
-static void quorum_aio_cb(void *opaque, int ret)
-{
- QuorumChildRequest *sacb = opaque;
- QuorumAIOCB *acb = sacb->parent;
- BDRVQuorumState *s = acb->common.bs->opaque;
- bool rewrite = false;
- int i;
-
- sacb->ret = ret;
- if (ret == 0) {
- acb->success_count++;
- } else {
- quorum_report_bad_acb(sacb, ret);
- }
- acb->count++;
- assert(acb->count <= s->num_children);
- assert(acb->success_count <= s->num_children);
- if (acb->count < s->num_children) {
- return;
- }
-
- /* Do the vote on read */
- if (acb->is_read) {
- rewrite = quorum_vote(acb);
- for (i = 0; i < s->num_children; i++) {
- qemu_vfree(acb->qcrs[i].buf);
- qemu_iovec_destroy(&acb->qcrs[i].qiov);
- }
- } else {
- quorum_has_too_much_io_failed(acb);
- }
-
- /* if no rewrite is done the code will finish right away */
- if (!rewrite) {
- quorum_aio_finalize(acb);
- }
+ quorum_report_bad(type, acb->offset, acb->bytes, sacb->bs->node_name, ret);
}
static void quorum_report_bad_versions(BDRVQuorumState *s,
@@ -350,14 +259,31 @@ static void quorum_report_bad_versions(BDRVQuorumState *s,
continue;
}
QLIST_FOREACH(item, &version->items, next) {
- quorum_report_bad(QUORUM_OP_TYPE_READ, acb->sector_num,
- acb->nb_sectors,
+ quorum_report_bad(QUORUM_OP_TYPE_READ, acb->offset, acb->bytes,
s->children[item->index]->bs->node_name, 0);
}
}
}
-static bool quorum_rewrite_bad_versions(BDRVQuorumState *s, QuorumAIOCB *acb,
+static void quorum_rewrite_entry(void *opaque)
+{
+ QuorumCo *co = opaque;
+ QuorumAIOCB *acb = co->acb;
+ BDRVQuorumState *s = acb->bs->opaque;
+
+ /* Ignore any errors, it's just a correction attempt for already
+ * corrupted data. */
+ bdrv_co_pwritev(s->children[co->idx], acb->offset, acb->bytes,
+ acb->qiov, 0);
+
+ /* Wake up the caller after the last rewrite */
+ acb->rewrite_count--;
+ if (!acb->rewrite_count) {
+ qemu_coroutine_enter_if_inactive(acb->co);
+ }
+}
+
+static bool quorum_rewrite_bad_versions(QuorumAIOCB *acb,
QuorumVoteValue *value)
{
QuorumVoteVersion *version;
@@ -376,7 +302,7 @@ static bool quorum_rewrite_bad_versions(BDRVQuorumState *s, QuorumAIOCB *acb,
}
}
- /* quorum_rewrite_aio_cb will count down this to zero */
+ /* quorum_rewrite_entry will count down this to zero */
acb->rewrite_count = count;
/* now fire the correcting rewrites */
@@ -385,9 +311,14 @@ static bool quorum_rewrite_bad_versions(BDRVQuorumState *s, QuorumAIOCB *acb,
continue;
}
QLIST_FOREACH(item, &version->items, next) {
- bdrv_aio_writev(s->children[item->index], acb->sector_num,
- acb->qiov, acb->nb_sectors, quorum_rewrite_aio_cb,
- acb);
+ Coroutine *co;
+ QuorumCo data = {
+ .acb = acb,
+ .idx = item->index,
+ };
+
+ co = qemu_coroutine_create(quorum_rewrite_entry, &data);
+ qemu_coroutine_enter(co);
}
}
@@ -507,8 +438,8 @@ static void GCC_FMT_ATTR(2, 3) quorum_err(QuorumAIOCB *acb,
va_list ap;
va_start(ap, fmt);
- fprintf(stderr, "quorum: sector_num=%" PRId64 " nb_sectors=%d ",
- acb->sector_num, acb->nb_sectors);
+ fprintf(stderr, "quorum: offset=%" PRIu64 " bytes=%" PRIu64 " ",
+ acb->offset, acb->bytes);
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
va_end(ap);
@@ -519,16 +450,15 @@ static bool quorum_compare(QuorumAIOCB *acb,
QEMUIOVector *a,
QEMUIOVector *b)
{
- BDRVQuorumState *s = acb->common.bs->opaque;
+ BDRVQuorumState *s = acb->bs->opaque;
ssize_t offset;
/* This driver will replace blkverify in this particular case */
if (s->is_blkverify) {
offset = qemu_iovec_compare(a, b);
if (offset != -1) {
- quorum_err(acb, "contents mismatch in sector %" PRId64,
- acb->sector_num +
- (uint64_t)(offset / BDRV_SECTOR_SIZE));
+ quorum_err(acb, "contents mismatch at offset %" PRIu64,
+ acb->offset + offset);
}
return true;
}
@@ -539,7 +469,7 @@ static bool quorum_compare(QuorumAIOCB *acb,
/* Do a vote to get the error code */
static int quorum_vote_error(QuorumAIOCB *acb)
{
- BDRVQuorumState *s = acb->common.bs->opaque;
+ BDRVQuorumState *s = acb->bs->opaque;
QuorumVoteVersion *winner = NULL;
QuorumVotes error_votes;
QuorumVoteValue result_value;
@@ -568,17 +498,16 @@ static int quorum_vote_error(QuorumAIOCB *acb)
return ret;
}
-static bool quorum_vote(QuorumAIOCB *acb)
+static void quorum_vote(QuorumAIOCB *acb)
{
bool quorum = true;
- bool rewrite = false;
int i, j, ret;
QuorumVoteValue hash;
- BDRVQuorumState *s = acb->common.bs->opaque;
+ BDRVQuorumState *s = acb->bs->opaque;
QuorumVoteVersion *winner;
if (quorum_has_too_much_io_failed(acb)) {
- return false;
+ return;
}
/* get the index of the first successful read */
@@ -606,7 +535,7 @@ static bool quorum_vote(QuorumAIOCB *acb)
/* Every successful read agrees */
if (quorum) {
quorum_copy_qiov(acb->qiov, &acb->qcrs[i].qiov);
- return false;
+ return;
}
/* compute hashes for each successful read, also store indexes */
@@ -641,19 +570,46 @@ static bool quorum_vote(QuorumAIOCB *acb)
/* corruption correction is enabled */
if (s->rewrite_corrupted) {
- rewrite = quorum_rewrite_bad_versions(s, acb, &winner->value);
+ quorum_rewrite_bad_versions(acb, &winner->value);
}
free_exit:
/* free lists */
quorum_free_vote_list(&acb->votes);
- return rewrite;
}
-static BlockAIOCB *read_quorum_children(QuorumAIOCB *acb)
+static void read_quorum_children_entry(void *opaque)
{
- BDRVQuorumState *s = acb->common.bs->opaque;
- int i;
+ QuorumCo *co = opaque;
+ QuorumAIOCB *acb = co->acb;
+ BDRVQuorumState *s = acb->bs->opaque;
+ int i = co->idx;
+ QuorumChildRequest *sacb = &acb->qcrs[i];
+
+ sacb->bs = s->children[i]->bs;
+ sacb->ret = bdrv_co_preadv(s->children[i], acb->offset, acb->bytes,
+ &acb->qcrs[i].qiov, 0);
+
+ if (sacb->ret == 0) {
+ acb->success_count++;
+ } else {
+ quorum_report_bad_acb(sacb, sacb->ret);
+ }
+
+ acb->count++;
+ assert(acb->count <= s->num_children);
+ assert(acb->success_count <= s->num_children);
+
+ /* Wake up the caller after the last read */
+ if (acb->count == s->num_children) {
+ qemu_coroutine_enter_if_inactive(acb->co);
+ }
+}
+
+static int read_quorum_children(QuorumAIOCB *acb)
+{
+ BDRVQuorumState *s = acb->bs->opaque;
+ int i, ret;
acb->children_read = s->num_children;
for (i = 0; i < s->num_children; i++) {
@@ -663,65 +619,131 @@ static BlockAIOCB *read_quorum_children(QuorumAIOCB *acb)
}
for (i = 0; i < s->num_children; i++) {
- acb->qcrs[i].aiocb = bdrv_aio_readv(s->children[i], acb->sector_num,
- &acb->qcrs[i].qiov, acb->nb_sectors,
- quorum_aio_cb, &acb->qcrs[i]);
+ Coroutine *co;
+ QuorumCo data = {
+ .acb = acb,
+ .idx = i,
+ };
+
+ co = qemu_coroutine_create(read_quorum_children_entry, &data);
+ qemu_coroutine_enter(co);
}
- return &acb->common;
+ while (acb->count < s->num_children) {
+ qemu_coroutine_yield();
+ }
+
+ /* Do the vote on read */
+ quorum_vote(acb);
+ for (i = 0; i < s->num_children; i++) {
+ qemu_vfree(acb->qcrs[i].buf);
+ qemu_iovec_destroy(&acb->qcrs[i].qiov);
+ }
+
+ while (acb->rewrite_count) {
+ qemu_coroutine_yield();
+ }
+
+ ret = acb->vote_ret;
+
+ return ret;
}
-static BlockAIOCB *read_fifo_child(QuorumAIOCB *acb)
+static int read_fifo_child(QuorumAIOCB *acb)
{
- BDRVQuorumState *s = acb->common.bs->opaque;
- int n = acb->children_read++;
+ BDRVQuorumState *s = acb->bs->opaque;
+ int n, ret;
+
+ /* We try to read the next child in FIFO order if we failed to read */
+ do {
+ n = acb->children_read++;
+ acb->qcrs[n].bs = s->children[n]->bs;
+ ret = bdrv_co_preadv(s->children[n], acb->offset, acb->bytes,
+ acb->qiov, 0);
+ if (ret < 0) {
+ quorum_report_bad_acb(&acb->qcrs[n], ret);
+ }
+ } while (ret < 0 && acb->children_read < s->num_children);
- acb->qcrs[n].aiocb = bdrv_aio_readv(s->children[n], acb->sector_num,
- acb->qiov, acb->nb_sectors,
- quorum_fifo_aio_cb, &acb->qcrs[n]);
+ /* FIXME: rewrite failed children if acb->children_read > 1? */
- return &acb->common;
+ return ret;
}
-static BlockAIOCB *quorum_aio_readv(BlockDriverState *bs,
- int64_t sector_num,
- QEMUIOVector *qiov,
- int nb_sectors,
- BlockCompletionFunc *cb,
- void *opaque)
+static int quorum_co_preadv(BlockDriverState *bs, uint64_t offset,
+ uint64_t bytes, QEMUIOVector *qiov, int flags)
{
BDRVQuorumState *s = bs->opaque;
- QuorumAIOCB *acb = quorum_aio_get(s, bs, qiov, sector_num,
- nb_sectors, cb, opaque);
+ QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes);
+ int ret;
+
acb->is_read = true;
acb->children_read = 0;
if (s->read_pattern == QUORUM_READ_PATTERN_QUORUM) {
- return read_quorum_children(acb);
+ ret = read_quorum_children(acb);
+ } else {
+ ret = read_fifo_child(acb);
+ }
+ quorum_aio_finalize(acb);
+
+ return ret;
+}
+
+static void write_quorum_entry(void *opaque)
+{
+ QuorumCo *co = opaque;
+ QuorumAIOCB *acb = co->acb;
+ BDRVQuorumState *s = acb->bs->opaque;
+ int i = co->idx;
+ QuorumChildRequest *sacb = &acb->qcrs[i];
+
+ sacb->bs = s->children[i]->bs;
+ sacb->ret = bdrv_co_pwritev(s->children[i], acb->offset, acb->bytes,
+ acb->qiov, 0);
+ if (sacb->ret == 0) {
+ acb->success_count++;
+ } else {
+ quorum_report_bad_acb(sacb, sacb->ret);
}
+ acb->count++;
+ assert(acb->count <= s->num_children);
+ assert(acb->success_count <= s->num_children);
- return read_fifo_child(acb);
+ /* Wake up the caller after the last write */
+ if (acb->count == s->num_children) {
+ qemu_coroutine_enter_if_inactive(acb->co);
+ }
}
-static BlockAIOCB *quorum_aio_writev(BlockDriverState *bs,
- int64_t sector_num,
- QEMUIOVector *qiov,
- int nb_sectors,
- BlockCompletionFunc *cb,
- void *opaque)
+static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset,
+ uint64_t bytes, QEMUIOVector *qiov, int flags)
{
BDRVQuorumState *s = bs->opaque;
- QuorumAIOCB *acb = quorum_aio_get(s, bs, qiov, sector_num, nb_sectors,
- cb, opaque);
- int i;
+ QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes);
+ int i, ret;
for (i = 0; i < s->num_children; i++) {
- acb->qcrs[i].aiocb = bdrv_aio_writev(s->children[i], sector_num,
- qiov, nb_sectors, &quorum_aio_cb,
- &acb->qcrs[i]);
+ Coroutine *co;
+ QuorumCo data = {
+ .acb = acb,
+ .idx = i,
+ };
+
+ co = qemu_coroutine_create(write_quorum_entry, &data);
+ qemu_coroutine_enter(co);
+ }
+
+ while (acb->count < s->num_children) {
+ qemu_coroutine_yield();
}
- return &acb->common;
+ quorum_has_too_much_io_failed(acb);
+
+ ret = acb->vote_ret;
+ quorum_aio_finalize(acb);
+
+ return ret;
}
static int64_t quorum_getlength(BlockDriverState *bs)
@@ -765,7 +787,7 @@ static coroutine_fn int quorum_co_flush(BlockDriverState *bs)
result = bdrv_co_flush(s->children[i]->bs);
if (result) {
quorum_report_bad(QUORUM_OP_TYPE_FLUSH, 0,
- bdrv_nb_sectors(s->children[i]->bs),
+ bdrv_getlength(s->children[i]->bs),
s->children[i]->bs->node_name, result);
result_value.l = result;
quorum_count_vote(&error_votes, &result_value, i);
@@ -1098,8 +1120,8 @@ static BlockDriver bdrv_quorum = {
.bdrv_getlength = quorum_getlength,
- .bdrv_aio_readv = quorum_aio_readv,
- .bdrv_aio_writev = quorum_aio_writev,
+ .bdrv_co_preadv = quorum_co_preadv,
+ .bdrv_co_pwritev = quorum_co_pwritev,
.bdrv_add_child = quorum_add_child,
.bdrv_del_child = quorum_del_child,
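
The quorum conversion above replaces the AIO callback chain with the block layer's coroutine fan-out pattern: the request coroutine spawns one worker per child, yields until a completion counter reaches num_children, and only then votes. Below is a reduced sketch of just that pattern, assuming QEMU's coroutine API; Work, WorkerArg, work_entry() and fan_out() are illustrative stand-ins for QuorumAIOCB, QuorumCo, read_quorum_children_entry() and read_quorum_children(), and the actual child I/O is elided.

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    /* Illustrative stand-ins; the real code also tracks success_count,
     * the vote list and the rewrite counter. */
    typedef struct Work {
        Coroutine *co;       /* request coroutine, reentered by the last worker */
        int count;           /* workers finished so far */
        int num_children;
    } Work;

    typedef struct WorkerArg {
        Work *w;
        int idx;
    } WorkerArg;

    static void coroutine_fn work_entry(void *opaque)
    {
        WorkerArg *arg = opaque;
        Work *w = arg->w;
        int idx = arg->idx;

        /* ... issue I/O to child 'idx' here, e.g. bdrv_co_preadv() ... */
        (void)idx;

        /* Wake the request coroutine once the last worker is done */
        if (++w->count == w->num_children) {
            qemu_coroutine_enter_if_inactive(w->co);
        }
    }

    static int coroutine_fn fan_out(Work *w)
    {
        int i;

        w->co = qemu_coroutine_self();
        for (i = 0; i < w->num_children; i++) {
            WorkerArg arg = { .w = w, .idx = i };

            /* The worker runs until its first yield before this returns,
             * so the stack-allocated 'arg' has been consumed by then. */
            qemu_coroutine_enter(qemu_coroutine_create(work_entry, &arg));
        }
        while (w->count < w->num_children) {
            qemu_coroutine_yield();
        }
        return 0;
    }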
diff --git a/block/raw_bsd.c b/block/raw-format.c
index 8a5b9b0424..8404a82e0c 100644
--- a/block/raw_bsd.c
+++ b/block/raw-format.c
@@ -1,4 +1,4 @@
-/* BlockDriver implementation for "raw"
+/* BlockDriver implementation for "raw" format driver
*
* Copyright (C) 2010-2016 Red Hat, Inc.
* Copyright (C) 2010, Blue Swirl <blauwirbel@gmail.com>
diff --git a/block/sheepdog.c b/block/sheepdog.c
index 4c9af89180..5637e0cd37 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -664,7 +664,7 @@ static coroutine_fn void do_co_req(void *opaque)
co = qemu_coroutine_self();
aio_set_fd_handler(srco->aio_context, sockfd, false,
- NULL, restart_co_req, co);
+ NULL, restart_co_req, NULL, co);
ret = send_co_req(sockfd, hdr, data, wlen);
if (ret < 0) {
@@ -672,7 +672,7 @@ static coroutine_fn void do_co_req(void *opaque)
}
aio_set_fd_handler(srco->aio_context, sockfd, false,
- restart_co_req, NULL, co);
+ restart_co_req, NULL, NULL, co);
ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
if (ret != sizeof(*hdr)) {
@@ -698,7 +698,7 @@ out:
/* there is at most one request for this sockfd, so it is safe to
* set each handler to NULL. */
aio_set_fd_handler(srco->aio_context, sockfd, false,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
srco->ret = ret;
srco->finished = true;
@@ -760,7 +760,7 @@ static coroutine_fn void reconnect_to_sdog(void *opaque)
AIOReq *aio_req, *next;
aio_set_fd_handler(s->aio_context, s->fd, false, NULL,
- NULL, NULL);
+ NULL, NULL, NULL);
close(s->fd);
s->fd = -1;
@@ -964,7 +964,7 @@ static int get_sheep_fd(BDRVSheepdogState *s, Error **errp)
}
aio_set_fd_handler(s->aio_context, fd, false,
- co_read_response, NULL, s);
+ co_read_response, NULL, NULL, s);
return fd;
}
@@ -1226,7 +1226,7 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
qemu_co_mutex_lock(&s->lock);
s->co_send = qemu_coroutine_self();
aio_set_fd_handler(s->aio_context, s->fd, false,
- co_read_response, co_write_request, s);
+ co_read_response, co_write_request, NULL, s);
socket_set_cork(s->fd, 1);
/* send a header */
@@ -1245,7 +1245,7 @@ static void coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
out:
socket_set_cork(s->fd, 0);
aio_set_fd_handler(s->aio_context, s->fd, false,
- co_read_response, NULL, s);
+ co_read_response, NULL, NULL, s);
s->co_send = NULL;
qemu_co_mutex_unlock(&s->lock);
}
@@ -1396,7 +1396,7 @@ static void sd_detach_aio_context(BlockDriverState *bs)
BDRVSheepdogState *s = bs->opaque;
aio_set_fd_handler(s->aio_context, s->fd, false, NULL,
- NULL, NULL);
+ NULL, NULL, NULL);
}
static void sd_attach_aio_context(BlockDriverState *bs,
@@ -1406,7 +1406,7 @@ static void sd_attach_aio_context(BlockDriverState *bs,
s->aio_context = new_context;
aio_set_fd_handler(new_context, s->fd, false,
- co_read_response, NULL, s);
+ co_read_response, NULL, NULL, s);
}
/* TODO Convert to fine grained options */
@@ -1520,7 +1520,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
return 0;
out:
aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
- false, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL);
if (s->fd >= 0) {
closesocket(s->fd);
}
@@ -1559,7 +1559,7 @@ static void sd_reopen_commit(BDRVReopenState *state)
if (s->fd) {
aio_set_fd_handler(s->aio_context, s->fd, false,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
closesocket(s->fd);
}
@@ -1583,7 +1583,7 @@ static void sd_reopen_abort(BDRVReopenState *state)
if (re_s->fd) {
aio_set_fd_handler(s->aio_context, re_s->fd, false,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
closesocket(re_s->fd);
}
@@ -1972,7 +1972,7 @@ static void sd_close(BlockDriverState *bs)
}
aio_set_fd_handler(bdrv_get_aio_context(bs), s->fd,
- false, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL);
closesocket(s->fd);
g_free(s->host_spec);
}
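
The extra NULL threaded through every aio_set_fd_handler() call in sheepdog.c (and in ssh.c below) is the new io_poll argument from the AioContext polling work; callers with no userspace polling handler simply pass NULL. A hedged sketch of the updated call, assuming the prototype aio_set_fd_handler(ctx, fd, is_external, io_read, io_write, io_poll, opaque); my_read_ready() and the wrapper functions are illustrative only.

    #include "qemu/osdep.h"
    #include "block/aio.h"

    /* Illustrative handler: called from the AioContext when 'fd' is readable. */
    static void my_read_ready(void *opaque)
    {
        /* consume data associated with 'opaque' */
    }

    static void watch_fd(AioContext *ctx, int fd, void *opaque)
    {
        /* ..., io_read, io_write, io_poll, opaque; no poll handler here */
        aio_set_fd_handler(ctx, fd, false, my_read_ready, NULL, NULL, opaque);
    }

    static void unwatch_fd(AioContext *ctx, int fd)
    {
        aio_set_fd_handler(ctx, fd, false, NULL, NULL, NULL, NULL);
    }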
diff --git a/block/ssh.c b/block/ssh.c
index 15ed2818c5..e0edf20f78 100644
--- a/block/ssh.c
+++ b/block/ssh.c
@@ -911,7 +911,7 @@ static coroutine_fn void set_fd_handler(BDRVSSHState *s, BlockDriverState *bs)
rd_handler, wr_handler);
aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
- false, rd_handler, wr_handler, co);
+ false, rd_handler, wr_handler, NULL, co);
}
static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
@@ -919,7 +919,7 @@ static coroutine_fn void clear_fd_handler(BDRVSSHState *s,
{
DPRINTF("s->sock=%d", s->sock);
aio_set_fd_handler(bdrv_get_aio_context(bs), s->sock,
- false, NULL, NULL, NULL);
+ false, NULL, NULL, NULL, NULL);
}
/* A non-blocking call returned EAGAIN, so yield, ensuring the
diff --git a/block/trace-events b/block/trace-events
index cfc05f2478..671a6a851c 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -53,8 +53,8 @@ qmp_block_job_resume(void *job) "job %p"
qmp_block_job_complete(void *job) "job %p"
qmp_block_stream(void *bs, void *job) "bs %p job %p"
-# block/raw-win32.c
-# block/raw-posix.c
+# block/file-win32.c
+# block/file-posix.c
paio_submit_co(int64_t offset, int count, int type) "offset %"PRId64" count %d type %d"
paio_submit(void *acb, void *opaque, int64_t offset, int count, int type) "acb %p opaque %p offset %"PRId64" count %d type %d"
diff --git a/block/vdi.c b/block/vdi.c
index 96b78d5a43..0aeb940aa8 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -361,6 +361,7 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
VdiHeader header;
size_t bmap_size;
int ret;
+ Error *local_err = NULL;
logout("\n");
@@ -471,7 +472,12 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(&s->migration_blocker, "The vdi format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
- migrate_add_blocker(s->migration_blocker);
+ ret = migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(s->migration_blocker);
+ goto fail_free_bmap;
+ }
qemu_co_mutex_init(&s->write_lock);
diff --git a/block/vhdx.c b/block/vhdx.c
index 0ba2f0a2f9..68db9e074e 100644
--- a/block/vhdx.c
+++ b/block/vhdx.c
@@ -991,6 +991,17 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
}
}
+ /* Disable migration when VHDX images are used */
+ error_setg(&s->migration_blocker, "The vhdx format used by node '%s' "
+ "does not support live migration",
+ bdrv_get_device_or_node_name(bs));
+ ret = migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(s->migration_blocker);
+ goto fail;
+ }
+
if (flags & BDRV_O_RDWR) {
ret = vhdx_update_headers(bs, s, false, NULL);
if (ret < 0) {
@@ -1000,12 +1011,6 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
/* TODO: differencing files */
- /* Disable migration when VHDX images are used */
- error_setg(&s->migration_blocker, "The vhdx format used by node '%s' "
- "does not support live migration",
- bdrv_get_device_or_node_name(bs));
- migrate_add_blocker(s->migration_blocker);
-
return 0;
fail:
vhdx_close(bs);
diff --git a/block/vmdk.c b/block/vmdk.c
index a11c27a1c4..7750212969 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -941,6 +941,7 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags,
int ret;
BDRVVmdkState *s = bs->opaque;
uint32_t magic;
+ Error *local_err = NULL;
buf = vmdk_read_desc(bs->file, 0, errp);
if (!buf) {
@@ -976,7 +977,13 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(&s->migration_blocker, "The vmdk format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
- migrate_add_blocker(s->migration_blocker);
+ ret = migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(s->migration_blocker);
+ goto fail;
+ }
+
g_free(buf);
return 0;
diff --git a/block/vpc.c b/block/vpc.c
index 8d5886f003..ed6353dbd4 100644
--- a/block/vpc.c
+++ b/block/vpc.c
@@ -422,13 +422,18 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
#endif
}
- qemu_co_mutex_init(&s->lock);
-
/* Disable migration when VHD images are used */
error_setg(&s->migration_blocker, "The vpc format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
- migrate_add_blocker(s->migration_blocker);
+ ret = migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(s->migration_blocker);
+ goto fail;
+ }
+
+ qemu_co_mutex_init(&s->lock);
return 0;
diff --git a/block/vvfat.c b/block/vvfat.c
index ded21092ee..c6bf67e8f3 100644
--- a/block/vvfat.c
+++ b/block/vvfat.c
@@ -1185,22 +1185,26 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags,
s->sector_count = s->faked_sectors + s->sectors_per_cluster*s->cluster_count;
- if (s->first_sectors_number == 0x40) {
- init_mbr(s, cyls, heads, secs);
- }
-
- // assert(is_consistent(s));
- qemu_co_mutex_init(&s->lock);
-
/* Disable migration when vvfat is used rw */
if (s->qcow) {
error_setg(&s->migration_blocker,
"The vvfat (rw) format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
- migrate_add_blocker(s->migration_blocker);
+ ret = migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(s->migration_blocker);
+ goto fail;
+ }
}
+ if (s->first_sectors_number == 0x40) {
+ init_mbr(s, cyls, heads, secs);
+ }
+
+ qemu_co_mutex_init(&s->lock);
+
ret = 0;
fail:
qemu_opts_del(opts);
diff --git a/block/win32-aio.c b/block/win32-aio.c
index 95e3ab1541..8cdf73b00d 100644
--- a/block/win32-aio.c
+++ b/block/win32-aio.c
@@ -175,7 +175,7 @@ int win32_aio_attach(QEMUWin32AIOState *aio, HANDLE hfile)
void win32_aio_detach_aio_context(QEMUWin32AIOState *aio,
AioContext *old_context)
{
- aio_set_event_notifier(old_context, &aio->e, false, NULL);
+ aio_set_event_notifier(old_context, &aio->e, false, NULL, NULL);
aio->is_aio_context_attached = false;
}
@@ -184,7 +184,7 @@ void win32_aio_attach_aio_context(QEMUWin32AIOState *aio,
{
aio->is_aio_context_attached = true;
aio_set_event_notifier(new_context, &aio->e, false,
- win32_aio_completion_cb);
+ win32_aio_completion_cb, NULL);
}
QEMUWin32AIOState *win32_aio_init(void)
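
aio_set_event_notifier() grows the same trailing poll-callback parameter, which is why win32-aio.c now passes one extra NULL. A short sketch under the same assumption; completion_cb() and attach_notifier() are hypothetical.

    #include "qemu/osdep.h"
    #include "block/aio.h"

    /* Hypothetical completion handler for an event notifier. */
    static void completion_cb(EventNotifier *e)
    {
        /* process completed requests signalled via 'e' */
    }

    static void attach_notifier(AioContext *ctx, EventNotifier *e)
    {
        /* ..., io_read, io_poll; NULL means "no polling handler" */
        aio_set_event_notifier(ctx, e, false, completion_cb, NULL);
    }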
diff --git a/configure b/configure
index 218df87d21..86fd833feb 100755
--- a/configure
+++ b/configure
@@ -228,7 +228,7 @@ vhost_net="no"
vhost_scsi="no"
vhost_vsock="no"
kvm="no"
-colo="yes"
+hax="no"
rdma=""
gprof="no"
debug_tcg="no"
@@ -562,6 +562,7 @@ CYGWIN*)
;;
MINGW32*)
mingw32="yes"
+ hax="yes"
audio_possible_drivers="dsound sdl"
if check_include dsound.h; then
audio_drv_list="dsound"
@@ -611,6 +612,7 @@ OpenBSD)
Darwin)
bsd="yes"
darwin="yes"
+ hax="yes"
LDFLAGS_SHARED="-bundle -undefined dynamic_lookup"
if [ "$cpu" = "x86_64" ] ; then
QEMU_CFLAGS="-arch x86_64 $QEMU_CFLAGS"
@@ -920,9 +922,9 @@ for opt do
;;
--enable-kvm) kvm="yes"
;;
- --disable-colo) colo="no"
+ --disable-hax) hax="no"
;;
- --enable-colo) colo="yes"
+ --enable-hax) hax="yes"
;;
--disable-tcg-interpreter) tcg_interpreter="no"
;;
@@ -1372,7 +1374,7 @@ disabled with --disable-FEATURE, default is enabled if available:
fdt fdt device tree
bluez bluez stack connectivity
kvm KVM acceleration support
- colo COarse-grain LOck-stepping VM for Non-stop Service
+ hax HAX acceleration support
rdma RDMA-based migration support
vde support for vde network
netmap support for netmap network
@@ -2750,7 +2752,7 @@ if compile_prog "" "" ; then
fi
##########################################
-# xfsctl() probe, used for raw-posix
+# xfsctl() probe, used for file-posix.c
if test "$xfs" != "no" ; then
cat > $TMPC << EOF
#include <stddef.h> /* NULL */
@@ -3077,7 +3079,7 @@ fi
# g_test_trap_subprocess added in 2.38. Used by some tests.
glib_subprocess=yes
-if test "$mingw32" = "yes" || ! $pkg_config --atleast-version=2.38 glib-2.0; then
+if ! $pkg_config --atleast-version=2.38 glib-2.0; then
glib_subprocess=no
fi
@@ -5062,7 +5064,7 @@ echo "Linux AIO support $linux_aio"
echo "ATTR/XATTR support $attr"
echo "Install blobs $blobs"
echo "KVM support $kvm"
-echo "COLO support $colo"
+echo "HAX support $hax"
echo "RDMA support $rdma"
echo "TCG interpreter $tcg_interpreter"
echo "fdt support $fdt"
@@ -5701,10 +5703,6 @@ if have_backend "syslog"; then
fi
echo "CONFIG_TRACE_FILE=$trace_file" >> $config_host_mak
-if test "$colo" = "yes"; then
- echo "CONFIG_COLO=y" >> $config_host_mak
-fi
-
if test "$rdma" = "yes" ; then
echo "CONFIG_RDMA=y" >> $config_host_mak
fi
@@ -5845,7 +5843,7 @@ target_name=$(echo $target | cut -d '-' -f 1)
target_bigendian="no"
case "$target_name" in
- armeb|lm32|m68k|microblaze|mips|mipsn32|mips64|moxie|or32|ppc|ppcemb|ppc64|ppc64abi32|s390x|sh4eb|sparc|sparc64|sparc32plus|xtensaeb)
+ armeb|hppa|lm32|m68k|microblaze|mips|mipsn32|mips64|moxie|or32|ppc|ppcemb|ppc64|ppc64abi32|s390x|sh4eb|sparc|sparc64|sparc32plus|xtensaeb)
target_bigendian=yes
;;
esac
@@ -5908,6 +5906,8 @@ case "$target_name" in
;;
cris)
;;
+ hppa)
+ ;;
lm32)
;;
m68k)
@@ -5935,6 +5935,8 @@ case "$target_name" in
;;
moxie)
;;
+ nios2)
+ ;;
or32)
TARGET_ARCH=openrisc
TARGET_BASE_ARCH=openrisc
@@ -6050,6 +6052,15 @@ case "$target_name" in
fi
fi
esac
+if test "$hax" = "yes" ; then
+ if test "$target_softmmu" = "yes" ; then
+ case "$target_name" in
+ i386|x86_64)
+ echo "CONFIG_HAX=y" >> $config_target_mak
+ ;;
+ esac
+ fi
+fi
if test "$target_bigendian" = "yes" ; then
echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak
fi
@@ -6107,6 +6118,9 @@ for i in $ARCH $TARGET_BASE_ARCH ; do
cris)
disas_config "CRIS"
;;
+ hppa)
+ disas_config "HPPA"
+ ;;
i386|x86_64|x32)
disas_config "I386"
;;
@@ -6128,6 +6142,9 @@ for i in $ARCH $TARGET_BASE_ARCH ; do
moxie*)
disas_config "MOXIE"
;;
+ nios2)
+ disas_config "NIOS2"
+ ;;
or32)
disas_config "OPENRISC"
;;
@@ -6198,7 +6215,7 @@ fi
# build tree in object directory in case the source is not in the current directory
DIRS="tests tests/tcg tests/tcg/cris tests/tcg/lm32 tests/libqos tests/qapi-schema tests/tcg/xtensa tests/qemu-iotests"
-DIRS="$DIRS fsdev"
+DIRS="$DIRS docs fsdev"
DIRS="$DIRS pc-bios/optionrom pc-bios/spapr-rtas pc-bios/s390-ccw"
DIRS="$DIRS roms/seabios roms/vgabios"
DIRS="$DIRS qapi-generated"
diff --git a/contrib/libvhost-user/Makefile.objs b/contrib/libvhost-user/Makefile.objs
new file mode 100644
index 0000000000..cef1ad6e31
--- /dev/null
+++ b/contrib/libvhost-user/Makefile.objs
@@ -0,0 +1 @@
+libvhost-user-obj-y = libvhost-user.o
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
new file mode 100644
index 0000000000..af4faad60b
--- /dev/null
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -0,0 +1,1499 @@
+/*
+ * Vhost User library
+ *
+ * Copyright IBM, Corp. 2007
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Marc-André Lureau <mlureau@redhat.com>
+ * Victor Kaplansky <victork@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#include <qemu/osdep.h>
+#include <sys/eventfd.h>
+#include <linux/vhost.h>
+
+#include "qemu/atomic.h"
+
+#include "libvhost-user.h"
+
+#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
+
+/* The version of the protocol we support */
+#define VHOST_USER_VERSION 1
+#define LIBVHOST_USER_DEBUG 0
+
+#define DPRINT(...) \
+ do { \
+ if (LIBVHOST_USER_DEBUG) { \
+ fprintf(stderr, __VA_ARGS__); \
+ } \
+ } while (0)
+
+static const char *
+vu_request_to_string(int req)
+{
+#define REQ(req) [req] = #req
+ static const char *vu_request_str[] = {
+ REQ(VHOST_USER_NONE),
+ REQ(VHOST_USER_GET_FEATURES),
+ REQ(VHOST_USER_SET_FEATURES),
+ REQ(VHOST_USER_SET_OWNER),
+ REQ(VHOST_USER_RESET_OWNER),
+ REQ(VHOST_USER_SET_MEM_TABLE),
+ REQ(VHOST_USER_SET_LOG_BASE),
+ REQ(VHOST_USER_SET_LOG_FD),
+ REQ(VHOST_USER_SET_VRING_NUM),
+ REQ(VHOST_USER_SET_VRING_ADDR),
+ REQ(VHOST_USER_SET_VRING_BASE),
+ REQ(VHOST_USER_GET_VRING_BASE),
+ REQ(VHOST_USER_SET_VRING_KICK),
+ REQ(VHOST_USER_SET_VRING_CALL),
+ REQ(VHOST_USER_SET_VRING_ERR),
+ REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
+ REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
+ REQ(VHOST_USER_GET_QUEUE_NUM),
+ REQ(VHOST_USER_SET_VRING_ENABLE),
+ REQ(VHOST_USER_SEND_RARP),
+ REQ(VHOST_USER_INPUT_GET_CONFIG),
+ REQ(VHOST_USER_MAX),
+ };
+#undef REQ
+
+ if (req < VHOST_USER_MAX) {
+ return vu_request_str[req];
+ } else {
+ return "unknown";
+ }
+}
+
+static void
+vu_panic(VuDev *dev, const char *msg, ...)
+{
+ char *buf = NULL;
+ va_list ap;
+
+ va_start(ap, msg);
+ (void)vasprintf(&buf, msg, ap);
+ va_end(ap);
+
+ dev->broken = true;
+ dev->panic(dev, buf);
+ free(buf);
+
+ /* FIXME: find a way to call virtio_error? */
+}
+
+/* Translate guest physical address to our virtual address. */
+void *
+vu_gpa_to_va(VuDev *dev, uint64_t guest_addr)
+{
+ int i;
+
+ /* Find matching memory region. */
+ for (i = 0; i < dev->nregions; i++) {
+ VuDevRegion *r = &dev->regions[i];
+
+ if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
+ return (void *)(uintptr_t)
+ guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
+ }
+ }
+
+ return NULL;
+}
+
+/* Translate qemu virtual address to our virtual address. */
+static void *
+qva_to_va(VuDev *dev, uint64_t qemu_addr)
+{
+ int i;
+
+ /* Find matching memory region. */
+ for (i = 0; i < dev->nregions; i++) {
+ VuDevRegion *r = &dev->regions[i];
+
+ if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
+ return (void *)(uintptr_t)
+ qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+vmsg_close_fds(VhostUserMsg *vmsg)
+{
+ int i;
+
+ for (i = 0; i < vmsg->fd_num; i++) {
+ close(vmsg->fds[i]);
+ }
+}
+
+static bool
+vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
+{
+ char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
+ struct iovec iov = {
+ .iov_base = (char *)vmsg,
+ .iov_len = VHOST_USER_HDR_SIZE,
+ };
+ struct msghdr msg = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ .msg_control = control,
+ .msg_controllen = sizeof(control),
+ };
+ size_t fd_size;
+ struct cmsghdr *cmsg;
+ int rc;
+
+ do {
+ rc = recvmsg(conn_fd, &msg, 0);
+ } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+
+ if (rc <= 0) {
+ vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
+ return false;
+ }
+
+ vmsg->fd_num = 0;
+ for (cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg != NULL;
+ cmsg = CMSG_NXTHDR(&msg, cmsg))
+ {
+ if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
+ fd_size = cmsg->cmsg_len - CMSG_LEN(0);
+ vmsg->fd_num = fd_size / sizeof(int);
+ memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
+ break;
+ }
+ }
+
+ if (vmsg->size > sizeof(vmsg->payload)) {
+ vu_panic(dev,
+ "Error: too big message request: %d, size: vmsg->size: %u, "
+ "while sizeof(vmsg->payload) = %zu\n",
+ vmsg->request, vmsg->size, sizeof(vmsg->payload));
+ goto fail;
+ }
+
+ if (vmsg->size) {
+ do {
+ rc = read(conn_fd, &vmsg->payload, vmsg->size);
+ } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+
+ if (rc <= 0) {
+ vu_panic(dev, "Error while reading: %s", strerror(errno));
+ goto fail;
+ }
+
+ assert(rc == vmsg->size);
+ }
+
+ return true;
+
+fail:
+ vmsg_close_fds(vmsg);
+
+ return false;
+}
+
+static bool
+vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
+{
+ int rc;
+ uint8_t *p = (uint8_t *)vmsg;
+
+ /* Set the version in the flags when sending the reply */
+ vmsg->flags &= ~VHOST_USER_VERSION_MASK;
+ vmsg->flags |= VHOST_USER_VERSION;
+ vmsg->flags |= VHOST_USER_REPLY_MASK;
+
+ do {
+ rc = write(conn_fd, p, VHOST_USER_HDR_SIZE);
+ } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+
+ do {
+ if (vmsg->data) {
+ rc = write(conn_fd, vmsg->data, vmsg->size);
+ } else {
+ rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
+ }
+ } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+
+ if (rc <= 0) {
+ vu_panic(dev, "Error while writing: %s", strerror(errno));
+ return false;
+ }
+
+ return true;
+}
+
+/* Kick the log_call_fd if required. */
+static void
+vu_log_kick(VuDev *dev)
+{
+ if (dev->log_call_fd != -1) {
+ DPRINT("Kicking the QEMU's log...\n");
+ if (eventfd_write(dev->log_call_fd, 1) < 0) {
+ vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
+ }
+ }
+}
+
+static void
+vu_log_page(uint8_t *log_table, uint64_t page)
+{
+ DPRINT("Logged dirty guest page: %"PRId64"\n", page);
+ atomic_or(&log_table[page / 8], 1 << (page % 8));
+}
+
+static void
+vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
+{
+ uint64_t page;
+
+ if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
+ !dev->log_table || !length) {
+ return;
+ }
+
+ assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));
+
+ page = address / VHOST_LOG_PAGE;
+ while (page * VHOST_LOG_PAGE < address + length) {
+ vu_log_page(dev->log_table, page);
+ page++;
+ }
+
+ vu_log_kick(dev);
+}
+
+static void
+vu_kick_cb(VuDev *dev, int condition, void *data)
+{
+ int index = (intptr_t)data;
+ VuVirtq *vq = &dev->vq[index];
+ int sock = vq->kick_fd;
+ eventfd_t kick_data;
+ ssize_t rc;
+
+ rc = eventfd_read(sock, &kick_data);
+ if (rc == -1) {
+ vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
+ dev->remove_watch(dev, dev->vq[index].kick_fd);
+ } else {
+ DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
+ kick_data, vq->handler, index);
+ if (vq->handler) {
+ vq->handler(dev, index);
+ }
+ }
+}
+
+static bool
+vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ vmsg->payload.u64 =
+ 1ULL << VHOST_F_LOG_ALL |
+ 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
+
+ if (dev->iface->get_features) {
+ vmsg->payload.u64 |= dev->iface->get_features(dev);
+ }
+
+ vmsg->size = sizeof(vmsg->payload.u64);
+
+ DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
+
+ return true;
+}
+
+static void
+vu_set_enable_all_rings(VuDev *dev, bool enabled)
+{
+ int i;
+
+ for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+ dev->vq[i].enable = enabled;
+ }
+}
+
+static bool
+vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
+
+ dev->features = vmsg->payload.u64;
+
+ if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
+ vu_set_enable_all_rings(dev, true);
+ }
+
+ if (dev->iface->set_features) {
+ dev->iface->set_features(dev, dev->features);
+ }
+
+ return false;
+}
+
+static bool
+vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ return false;
+}
+
+static void
+vu_close_log(VuDev *dev)
+{
+ if (dev->log_table) {
+ if (munmap(dev->log_table, dev->log_size) != 0) {
+ perror("close log munmap() error");
+ }
+
+ dev->log_table = NULL;
+ }
+ if (dev->log_call_fd != -1) {
+ close(dev->log_call_fd);
+ dev->log_call_fd = -1;
+ }
+}
+
+static bool
+vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ vu_set_enable_all_rings(dev, false);
+
+ return false;
+}
+
+static bool
+vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ int i;
+ VhostUserMemory *memory = &vmsg->payload.memory;
+ dev->nregions = memory->nregions;
+
+ DPRINT("Nregions: %d\n", memory->nregions);
+ for (i = 0; i < dev->nregions; i++) {
+ void *mmap_addr;
+ VhostUserMemoryRegion *msg_region = &memory->regions[i];
+ VuDevRegion *dev_region = &dev->regions[i];
+
+ DPRINT("Region %d\n", i);
+ DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
+ msg_region->guest_phys_addr);
+ DPRINT(" memory_size: 0x%016"PRIx64"\n",
+ msg_region->memory_size);
+ DPRINT(" userspace_addr 0x%016"PRIx64"\n",
+ msg_region->userspace_addr);
+ DPRINT(" mmap_offset 0x%016"PRIx64"\n",
+ msg_region->mmap_offset);
+
+ dev_region->gpa = msg_region->guest_phys_addr;
+ dev_region->size = msg_region->memory_size;
+ dev_region->qva = msg_region->userspace_addr;
+ dev_region->mmap_offset = msg_region->mmap_offset;
+
+ /* We don't use offset argument of mmap() since the
+ * mapped address has to be page aligned, and we use huge
+ * pages. */
+ mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ vmsg->fds[i], 0);
+
+ if (mmap_addr == MAP_FAILED) {
+ vu_panic(dev, "region mmap error: %s", strerror(errno));
+ } else {
+ dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
+ DPRINT(" mmap_addr: 0x%016"PRIx64"\n",
+ dev_region->mmap_addr);
+ }
+
+ close(vmsg->fds[i]);
+ }
+
+ return false;
+}
+
+static bool
+vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ int fd;
+ uint64_t log_mmap_size, log_mmap_offset;
+ void *rc;
+
+ if (vmsg->fd_num != 1 ||
+ vmsg->size != sizeof(vmsg->payload.log)) {
+ vu_panic(dev, "Invalid log_base message");
+ return true;
+ }
+
+ fd = vmsg->fds[0];
+ log_mmap_offset = vmsg->payload.log.mmap_offset;
+ log_mmap_size = vmsg->payload.log.mmap_size;
+ DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
+ DPRINT("Log mmap_size: %"PRId64"\n", log_mmap_size);
+
+ rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
+ log_mmap_offset);
+ if (rc == MAP_FAILED) {
+ perror("log mmap error");
+ }
+ dev->log_table = rc;
+ dev->log_size = log_mmap_size;
+
+ vmsg->size = sizeof(vmsg->payload.u64);
+
+ return true;
+}
+
+static bool
+vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ if (vmsg->fd_num != 1) {
+ vu_panic(dev, "Invalid log_fd message");
+ return false;
+ }
+
+ if (dev->log_call_fd != -1) {
+ close(dev->log_call_fd);
+ }
+ dev->log_call_fd = vmsg->fds[0];
+ DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);
+
+ return false;
+}
+
+static bool
+vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ unsigned int index = vmsg->payload.state.index;
+ unsigned int num = vmsg->payload.state.num;
+
+ DPRINT("State.index: %d\n", index);
+ DPRINT("State.num: %d\n", num);
+ dev->vq[index].vring.num = num;
+
+ return false;
+}
+
+static bool
+vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ struct vhost_vring_addr *vra = &vmsg->payload.addr;
+ unsigned int index = vra->index;
+ VuVirtq *vq = &dev->vq[index];
+
+ DPRINT("vhost_vring_addr:\n");
+ DPRINT(" index: %d\n", vra->index);
+ DPRINT(" flags: %d\n", vra->flags);
+ DPRINT(" desc_user_addr: 0x%016llx\n", vra->desc_user_addr);
+ DPRINT(" used_user_addr: 0x%016llx\n", vra->used_user_addr);
+ DPRINT(" avail_user_addr: 0x%016llx\n", vra->avail_user_addr);
+ DPRINT(" log_guest_addr: 0x%016llx\n", vra->log_guest_addr);
+
+ vq->vring.flags = vra->flags;
+ vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
+ vq->vring.used = qva_to_va(dev, vra->used_user_addr);
+ vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
+ vq->vring.log_guest_addr = vra->log_guest_addr;
+
+ DPRINT("Setting virtq addresses:\n");
+ DPRINT(" vring_desc at %p\n", vq->vring.desc);
+ DPRINT(" vring_used at %p\n", vq->vring.used);
+ DPRINT(" vring_avail at %p\n", vq->vring.avail);
+
+ if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
+ vu_panic(dev, "Invalid vring_addr message");
+ return false;
+ }
+
+ vq->used_idx = vq->vring.used->idx;
+
+ return false;
+}
+
+static bool
+vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ unsigned int index = vmsg->payload.state.index;
+ unsigned int num = vmsg->payload.state.num;
+
+ DPRINT("State.index: %d\n", index);
+ DPRINT("State.num: %d\n", num);
+ dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;
+
+ return false;
+}
+
+static bool
+vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ unsigned int index = vmsg->payload.state.index;
+
+ DPRINT("State.index: %d\n", index);
+ vmsg->payload.state.num = dev->vq[index].last_avail_idx;
+ vmsg->size = sizeof(vmsg->payload.state);
+
+ dev->vq[index].started = false;
+ if (dev->iface->queue_set_started) {
+ dev->iface->queue_set_started(dev, index, false);
+ }
+
+ if (dev->vq[index].call_fd != -1) {
+ close(dev->vq[index].call_fd);
+ dev->vq[index].call_fd = -1;
+ }
+ if (dev->vq[index].kick_fd != -1) {
+ dev->remove_watch(dev, dev->vq[index].kick_fd);
+ close(dev->vq[index].kick_fd);
+ dev->vq[index].kick_fd = -1;
+ }
+
+ return true;
+}
+
+static bool
+vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
+{
+ int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+
+ if (index >= VHOST_MAX_NR_VIRTQUEUE) {
+ vmsg_close_fds(vmsg);
+ vu_panic(dev, "Invalid queue index: %u", index);
+ return false;
+ }
+
+ if (vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK ||
+ vmsg->fd_num != 1) {
+ vmsg_close_fds(vmsg);
+ vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+
+ DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
+
+ if (!vu_check_queue_msg_file(dev, vmsg)) {
+ return false;
+ }
+
+ if (dev->vq[index].kick_fd != -1) {
+ dev->remove_watch(dev, dev->vq[index].kick_fd);
+ close(dev->vq[index].kick_fd);
+ dev->vq[index].kick_fd = -1;
+ }
+
+ if (!(vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
+ dev->vq[index].kick_fd = vmsg->fds[0];
+ DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);
+ }
+
+ dev->vq[index].started = true;
+ if (dev->iface->queue_set_started) {
+ dev->iface->queue_set_started(dev, index, true);
+ }
+
+ if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
+ dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
+ vu_kick_cb, (void *)(long)index);
+
+ DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
+ dev->vq[index].kick_fd, index);
+ }
+
+ return false;
+}
+
+void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
+ vu_queue_handler_cb handler)
+{
+ int qidx = vq - dev->vq;
+
+ vq->handler = handler;
+ if (vq->kick_fd >= 0) {
+ if (handler) {
+ dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
+ vu_kick_cb, (void *)(long)qidx);
+ } else {
+ dev->remove_watch(dev, vq->kick_fd);
+ }
+ }
+}
+
+static bool
+vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+
+ DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
+
+ if (!vu_check_queue_msg_file(dev, vmsg)) {
+ return false;
+ }
+
+ if (dev->vq[index].call_fd != -1) {
+ close(dev->vq[index].call_fd);
+ dev->vq[index].call_fd = -1;
+ }
+
+ if (!(vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
+ dev->vq[index].call_fd = vmsg->fds[0];
+ }
+
+ DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);
+
+ return false;
+}
+
+static bool
+vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+
+ DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
+
+ if (!vu_check_queue_msg_file(dev, vmsg)) {
+ return false;
+ }
+
+ if (dev->vq[index].err_fd != -1) {
+ close(dev->vq[index].err_fd);
+ dev->vq[index].err_fd = -1;
+ }
+
+ if (!(vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
+ dev->vq[index].err_fd = vmsg->fds[0];
+ }
+
+ return false;
+}
+
+static bool
+vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
+
+ if (dev->iface->get_protocol_features) {
+ features |= dev->iface->get_protocol_features(dev);
+ }
+
+ vmsg->payload.u64 = features;
+ vmsg->size = sizeof(vmsg->payload.u64);
+
+ return true;
+}
+
+static bool
+vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ uint64_t features = vmsg->payload.u64;
+
+ DPRINT("u64: 0x%016"PRIx64"\n", features);
+
+ dev->protocol_features = vmsg->payload.u64;
+
+ if (dev->iface->set_protocol_features) {
+ dev->iface->set_protocol_features(dev, features);
+ }
+
+ return false;
+}
+
+static bool
+vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ DPRINT("Function %s() not implemented yet.\n", __func__);
+ return false;
+}
+
+static bool
+vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
+{
+ unsigned int index = vmsg->payload.state.index;
+ unsigned int enable = vmsg->payload.state.num;
+
+ DPRINT("State.index: %d\n", index);
+ DPRINT("State.enable: %d\n", enable);
+
+ if (index >= VHOST_MAX_NR_VIRTQUEUE) {
+ vu_panic(dev, "Invalid vring_enable index: %u", index);
+ return false;
+ }
+
+ dev->vq[index].enable = enable;
+ return false;
+}
+
+static bool
+vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
+{
+ int do_reply = 0;
+
+ /* Print out generic part of the request. */
+ DPRINT("================ Vhost user message ================\n");
+ DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
+ vmsg->request);
+ DPRINT("Flags: 0x%x\n", vmsg->flags);
+ DPRINT("Size: %d\n", vmsg->size);
+
+ if (vmsg->fd_num) {
+ int i;
+ DPRINT("Fds:");
+ for (i = 0; i < vmsg->fd_num; i++) {
+ DPRINT(" %d", vmsg->fds[i]);
+ }
+ DPRINT("\n");
+ }
+
+ if (dev->iface->process_msg &&
+ dev->iface->process_msg(dev, vmsg, &do_reply)) {
+ return do_reply;
+ }
+
+ switch (vmsg->request) {
+ case VHOST_USER_GET_FEATURES:
+ return vu_get_features_exec(dev, vmsg);
+ case VHOST_USER_SET_FEATURES:
+ return vu_set_features_exec(dev, vmsg);
+ case VHOST_USER_GET_PROTOCOL_FEATURES:
+ return vu_get_protocol_features_exec(dev, vmsg);
+ case VHOST_USER_SET_PROTOCOL_FEATURES:
+ return vu_set_protocol_features_exec(dev, vmsg);
+ case VHOST_USER_SET_OWNER:
+ return vu_set_owner_exec(dev, vmsg);
+ case VHOST_USER_RESET_OWNER:
+ return vu_reset_device_exec(dev, vmsg);
+ case VHOST_USER_SET_MEM_TABLE:
+ return vu_set_mem_table_exec(dev, vmsg);
+ case VHOST_USER_SET_LOG_BASE:
+ return vu_set_log_base_exec(dev, vmsg);
+ case VHOST_USER_SET_LOG_FD:
+ return vu_set_log_fd_exec(dev, vmsg);
+ case VHOST_USER_SET_VRING_NUM:
+ return vu_set_vring_num_exec(dev, vmsg);
+ case VHOST_USER_SET_VRING_ADDR:
+ return vu_set_vring_addr_exec(dev, vmsg);
+ case VHOST_USER_SET_VRING_BASE:
+ return vu_set_vring_base_exec(dev, vmsg);
+ case VHOST_USER_GET_VRING_BASE:
+ return vu_get_vring_base_exec(dev, vmsg);
+ case VHOST_USER_SET_VRING_KICK:
+ return vu_set_vring_kick_exec(dev, vmsg);
+ case VHOST_USER_SET_VRING_CALL:
+ return vu_set_vring_call_exec(dev, vmsg);
+ case VHOST_USER_SET_VRING_ERR:
+ return vu_set_vring_err_exec(dev, vmsg);
+ case VHOST_USER_GET_QUEUE_NUM:
+ return vu_get_queue_num_exec(dev, vmsg);
+ case VHOST_USER_SET_VRING_ENABLE:
+ return vu_set_vring_enable_exec(dev, vmsg);
+ default:
+ vmsg_close_fds(vmsg);
+ vu_panic(dev, "Unhandled request: %d", vmsg->request);
+ }
+
+ return false;
+}
+
+bool
+vu_dispatch(VuDev *dev)
+{
+ VhostUserMsg vmsg = { 0, };
+ int reply_requested;
+ bool success = false;
+
+ if (!vu_message_read(dev, dev->sock, &vmsg)) {
+ goto end;
+ }
+
+ reply_requested = vu_process_message(dev, &vmsg);
+ if (!reply_requested) {
+ success = true;
+ goto end;
+ }
+
+ if (!vu_message_write(dev, dev->sock, &vmsg)) {
+ goto end;
+ }
+
+ success = true;
+
+end:
+ g_free(vmsg.data);
+ return success;
+}
+
+void
+vu_deinit(VuDev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->nregions; i++) {
+ VuDevRegion *r = &dev->regions[i];
+ void *m = (void *) (uintptr_t) r->mmap_addr;
+ if (m != MAP_FAILED) {
+ munmap(m, r->size + r->mmap_offset);
+ }
+ }
+ dev->nregions = 0;
+
+ for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+ VuVirtq *vq = &dev->vq[i];
+
+ if (vq->call_fd != -1) {
+ close(vq->call_fd);
+ vq->call_fd = -1;
+ }
+
+ if (vq->kick_fd != -1) {
+ close(vq->kick_fd);
+ vq->kick_fd = -1;
+ }
+
+ if (vq->err_fd != -1) {
+ close(vq->err_fd);
+ vq->err_fd = -1;
+ }
+ }
+
+
+ vu_close_log(dev);
+
+ if (dev->sock != -1) {
+ close(dev->sock);
+ }
+}
+
+void
+vu_init(VuDev *dev,
+ int socket,
+ vu_panic_cb panic,
+ vu_set_watch_cb set_watch,
+ vu_remove_watch_cb remove_watch,
+ const VuDevIface *iface)
+{
+ int i;
+
+ assert(socket >= 0);
+ assert(set_watch);
+ assert(remove_watch);
+ assert(iface);
+ assert(panic);
+
+ memset(dev, 0, sizeof(*dev));
+
+ dev->sock = socket;
+ dev->panic = panic;
+ dev->set_watch = set_watch;
+ dev->remove_watch = remove_watch;
+ dev->iface = iface;
+ dev->log_call_fd = -1;
+ for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+ dev->vq[i] = (VuVirtq) {
+ .call_fd = -1, .kick_fd = -1, .err_fd = -1,
+ .notification = true,
+ };
+ }
+}
+
+VuVirtq *
+vu_get_queue(VuDev *dev, int qidx)
+{
+ assert(qidx < VHOST_MAX_NR_VIRTQUEUE);
+ return &dev->vq[qidx];
+}
+
+bool
+vu_queue_enabled(VuDev *dev, VuVirtq *vq)
+{
+ return vq->enable;
+}
+
+static inline uint16_t
+vring_avail_flags(VuVirtq *vq)
+{
+ return vq->vring.avail->flags;
+}
+
+static inline uint16_t
+vring_avail_idx(VuVirtq *vq)
+{
+ vq->shadow_avail_idx = vq->vring.avail->idx;
+
+ return vq->shadow_avail_idx;
+}
+
+static inline uint16_t
+vring_avail_ring(VuVirtq *vq, int i)
+{
+ return vq->vring.avail->ring[i];
+}
+
+static inline uint16_t
+vring_get_used_event(VuVirtq *vq)
+{
+ return vring_avail_ring(vq, vq->vring.num);
+}
+
+static int
+virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
+{
+ uint16_t num_heads = vring_avail_idx(vq) - idx;
+
+ /* Check it isn't doing very strange things with descriptor numbers. */
+ if (num_heads > vq->vring.num) {
+ vu_panic(dev, "Guest moved used index from %u to %u",
+ idx, vq->shadow_avail_idx);
+ return -1;
+ }
+ if (num_heads) {
+ /* On success, callers read a descriptor at vq->last_avail_idx.
+ * Make sure descriptor read does not bypass avail index read. */
+ smp_rmb();
+ }
+
+ return num_heads;
+}
+
+static bool
+virtqueue_get_head(VuDev *dev, VuVirtq *vq,
+ unsigned int idx, unsigned int *head)
+{
+ /* Grab the next descriptor number they're advertising, and increment
+ * the index we've seen. */
+ *head = vring_avail_ring(vq, idx % vq->vring.num);
+
+ /* If their number is silly, that's a fatal mistake. */
+ if (*head >= vq->vring.num) {
+ vu_panic(dev, "Guest says index %u is available", head);
+ return false;
+ }
+
+ return true;
+}
+
+enum {
+ VIRTQUEUE_READ_DESC_ERROR = -1,
+ VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
+ VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */
+};
+
+static int
+virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
+ int i, unsigned int max, unsigned int *next)
+{
+ /* If this descriptor says it doesn't chain, we're done. */
+ if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
+ return VIRTQUEUE_READ_DESC_DONE;
+ }
+
+ /* Check they're not leading us off end of descriptors. */
+ *next = desc[i].next;
+ /* Make sure compiler knows to grab that: we don't want it changing! */
+ smp_wmb();
+
+ if (*next >= max) {
+ vu_panic(dev, "Desc next is %u", next);
+ return VIRTQUEUE_READ_DESC_ERROR;
+ }
+
+ return VIRTQUEUE_READ_DESC_MORE;
+}
+
+void
+vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes,
+ unsigned max_in_bytes, unsigned max_out_bytes)
+{
+ unsigned int idx;
+ unsigned int total_bufs, in_total, out_total;
+ int rc;
+
+ idx = vq->last_avail_idx;
+
+ total_bufs = in_total = out_total = 0;
+ while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
+ unsigned int max, num_bufs, indirect = 0;
+ struct vring_desc *desc;
+ unsigned int i;
+
+ max = vq->vring.num;
+ num_bufs = total_bufs;
+ if (!virtqueue_get_head(dev, vq, idx++, &i)) {
+ goto err;
+ }
+ desc = vq->vring.desc;
+
+ if (desc[i].flags & VRING_DESC_F_INDIRECT) {
+ if (desc[i].len % sizeof(struct vring_desc)) {
+ vu_panic(dev, "Invalid size for indirect buffer table");
+ goto err;
+ }
+
+ /* If we've got too many, that implies a descriptor loop. */
+ if (num_bufs >= max) {
+ vu_panic(dev, "Looped descriptor");
+ goto err;
+ }
+
+ /* loop over the indirect descriptor table */
+ indirect = 1;
+ max = desc[i].len / sizeof(struct vring_desc);
+ desc = vu_gpa_to_va(dev, desc[i].addr);
+ num_bufs = i = 0;
+ }
+
+ do {
+ /* If we've got too many, that implies a descriptor loop. */
+ if (++num_bufs > max) {
+ vu_panic(dev, "Looped descriptor");
+ goto err;
+ }
+
+ if (desc[i].flags & VRING_DESC_F_WRITE) {
+ in_total += desc[i].len;
+ } else {
+ out_total += desc[i].len;
+ }
+ if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
+ goto done;
+ }
+ rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
+ } while (rc == VIRTQUEUE_READ_DESC_MORE);
+
+ if (rc == VIRTQUEUE_READ_DESC_ERROR) {
+ goto err;
+ }
+
+ if (!indirect) {
+ total_bufs = num_bufs;
+ } else {
+ total_bufs++;
+ }
+ }
+ if (rc < 0) {
+ goto err;
+ }
+done:
+ if (in_bytes) {
+ *in_bytes = in_total;
+ }
+ if (out_bytes) {
+ *out_bytes = out_total;
+ }
+ return;
+
+err:
+ in_total = out_total = 0;
+ goto done;
+}
+
+bool
+vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
+ unsigned int out_bytes)
+{
+ unsigned int in_total, out_total;
+
+ vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
+ in_bytes, out_bytes);
+
+ return in_bytes <= in_total && out_bytes <= out_total;
+}
+
+/* Fetch avail_idx from VQ memory only when we really need to know if
+ * guest has added some buffers. */
+int
+vu_queue_empty(VuDev *dev, VuVirtq *vq)
+{
+ if (vq->shadow_avail_idx != vq->last_avail_idx) {
+ return 0;
+ }
+
+ return vring_avail_idx(vq) == vq->last_avail_idx;
+}
+
+static inline
+bool has_feature(uint64_t features, unsigned int fbit)
+{
+ assert(fbit < 64);
+ return !!(features & (1ULL << fbit));
+}
+
+static inline
+bool vu_has_feature(VuDev *dev,
+ unsigned int fbit)
+{
+ return has_feature(dev->features, fbit);
+}
+
+static bool
+vring_notify(VuDev *dev, VuVirtq *vq)
+{
+ uint16_t old, new;
+ bool v;
+
+ /* We need to expose used array entries before checking used event. */
+ smp_mb();
+
+ /* Always notify when queue is empty (when feature acknowledge) */
+ if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ !vq->inuse && vu_queue_empty(dev, vq)) {
+ return true;
+ }
+
+ if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
+ }
+
+ v = vq->signalled_used_valid;
+ vq->signalled_used_valid = true;
+ old = vq->signalled_used;
+ new = vq->signalled_used = vq->used_idx;
+ return !v || vring_need_event(vring_get_used_event(vq), new, old);
+}
+
+void
+vu_queue_notify(VuDev *dev, VuVirtq *vq)
+{
+ if (unlikely(dev->broken)) {
+ return;
+ }
+
+ if (!vring_notify(dev, vq)) {
+ DPRINT("skipped notify...\n");
+ return;
+ }
+
+ if (eventfd_write(vq->call_fd, 1) < 0) {
+ vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
+ }
+}
+
+static inline void
+vring_used_flags_set_bit(VuVirtq *vq, int mask)
+{
+ uint16_t *flags;
+
+ flags = (uint16_t *)((char*)vq->vring.used +
+ offsetof(struct vring_used, flags));
+ *flags |= mask;
+}
+
+static inline void
+vring_used_flags_unset_bit(VuVirtq *vq, int mask)
+{
+ uint16_t *flags;
+
+ flags = (uint16_t *)((char*)vq->vring.used +
+ offsetof(struct vring_used, flags));
+ *flags &= ~mask;
+}
+
+static inline void
+vring_set_avail_event(VuVirtq *vq, uint16_t val)
+{
+ if (!vq->notification) {
+ return;
+ }
+
+ *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
+}
+
+void
+vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
+{
+ vq->notification = enable;
+ if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ vring_set_avail_event(vq, vring_avail_idx(vq));
+ } else if (enable) {
+ vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
+ } else {
+ vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
+ }
+ if (enable) {
+ /* Expose avail event/used flags before caller checks the avail idx. */
+ smp_mb();
+ }
+}
+
+static void
+virtqueue_map_desc(VuDev *dev,
+ unsigned int *p_num_sg, struct iovec *iov,
+ unsigned int max_num_sg, bool is_write,
+ uint64_t pa, size_t sz)
+{
+ unsigned num_sg = *p_num_sg;
+
+ assert(num_sg <= max_num_sg);
+
+ if (!sz) {
+ vu_panic(dev, "virtio: zero sized buffers are not allowed");
+ return;
+ }
+
+ iov[num_sg].iov_base = vu_gpa_to_va(dev, pa);
+ iov[num_sg].iov_len = sz;
+ num_sg++;
+
+ *p_num_sg = num_sg;
+}
+
+/* Round number down to multiple */
+#define ALIGN_DOWN(n, m) ((n) / (m) * (m))
+
+/* Round number up to multiple */
+#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
+
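+/* The element header (sz bytes, at least sizeof(VuVirtqElement)) and the
+ * in_sg[]/out_sg[] iovec arrays are carved out of a single allocation;
+ * ALIGN_UP keeps the arrays suitably aligned, e.g. ALIGN_UP(13, 8) == 16. */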
+static void *
+virtqueue_alloc_element(size_t sz,
+ unsigned out_num, unsigned in_num)
+{
+ VuVirtqElement *elem;
+ size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
+ size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
+ size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
+
+ assert(sz >= sizeof(VuVirtqElement));
+ elem = malloc(out_sg_end);
+ elem->out_num = out_num;
+ elem->in_num = in_num;
+ elem->in_sg = (void *)elem + in_sg_ofs;
+ elem->out_sg = (void *)elem + out_sg_ofs;
+ return elem;
+}
+
+void *
+vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
+{
+ unsigned int i, head, max;
+ VuVirtqElement *elem;
+ unsigned out_num, in_num;
+ struct iovec iov[VIRTQUEUE_MAX_SIZE];
+ struct vring_desc *desc;
+ int rc;
+
+ if (unlikely(dev->broken)) {
+ return NULL;
+ }
+
+ if (vu_queue_empty(dev, vq)) {
+ return NULL;
+ }
+    /* Needed after vu_queue_empty(): the read barrier pairs with the
+     * driver's write barrier (see QEMU's virtqueue_num_heads()), so
+     * descriptors are only read after the avail index update is seen. */
+ smp_rmb();
+
+    /* When we start there are neither input nor output buffers. */
+ out_num = in_num = 0;
+
+ max = vq->vring.num;
+ if (vq->inuse >= vq->vring.num) {
+ vu_panic(dev, "Virtqueue size exceeded");
+ return NULL;
+ }
+
+ if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
+ return NULL;
+ }
+
+ if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
+ vring_set_avail_event(vq, vq->last_avail_idx);
+ }
+
+ i = head;
+ desc = vq->vring.desc;
+ if (desc[i].flags & VRING_DESC_F_INDIRECT) {
+ if (desc[i].len % sizeof(struct vring_desc)) {
+ vu_panic(dev, "Invalid size for indirect buffer table");
+ }
+
+ /* loop over the indirect descriptor table */
+ max = desc[i].len / sizeof(struct vring_desc);
+ desc = vu_gpa_to_va(dev, desc[i].addr);
+ i = 0;
+ }
+
+ /* Collect all the descriptors */
+ do {
+ if (desc[i].flags & VRING_DESC_F_WRITE) {
+ virtqueue_map_desc(dev, &in_num, iov + out_num,
+ VIRTQUEUE_MAX_SIZE - out_num, true,
+ desc[i].addr, desc[i].len);
+ } else {
+ if (in_num) {
+ vu_panic(dev, "Incorrect order for descriptors");
+ return NULL;
+ }
+ virtqueue_map_desc(dev, &out_num, iov,
+ VIRTQUEUE_MAX_SIZE, false,
+ desc[i].addr, desc[i].len);
+ }
+
+ /* If we've got too many, that implies a descriptor loop. */
+ if ((in_num + out_num) > max) {
+ vu_panic(dev, "Looped descriptor");
+ }
+ rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
+ } while (rc == VIRTQUEUE_READ_DESC_MORE);
+
+ if (rc == VIRTQUEUE_READ_DESC_ERROR) {
+ return NULL;
+ }
+
+ /* Now copy what we have collected and mapped */
+ elem = virtqueue_alloc_element(sz, out_num, in_num);
+ elem->index = head;
+ for (i = 0; i < out_num; i++) {
+ elem->out_sg[i] = iov[i];
+ }
+ for (i = 0; i < in_num; i++) {
+ elem->in_sg[i] = iov[out_num + i];
+ }
+
+ vq->inuse++;
+
+ return elem;
+}
+
+bool
+vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
+{
+ if (num > vq->inuse) {
+ return false;
+ }
+ vq->last_avail_idx -= num;
+ vq->inuse -= num;
+ return true;
+}
+
+static inline
+void vring_used_write(VuDev *dev, VuVirtq *vq,
+ struct vring_used_elem *uelem, int i)
+{
+ struct vring_used *used = vq->vring.used;
+
+ used->ring[i] = *uelem;
+ vu_log_write(dev, vq->vring.log_guest_addr +
+ offsetof(struct vring_used, ring[i]),
+ sizeof(used->ring[i]));
+}
+
+
+static void
+vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
+ const VuVirtqElement *elem,
+ unsigned int len)
+{
+ struct vring_desc *desc = vq->vring.desc;
+ unsigned int i, max, min;
+ unsigned num_bufs = 0;
+
+ max = vq->vring.num;
+ i = elem->index;
+
+ if (desc[i].flags & VRING_DESC_F_INDIRECT) {
+ if (desc[i].len % sizeof(struct vring_desc)) {
+ vu_panic(dev, "Invalid size for indirect buffer table");
+ }
+
+ /* loop over the indirect descriptor table */
+ max = desc[i].len / sizeof(struct vring_desc);
+ desc = vu_gpa_to_va(dev, desc[i].addr);
+ i = 0;
+ }
+
+ do {
+ if (++num_bufs > max) {
+ vu_panic(dev, "Looped descriptor");
+ return;
+ }
+
+ if (desc[i].flags & VRING_DESC_F_WRITE) {
+ min = MIN(desc[i].len, len);
+ vu_log_write(dev, desc[i].addr, min);
+ len -= min;
+ }
+
+ } while (len > 0 &&
+ (virtqueue_read_next_desc(dev, desc, i, max, &i)
+ == VIRTQUEUE_READ_DESC_MORE));
+}
+
+void
+vu_queue_fill(VuDev *dev, VuVirtq *vq,
+ const VuVirtqElement *elem,
+ unsigned int len, unsigned int idx)
+{
+ struct vring_used_elem uelem;
+
+ if (unlikely(dev->broken)) {
+ return;
+ }
+
+ vu_log_queue_fill(dev, vq, elem, len);
+
+ idx = (idx + vq->used_idx) % vq->vring.num;
+
+ uelem.id = elem->index;
+ uelem.len = len;
+ vring_used_write(dev, vq, &uelem, idx);
+}
+
+static inline
+void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
+{
+ vq->vring.used->idx = val;
+ vu_log_write(dev,
+ vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
+ sizeof(vq->vring.used->idx));
+
+ vq->used_idx = val;
+}
+
+void
+vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
+{
+ uint16_t old, new;
+
+ if (unlikely(dev->broken)) {
+ return;
+ }
+
+ /* Make sure buffer is written before we update index. */
+ smp_wmb();
+
+ old = vq->used_idx;
+ new = old + count;
+ vring_used_idx_set(dev, vq, new);
+ vq->inuse -= count;
+ if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
+ vq->signalled_used_valid = false;
+ }
+}
+
+void
+vu_queue_push(VuDev *dev, VuVirtq *vq,
+ const VuVirtqElement *elem, unsigned int len)
+{
+ vu_queue_fill(dev, vq, elem, len, 0);
+ vu_queue_flush(dev, vq, 1);
+}
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
new file mode 100644
index 0000000000..156b50e989
--- /dev/null
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -0,0 +1,435 @@
+/*
+ * Vhost User library
+ *
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Victor Kaplansky <victork@redhat.com>
+ * Marc-André Lureau <mlureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef LIBVHOST_USER_H
+#define LIBVHOST_USER_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <linux/vhost.h>
+#include "standard-headers/linux/virtio_ring.h"
+
+/* Based on qemu/hw/virtio/vhost-user.c */
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+#define VHOST_LOG_PAGE 4096
+
+#define VHOST_MAX_NR_VIRTQUEUE 8
+#define VIRTQUEUE_MAX_SIZE 1024
+
+#define VHOST_MEMORY_MAX_NREGIONS 8
+
+enum VhostUserProtocolFeature {
+ VHOST_USER_PROTOCOL_F_MQ = 0,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
+ VHOST_USER_PROTOCOL_F_RARP = 2,
+
+ VHOST_USER_PROTOCOL_F_MAX
+};
+
+#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
+
+typedef enum VhostUserRequest {
+ VHOST_USER_NONE = 0,
+ VHOST_USER_GET_FEATURES = 1,
+ VHOST_USER_SET_FEATURES = 2,
+ VHOST_USER_SET_OWNER = 3,
+ VHOST_USER_RESET_OWNER = 4,
+ VHOST_USER_SET_MEM_TABLE = 5,
+ VHOST_USER_SET_LOG_BASE = 6,
+ VHOST_USER_SET_LOG_FD = 7,
+ VHOST_USER_SET_VRING_NUM = 8,
+ VHOST_USER_SET_VRING_ADDR = 9,
+ VHOST_USER_SET_VRING_BASE = 10,
+ VHOST_USER_GET_VRING_BASE = 11,
+ VHOST_USER_SET_VRING_KICK = 12,
+ VHOST_USER_SET_VRING_CALL = 13,
+ VHOST_USER_SET_VRING_ERR = 14,
+ VHOST_USER_GET_PROTOCOL_FEATURES = 15,
+ VHOST_USER_SET_PROTOCOL_FEATURES = 16,
+ VHOST_USER_GET_QUEUE_NUM = 17,
+ VHOST_USER_SET_VRING_ENABLE = 18,
+ VHOST_USER_SEND_RARP = 19,
+ VHOST_USER_INPUT_GET_CONFIG = 20,
+ VHOST_USER_MAX
+} VhostUserRequest;
+
+typedef struct VhostUserMemoryRegion {
+ uint64_t guest_phys_addr;
+ uint64_t memory_size;
+ uint64_t userspace_addr;
+ uint64_t mmap_offset;
+} VhostUserMemoryRegion;
+
+typedef struct VhostUserMemory {
+ uint32_t nregions;
+ uint32_t padding;
+ VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
+} VhostUserMemory;
+
+typedef struct VhostUserLog {
+ uint64_t mmap_size;
+ uint64_t mmap_offset;
+} VhostUserLog;
+
+#if defined(_WIN32)
+# define VU_PACKED __attribute__((gcc_struct, packed))
+#else
+# define VU_PACKED __attribute__((packed))
+#endif
+
+typedef struct VhostUserMsg {
+ VhostUserRequest request;
+
+#define VHOST_USER_VERSION_MASK (0x3)
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+
+ union {
+#define VHOST_USER_VRING_IDX_MASK (0xff)
+#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
+ uint64_t u64;
+ struct vhost_vring_state state;
+ struct vhost_vring_addr addr;
+ VhostUserMemory memory;
+ VhostUserLog log;
+ } payload;
+
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+ int fd_num;
+ uint8_t *data;
+} VU_PACKED VhostUserMsg;
+
+typedef struct VuDevRegion {
+ /* Guest Physical address. */
+ uint64_t gpa;
+ /* Memory region size. */
+ uint64_t size;
+ /* QEMU virtual address (userspace). */
+ uint64_t qva;
+ /* Starting offset in our mmaped space. */
+ uint64_t mmap_offset;
+ /* Start address of mmaped space. */
+ uint64_t mmap_addr;
+} VuDevRegion;
+
+typedef struct VuDev VuDev;
+
+typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
+typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
+typedef int (*vu_process_msg_cb) (VuDev *dev, VhostUserMsg *vmsg,
+ int *do_reply);
+typedef void (*vu_queue_set_started_cb) (VuDev *dev, int qidx, bool started);
+
+typedef struct VuDevIface {
+ /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
+ vu_get_features_cb get_features;
+ /* enable vhost implementation features */
+ vu_set_features_cb set_features;
+ /* get the protocol feature bitmask from the underlying vhost
+ * implementation */
+ vu_get_features_cb get_protocol_features;
+ /* enable protocol features in the underlying vhost implementation. */
+ vu_set_features_cb set_protocol_features;
+    /* called for each vhost-user message received; return a non-zero
+     * value to skip libvhost-user's own processing of the message */
+ vu_process_msg_cb process_msg;
+    /* called when a queue's started state changes */
+ vu_queue_set_started_cb queue_set_started;
+} VuDevIface;
+
+typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);
+
+typedef struct VuRing {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+ uint64_t log_guest_addr;
+ uint32_t flags;
+} VuRing;
+
+typedef struct VuVirtq {
+ VuRing vring;
+
+ /* Next head to pop */
+ uint16_t last_avail_idx;
+
+ /* Last avail_idx read from VQ. */
+ uint16_t shadow_avail_idx;
+
+ uint16_t used_idx;
+
+ /* Last used index value we have signalled on */
+ uint16_t signalled_used;
+
+    /* Whether signalled_used is valid (or must be recomputed) */
+ bool signalled_used_valid;
+
+ /* Notification enabled? */
+ bool notification;
+
+ int inuse;
+
+ vu_queue_handler_cb handler;
+
+ int call_fd;
+ int kick_fd;
+ int err_fd;
+ unsigned int enable;
+ bool started;
+} VuVirtq;
+
+enum VuWatchCondtion {
+ VU_WATCH_IN = 1 << 0,
+ VU_WATCH_OUT = 1 << 1,
+ VU_WATCH_PRI = 1 << 2,
+ VU_WATCH_ERR = 1 << 3,
+ VU_WATCH_HUP = 1 << 4,
+};
+
+typedef void (*vu_panic_cb) (VuDev *dev, const char *err);
+typedef void (*vu_watch_cb) (VuDev *dev, int condition, void *data);
+typedef void (*vu_set_watch_cb) (VuDev *dev, int fd, int condition,
+ vu_watch_cb cb, void *data);
+typedef void (*vu_remove_watch_cb) (VuDev *dev, int fd);
+
+struct VuDev {
+ int sock;
+ uint32_t nregions;
+ VuDevRegion regions[VHOST_MEMORY_MAX_NREGIONS];
+ VuVirtq vq[VHOST_MAX_NR_VIRTQUEUE];
+ int log_call_fd;
+ uint64_t log_size;
+ uint8_t *log_table;
+ uint64_t features;
+ uint64_t protocol_features;
+ bool broken;
+
+    /* @set_watch: add the given fd to the watch set (or update its
+     * condition); call cb when the condition is met */
+ vu_set_watch_cb set_watch;
+
+ /* @remove_watch: remove the given fd from the watch set */
+ vu_remove_watch_cb remove_watch;
+
+ /* @panic: encountered an unrecoverable error, you may try to
+ * re-initialize */
+ vu_panic_cb panic;
+ const VuDevIface *iface;
+};
+
+typedef struct VuVirtqElement {
+ unsigned int index;
+ unsigned int out_num;
+ unsigned int in_num;
+ struct iovec *in_sg;
+ struct iovec *out_sg;
+} VuVirtqElement;
+
+/**
+ * vu_init:
+ * @dev: a VuDev context
+ * @socket: the socket connected to vhost-user master
+ * @panic: a panic callback
+ * @set_watch: a set_watch callback
+ * @remove_watch: a remove_watch callback
+ * @iface: a VuDevIface structure with vhost-user device callbacks
+ *
+ * Initializes a VuDev vhost-user context.
+ */
+void vu_init(VuDev *dev,
+ int socket,
+ vu_panic_cb panic,
+ vu_set_watch_cb set_watch,
+ vu_remove_watch_cb remove_watch,
+ const VuDevIface *iface);
+
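+/*
+ * A minimal setup sketch (callback names such as my_get_features,
+ * my_panic and my_queue_handler are placeholders, not part of this API):
+ *
+ *     static const VuDevIface iface = {
+ *         .get_features = my_get_features,
+ *         .set_features = my_set_features,
+ *         .queue_set_started = my_queue_set_started,
+ *     };
+ *
+ *     VuDev dev;
+ *
+ *     vu_init(&dev, socket_fd, my_panic, my_set_watch, my_remove_watch,
+ *             &iface);
+ *     vu_set_queue_handler(&dev, vu_get_queue(&dev, 0), my_queue_handler);
+ */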
+
+/**
+ * vu_deinit:
+ * @dev: a VuDev context
+ *
+ * Cleans up the VuDev context
+ */
+void vu_deinit(VuDev *dev);
+
+/**
+ * vu_dispatch:
+ * @dev: a VuDev context
+ *
+ * Process one vhost-user message.
+ *
+ * Returns: true on success, false on failure.
+ */
+bool vu_dispatch(VuDev *dev);
+
+/**
+ * vu_gpa_to_va:
+ * @dev: a VuDev context
+ * @guest_addr: guest address
+ *
+ * Translate a guest address to a pointer. Returns NULL on failure.
+ */
+void *vu_gpa_to_va(VuDev *dev, uint64_t guest_addr);
+
+/**
+ * vu_get_queue:
+ * @dev: a VuDev context
+ * @qidx: queue index
+ *
+ * Returns: the queue at index @qidx.
+ */
+VuVirtq *vu_get_queue(VuDev *dev, int qidx);
+
+/**
+ * vu_set_queue_handler:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @handler: the queue handler callback
+ *
+ * Set the queue handler. This function may be called several times
+ * for the same queue. If called with NULL @handler, the handler is
+ * removed.
+ */
+void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
+ vu_queue_handler_cb handler);
+
+
+/**
+ * vu_queue_set_notification:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @enable: whether to enable notifications
+ *
+ * Set whether the driver should notify (kick) the backend when it adds
+ * buffers to this queue, using either the avail event index or the
+ * used-ring notify flag.
+ */
+void vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable);
+
+/**
+ * vu_queue_enabled:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Returns: whether the queue is enabled.
+ */
+bool vu_queue_enabled(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_empty:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Returns: whether the queue is empty.
+ */
+int vu_queue_empty(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_notify:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ *
+ * Request to notify the queue via callfd (skipped if unnecessary)
+ */
+void vu_queue_notify(VuDev *dev, VuVirtq *vq);
+
+/**
+ * vu_queue_pop:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @sz: the size of the struct to return (must be >= sizeof(VuVirtqElement))
+ *
+ * Returns: a VuVirtqElement filled from the queue or NULL.
+ */
+void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz);
+
+/**
+ * vu_queue_rewind:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @num: number of elements to push back
+ *
+ * Pretend that elements weren't popped from the virtqueue. The next
+ * vu_queue_pop() will refetch the oldest element.
+ *
+ * Returns: true on success, false if @num is greater than the number of in use
+ * elements.
+ */
+bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num);
+
+/**
+ * vu_queue_fill:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @elem: a VuVirtqElement
+ * @len: length in bytes to write
+ * @idx: optional offset for the used ring index (0 in general)
+ *
+ * Fill a used ring entry with @elem (at @idx slots past the current used index).
+ */
+void vu_queue_fill(VuDev *dev, VuVirtq *vq,
+ const VuVirtqElement *elem,
+ unsigned int len, unsigned int idx);
+
+/**
+ * vu_queue_push:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @elem: a VuVirtqElement
+ * @len: length in bytes to write
+ *
+ * Helper that combines vu_queue_fill() with a vu_queue_flush().
+ */
+void vu_queue_push(VuDev *dev, VuVirtq *vq,
+ const VuVirtqElement *elem, unsigned int len);
+
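+/*
+ * A request-processing sketch built on the calls above (process_request()
+ * and reply_len are placeholders):
+ *
+ *     VuVirtqElement *elem;
+ *
+ *     while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
+ *         unsigned int reply_len = process_request(elem->out_sg, elem->out_num,
+ *                                                  elem->in_sg, elem->in_num);
+ *         vu_queue_push(dev, vq, elem, reply_len);
+ *         free(elem);
+ *     }
+ *     vu_queue_notify(dev, vq);
+ */
+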
+/**
+ * vu_queue_flush:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @num: number of elements to flush
+ *
+ * Mark the last @num elements as done (used.idx is advanced by @num
+ * elements).
+ */
+void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int num);
+
+/**
+ * vu_queue_get_avail_bytes:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @in_bytes: set to the bytes available in device-writable ("in") buffers
+ * @out_bytes: set to the bytes available in device-readable ("out") buffers
+ * @max_in_bytes: stop counting after max_in_bytes
+ * @max_out_bytes: stop counting after max_out_bytes
+ *
+ * Count the number of available bytes, up to max_in_bytes/max_out_bytes.
+ */
+void vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
+ unsigned int *out_bytes,
+ unsigned max_in_bytes, unsigned max_out_bytes);
+
+/**
+ * vu_queue_avail_bytes:
+ * @dev: a VuDev context
+ * @vq: a VuVirtq queue
+ * @in_bytes: expected in bytes
+ * @out_bytes: expected out bytes
+ *
+ * Returns: true if in_bytes <= in_total && out_bytes <= out_total
+ */
+bool vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
+ unsigned int out_bytes);
+
+#endif /* LIBVHOST_USER_H */
diff --git a/cpus.c b/cpus.c
index 5213351c6d..71a82e5004 100644
--- a/cpus.c
+++ b/cpus.c
@@ -33,7 +33,9 @@
#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
+#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
+#include "sysemu/hax.h"
#include "qmp-commands.h"
#include "exec/exec-all.h"
@@ -1220,6 +1222,46 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
return NULL;
}
+static void *qemu_hax_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+ int r;
+ qemu_thread_get_self(cpu->thread);
+ qemu_mutex_lock(&qemu_global_mutex);
+
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->created = true;
+ cpu->halted = 0;
+ current_cpu = cpu;
+
+ hax_init_vcpu(cpu);
+ qemu_cond_signal(&qemu_cpu_cond);
+
+ while (1) {
+ if (cpu_can_run(cpu)) {
+ r = hax_smp_cpu_exec(cpu);
+ if (r == EXCP_DEBUG) {
+ cpu_handle_guest_debug(cpu);
+ }
+ }
+
+ while (cpu_thread_is_idle(cpu)) {
+ qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+ }
+#ifdef _WIN32
+ SleepEx(0, TRUE);
+#endif
+ qemu_wait_io_event_common(cpu);
+ }
+ return NULL;
+}
+
+#ifdef _WIN32
+static void CALLBACK dummy_apc_func(ULONG_PTR unused)
+{
+}
+#endif
+
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
@@ -1235,7 +1277,13 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
exit(1);
}
#else /* _WIN32 */
- abort();
+ if (!qemu_cpu_is_self(cpu)) {
+ if (!QueueUserAPC(dummy_apc_func, cpu->hThread, 0)) {
+ fprintf(stderr, "%s: QueueUserAPC failed with error %lu\n",
+ __func__, GetLastError());
+ exit(1);
+ }
+ }
#endif
}
@@ -1258,6 +1306,13 @@ void qemu_cpu_kick(CPUState *cpu)
if (tcg_enabled()) {
qemu_cpu_kick_no_halt();
} else {
+ if (hax_enabled()) {
+ /*
+ * FIXME: race condition with the exit_request check in
+ * hax_vcpu_hax_exec
+ */
+ cpu->exit_request = 1;
+ }
qemu_cpu_kick_thread(cpu);
}
}
@@ -1418,6 +1473,26 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
}
}
+static void qemu_hax_start_vcpu(CPUState *cpu)
+{
+ char thread_name[VCPU_THREAD_NAME_SIZE];
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
+ cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+ qemu_cond_init(cpu->halt_cond);
+
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
+ cpu->cpu_index);
+ qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+#ifdef _WIN32
+ cpu->hThread = qemu_thread_get_handle(cpu->thread);
+#endif
+ while (!cpu->created) {
+ qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+ }
+}
+
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
@@ -1468,6 +1543,8 @@ void qemu_init_vcpu(CPUState *cpu)
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
+ } else if (hax_enabled()) {
+ qemu_hax_start_vcpu(cpu);
} else if (tcg_enabled()) {
qemu_tcg_init_vcpu(cpu);
} else {
diff --git a/cputlb.c b/cputlb.c
index 813279f3bc..6c39927455 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -60,24 +60,15 @@
/* statistics */
int tlb_flush_count;
-/* NOTE:
- * If flush_global is true (the usual case), flush all tlb entries.
- * If flush_global is false, flush (at least) all tlb entries not
- * marked global.
- *
- * Since QEMU doesn't currently implement a global/not-global flag
- * for tlb entries, at the moment tlb_flush() will also flush all
- * tlb entries in the flush_global == false case. This is OK because
- * CPU architectures generally permit an implementation to drop
- * entries from the TLB at any time, so flushing more entries than
- * required is only an efficiency issue, not a correctness issue.
+/* This is OK because CPU architectures generally permit an
+ * implementation to drop entries from the TLB at any time, so
+ * flushing more entries than required is only an efficiency issue,
+ * not a correctness issue.
*/
-void tlb_flush(CPUState *cpu, int flush_global)
+void tlb_flush(CPUState *cpu)
{
CPUArchState *env = cpu->env_ptr;
- tlb_debug("(%d)\n", flush_global);
-
memset(env->tlb_table, -1, sizeof(env->tlb_table));
memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
@@ -144,7 +135,7 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
env->tlb_flush_addr, env->tlb_flush_mask);
- tlb_flush(cpu, 1);
+ tlb_flush(cpu);
return;
}
diff --git a/default-configs/hppa-linux-user.mak b/default-configs/hppa-linux-user.mak
new file mode 100644
index 0000000000..796393940b
--- /dev/null
+++ b/default-configs/hppa-linux-user.mak
@@ -0,0 +1 @@
+# Default configuration for hppa-linux-user
diff --git a/default-configs/m68k-softmmu.mak b/default-configs/m68k-softmmu.mak
index d9552df076..60f7cdfbf2 100644
--- a/default-configs/m68k-softmmu.mak
+++ b/default-configs/m68k-softmmu.mak
@@ -1,6 +1,4 @@
# Default configuration for m68k-softmmu
-include pci.mak
-include usb.mak
CONFIG_COLDFIRE=y
CONFIG_PTIMER=y
diff --git a/default-configs/nios2-linux-user.mak b/default-configs/nios2-linux-user.mak
new file mode 100644
index 0000000000..5be3eb795d
--- /dev/null
+++ b/default-configs/nios2-linux-user.mak
@@ -0,0 +1 @@
+# Default configuration for nios2-linux-user
diff --git a/default-configs/nios2-softmmu.mak b/default-configs/nios2-softmmu.mak
new file mode 100644
index 0000000000..74dc70caae
--- /dev/null
+++ b/default-configs/nios2-softmmu.mak
@@ -0,0 +1,6 @@
+# Default configuration for nios2-softmmu
+
+CONFIG_NIOS2=y
+CONFIG_SERIAL=y
+CONFIG_PTIMER=y
+CONFIG_ALTERA_TIMER=y
diff --git a/default-configs/sparc64-softmmu.mak b/default-configs/sparc64-softmmu.mak
index c0cdd644c8..c581e61605 100644
--- a/default-configs/sparc64-softmmu.mak
+++ b/default-configs/sparc64-softmmu.mak
@@ -13,3 +13,5 @@ CONFIG_IDE_CMD646=y
CONFIG_PCI_APB=y
CONFIG_MC146818RTC=y
CONFIG_ISA_TESTDEV=y
+CONFIG_EMPTY_SLOT=y
+CONFIG_SUN4V_RTC=y
diff --git a/disas.c b/disas.c
index 67f116a19b..05a7a1260a 100644
--- a/disas.c
+++ b/disas.c
@@ -310,6 +310,8 @@ void disas(FILE *out, void *code, unsigned long size)
print_insn = print_insn_m68k;
#elif defined(__s390__)
print_insn = print_insn_s390;
+#elif defined(__hppa__)
+ print_insn = print_insn_hppa;
#elif defined(__ia64__)
print_insn = print_insn_ia64;
#endif
diff --git a/disas/Makefile.objs b/disas/Makefile.objs
index 09bc992883..62632ef0dd 100644
--- a/disas/Makefile.objs
+++ b/disas/Makefile.objs
@@ -9,11 +9,13 @@ libvixldir = $(SRC_PATH)/disas/libvixl
# versions do not.
arm-a64.o-cflags := -I$(libvixldir) -Wno-sign-compare
common-obj-$(CONFIG_CRIS_DIS) += cris.o
+common-obj-$(CONFIG_HPPA_DIS) += hppa.o
common-obj-$(CONFIG_I386_DIS) += i386.o
common-obj-$(CONFIG_IA64_DIS) += ia64.o
common-obj-$(CONFIG_M68K_DIS) += m68k.o
common-obj-$(CONFIG_MICROBLAZE_DIS) += microblaze.o
common-obj-$(CONFIG_MIPS_DIS) += mips.o
+common-obj-$(CONFIG_NIOS2_DIS) += nios2.o
common-obj-$(CONFIG_MOXIE_DIS) += moxie.o
common-obj-$(CONFIG_PPC_DIS) += ppc.o
common-obj-$(CONFIG_S390_DIS) += s390.o
diff --git a/disas/cris.c b/disas/cris.c
index 08161d1f21..8a1daf936c 100644
--- a/disas/cris.c
+++ b/disas/cris.c
@@ -2490,7 +2490,7 @@ print_with_operands (const struct cris_opcode *opcodep,
const struct cris_spec_reg *sregp
= spec_reg_info ((insn >> 12) & 15, disdata->distype);
- if (sregp->name == NULL)
+ if (sregp == NULL || sregp->name == NULL)
/* Should have been caught as a non-match earlier. */
*tp++ = '?';
else
diff --git a/disas/hppa.c b/disas/hppa.c
new file mode 100644
index 0000000000..43facdc47b
--- /dev/null
+++ b/disas/hppa.c
@@ -0,0 +1,2832 @@
+/* Disassembler for the PA-RISC. Somewhat derived from sparc-pinsn.c.
+ Copyright 1989, 1990, 1992, 1993, 1994, 1995, 1998, 1999, 2000, 2001, 2003,
+ 2005 Free Software Foundation, Inc.
+
+ Contributed by the Center for Software Science at the
+ University of Utah (pa-gdb-bugs@cs.utah.edu).
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>. */
+
+#include "qemu/osdep.h"
+#include "disas/bfd.h"
+
+/* HP PA-RISC SOM object file format: definitions internal to BFD.
+ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000,
+ 2003 Free Software Foundation, Inc.
+
+ Contributed by the Center for Software Science at the
+ University of Utah (pa-gdb-bugs@cs.utah.edu).
+
+ This file is part of BFD, the Binary File Descriptor library.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef _LIBHPPA_H
+#define _LIBHPPA_H
+
+#define BYTES_IN_WORD 4
+#define PA_PAGESIZE 0x1000
+
+/* The PA instruction set variants. */
+enum pa_arch {pa10 = 10, pa11 = 11, pa20 = 20, pa20w = 25};
+
+/* HP PA-RISC relocation types */
+
+enum hppa_reloc_field_selector_type
+ {
+ R_HPPA_FSEL = 0x0,
+ R_HPPA_LSSEL = 0x1,
+ R_HPPA_RSSEL = 0x2,
+ R_HPPA_LSEL = 0x3,
+ R_HPPA_RSEL = 0x4,
+ R_HPPA_LDSEL = 0x5,
+ R_HPPA_RDSEL = 0x6,
+ R_HPPA_LRSEL = 0x7,
+ R_HPPA_RRSEL = 0x8,
+ R_HPPA_NSEL = 0x9,
+ R_HPPA_NLSEL = 0xa,
+ R_HPPA_NLRSEL = 0xb,
+ R_HPPA_PSEL = 0xc,
+ R_HPPA_LPSEL = 0xd,
+ R_HPPA_RPSEL = 0xe,
+ R_HPPA_TSEL = 0xf,
+ R_HPPA_LTSEL = 0x10,
+ R_HPPA_RTSEL = 0x11,
+ R_HPPA_LTPSEL = 0x12,
+ R_HPPA_RTPSEL = 0x13
+ };
+
+/* /usr/include/reloc.h defines these to constants. We want to use
+ them in enums, so #undef them before we start using them. We might
+ be able to fix this another way by simply managing not to include
+ /usr/include/reloc.h, but currently GDB picks up these defines
+ somewhere. */
+#undef e_fsel
+#undef e_lssel
+#undef e_rssel
+#undef e_lsel
+#undef e_rsel
+#undef e_ldsel
+#undef e_rdsel
+#undef e_lrsel
+#undef e_rrsel
+#undef e_nsel
+#undef e_nlsel
+#undef e_nlrsel
+#undef e_psel
+#undef e_lpsel
+#undef e_rpsel
+#undef e_tsel
+#undef e_ltsel
+#undef e_rtsel
+#undef e_one
+#undef e_two
+#undef e_pcrel
+#undef e_con
+#undef e_plabel
+#undef e_abs
+
+/* for compatibility */
+enum hppa_reloc_field_selector_type_alt
+ {
+ e_fsel = R_HPPA_FSEL,
+ e_lssel = R_HPPA_LSSEL,
+ e_rssel = R_HPPA_RSSEL,
+ e_lsel = R_HPPA_LSEL,
+ e_rsel = R_HPPA_RSEL,
+ e_ldsel = R_HPPA_LDSEL,
+ e_rdsel = R_HPPA_RDSEL,
+ e_lrsel = R_HPPA_LRSEL,
+ e_rrsel = R_HPPA_RRSEL,
+ e_nsel = R_HPPA_NSEL,
+ e_nlsel = R_HPPA_NLSEL,
+ e_nlrsel = R_HPPA_NLRSEL,
+ e_psel = R_HPPA_PSEL,
+ e_lpsel = R_HPPA_LPSEL,
+ e_rpsel = R_HPPA_RPSEL,
+ e_tsel = R_HPPA_TSEL,
+ e_ltsel = R_HPPA_LTSEL,
+ e_rtsel = R_HPPA_RTSEL,
+ e_ltpsel = R_HPPA_LTPSEL,
+ e_rtpsel = R_HPPA_RTPSEL
+ };
+
+enum hppa_reloc_expr_type
+ {
+ R_HPPA_E_ONE = 0,
+ R_HPPA_E_TWO = 1,
+ R_HPPA_E_PCREL = 2,
+ R_HPPA_E_CON = 3,
+ R_HPPA_E_PLABEL = 7,
+ R_HPPA_E_ABS = 18
+ };
+
+/* for compatibility */
+enum hppa_reloc_expr_type_alt
+ {
+ e_one = R_HPPA_E_ONE,
+ e_two = R_HPPA_E_TWO,
+ e_pcrel = R_HPPA_E_PCREL,
+ e_con = R_HPPA_E_CON,
+ e_plabel = R_HPPA_E_PLABEL,
+ e_abs = R_HPPA_E_ABS
+ };
+
+
+/* Relocations for function calls must be accompanied by parameter
+ relocation bits. These bits describe exactly where the caller has
+ placed the function's arguments and where it expects to find a return
+ value.
+
+ Both ELF and SOM encode this information within the addend field
+ of the call relocation. (Note this could break very badly if one
+ was to make a call like bl foo + 0x12345678).
+
+ The high order 10 bits contain parameter relocation information,
+ the low order 22 bits contain the constant offset. */
+
+#define HPPA_R_ARG_RELOC(a) \
+ (((a) >> 22) & 0x3ff)
+#define HPPA_R_CONSTANT(a) \
+ ((((bfd_signed_vma)(a)) << (BFD_ARCH_SIZE-22)) >> (BFD_ARCH_SIZE-22))
+#define HPPA_R_ADDEND(r, c) \
+ (((r) << 22) + ((c) & 0x3fffff))
+
+
+/* Some functions to manipulate PA instructions. */
+
+/* Declare the functions with the unused attribute to avoid warnings. */
+static inline int sign_extend (int, int) ATTRIBUTE_UNUSED;
+static inline int low_sign_extend (int, int) ATTRIBUTE_UNUSED;
+static inline int sign_unext (int, int) ATTRIBUTE_UNUSED;
+static inline int low_sign_unext (int, int) ATTRIBUTE_UNUSED;
+static inline int re_assemble_3 (int) ATTRIBUTE_UNUSED;
+static inline int re_assemble_12 (int) ATTRIBUTE_UNUSED;
+static inline int re_assemble_14 (int) ATTRIBUTE_UNUSED;
+static inline int re_assemble_16 (int) ATTRIBUTE_UNUSED;
+static inline int re_assemble_17 (int) ATTRIBUTE_UNUSED;
+static inline int re_assemble_21 (int) ATTRIBUTE_UNUSED;
+static inline int re_assemble_22 (int) ATTRIBUTE_UNUSED;
+static inline bfd_signed_vma hppa_field_adjust
+ (bfd_vma, bfd_signed_vma, enum hppa_reloc_field_selector_type_alt)
+ ATTRIBUTE_UNUSED;
+static inline int hppa_rebuild_insn (int, int, int) ATTRIBUTE_UNUSED;
+
+
+/* The *sign_extend functions are used to assemble various bitfields
+ taken from an instruction and return the resulting immediate
+ value. */
+
+static inline int
+sign_extend (int x, int len)
+{
+ int signbit = (1 << (len - 1));
+ int mask = (signbit << 1) - 1;
+ return ((x & mask) ^ signbit) - signbit;
+}
+
+static inline int
+low_sign_extend (int x, int len)
+{
+ return (x >> 1) - ((x & 1) << (len - 1));
+}
+
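+/* For example, sign_extend(0x1f, 5) == -1, while the PA-RISC "low sign"
+   form keeps the sign in the least significant bit, so
+   low_sign_extend(0x01, 5) == -16.  */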
+
+/* The re_assemble_* functions prepare an immediate value for
+ insertion into an opcode. pa-risc uses all sorts of weird bitfields
+ in the instruction to hold the value. */
+
+static inline int
+sign_unext (int x, int len)
+{
+ int len_ones;
+
+ len_ones = (1 << len) - 1;
+
+ return x & len_ones;
+}
+
+static inline int
+low_sign_unext (int x, int len)
+{
+ int temp;
+ int sign;
+
+ sign = (x >> (len-1)) & 1;
+
+ temp = sign_unext (x, len-1);
+
+ return (temp << 1) | sign;
+}
+
+static inline int
+re_assemble_3 (int as3)
+{
+ return (( (as3 & 4) << (13-2))
+ | ((as3 & 3) << (13+1)));
+}
+
+static inline int
+re_assemble_12 (int as12)
+{
+ return (( (as12 & 0x800) >> 11)
+ | ((as12 & 0x400) >> (10 - 2))
+ | ((as12 & 0x3ff) << (1 + 2)));
+}
+
+static inline int
+re_assemble_14 (int as14)
+{
+ return (( (as14 & 0x1fff) << 1)
+ | ((as14 & 0x2000) >> 13));
+}
+
+static inline int
+re_assemble_16 (int as16)
+{
+ int s, t;
+
+ /* Unusual 16-bit encoding, for wide mode only. */
+ t = (as16 << 1) & 0xffff;
+ s = (as16 & 0x8000);
+ return (t ^ s ^ (s >> 1)) | (s >> 15);
+}
+
+static inline int
+re_assemble_17 (int as17)
+{
+ return (( (as17 & 0x10000) >> 16)
+ | ((as17 & 0x0f800) << (16 - 11))
+ | ((as17 & 0x00400) >> (10 - 2))
+ | ((as17 & 0x003ff) << (1 + 2)));
+}
+
+static inline int
+re_assemble_21 (int as21)
+{
+ return (( (as21 & 0x100000) >> 20)
+ | ((as21 & 0x0ffe00) >> 8)
+ | ((as21 & 0x000180) << 7)
+ | ((as21 & 0x00007c) << 14)
+ | ((as21 & 0x000003) << 12));
+}
+
+static inline int
+re_assemble_22 (int as22)
+{
+ return (( (as22 & 0x200000) >> 21)
+ | ((as22 & 0x1f0000) << (21 - 16))
+ | ((as22 & 0x00f800) << (16 - 11))
+ | ((as22 & 0x000400) >> (10 - 2))
+ | ((as22 & 0x0003ff) << (1 + 2)));
+}
+
+
+/* Handle field selectors for PA instructions.
+ The L and R (and LS, RS etc.) selectors are used in pairs to form a
+ full 32 bit address. eg.
+
+ LDIL L'start,%r1 ; put left part into r1
+ LDW R'start(%r1),%r2 ; add r1 and right part to form address
+
+ This function returns sign extended values in all cases.
+*/
+
+static inline bfd_signed_vma
+hppa_field_adjust (bfd_vma sym_val,
+ bfd_signed_vma addend,
+ enum hppa_reloc_field_selector_type_alt r_field)
+{
+ bfd_signed_vma value;
+
+ value = sym_val + addend;
+ switch (r_field)
+ {
+ case e_fsel:
+ /* F: No change. */
+ break;
+
+ case e_nsel:
+ /* N: null selector. I don't really understand what this is all
+ about, but HP's documentation says "this indicates that zero
+ bits are to be used for the displacement on the instruction.
+ This fixup is used to identify three-instruction sequences to
+ access data (for importing shared library data)." */
+ value = 0;
+ break;
+
+ case e_lsel:
+ case e_nlsel:
+ /* L: Select top 21 bits. */
+ value = value >> 11;
+ break;
+
+ case e_rsel:
+ /* R: Select bottom 11 bits. */
+ value = value & 0x7ff;
+ break;
+
+ case e_lssel:
+ /* LS: Round to nearest multiple of 2048 then select top 21 bits. */
+ value = value + 0x400;
+ value = value >> 11;
+ break;
+
+ case e_rssel:
+ /* RS: Select bottom 11 bits for LS.
+ We need to return a value such that 2048 * LS'x + RS'x == x.
+ ie. RS'x = x - ((x + 0x400) & -0x800)
+ this is just a sign extension from bit 21. */
+ value = ((value & 0x7ff) ^ 0x400) - 0x400;
+ break;
+
+ case e_ldsel:
+ /* LD: Round to next multiple of 2048 then select top 21 bits.
+ Yes, if we are already on a multiple of 2048, we go up to the
+ next one. RD in this case will be -2048. */
+ value = value + 0x800;
+ value = value >> 11;
+ break;
+
+ case e_rdsel:
+ /* RD: Set bits 0-20 to one. */
+ value = value | -0x800;
+ break;
+
+ case e_lrsel:
+ case e_nlrsel:
+ /* LR: L with rounding of the addend to nearest 8k. */
+ value = sym_val + ((addend + 0x1000) & -0x2000);
+ value = value >> 11;
+ break;
+
+ case e_rrsel:
+ /* RR: R with rounding of the addend to nearest 8k.
+ We need to return a value such that 2048 * LR'x + RR'x == x
+ ie. RR'x = s+a - (s + (((a + 0x1000) & -0x2000) & -0x800))
+ . = s+a - ((s & -0x800) + ((a + 0x1000) & -0x2000))
+ . = (s & 0x7ff) + a - ((a + 0x1000) & -0x2000) */
+ value = (sym_val & 0x7ff) + (((addend & 0x1fff) ^ 0x1000) - 0x1000);
+ break;
+
+ default:
+ abort ();
+ }
+ return value;
+}
+
+/* PA-RISC OPCODES */
+#define get_opcode(insn) (((insn) >> 26) & 0x3f)
+
+enum hppa_opcode_type
+{
+ /* None of the opcodes in the first group generate relocs, so we
+ aren't too concerned about them. */
+ OP_SYSOP = 0x00,
+ OP_MEMMNG = 0x01,
+ OP_ALU = 0x02,
+ OP_NDXMEM = 0x03,
+ OP_SPOP = 0x04,
+ OP_DIAG = 0x05,
+ OP_FMPYADD = 0x06,
+ OP_UNDEF07 = 0x07,
+ OP_COPRW = 0x09,
+ OP_COPRDW = 0x0b,
+ OP_COPR = 0x0c,
+ OP_FLOAT = 0x0e,
+ OP_PRDSPEC = 0x0f,
+ OP_UNDEF15 = 0x15,
+ OP_UNDEF1d = 0x1d,
+ OP_FMPYSUB = 0x26,
+ OP_FPFUSED = 0x2e,
+ OP_SHEXDP0 = 0x34,
+ OP_SHEXDP1 = 0x35,
+ OP_SHEXDP2 = 0x36,
+ OP_UNDEF37 = 0x37,
+ OP_SHEXDP3 = 0x3c,
+ OP_SHEXDP4 = 0x3d,
+ OP_MULTMED = 0x3e,
+ OP_UNDEF3f = 0x3f,
+
+ OP_LDIL = 0x08,
+ OP_ADDIL = 0x0a,
+
+ OP_LDO = 0x0d,
+ OP_LDB = 0x10,
+ OP_LDH = 0x11,
+ OP_LDW = 0x12,
+ OP_LDWM = 0x13,
+ OP_STB = 0x18,
+ OP_STH = 0x19,
+ OP_STW = 0x1a,
+ OP_STWM = 0x1b,
+
+ OP_LDD = 0x14,
+ OP_STD = 0x1c,
+
+ OP_FLDW = 0x16,
+ OP_LDWL = 0x17,
+ OP_FSTW = 0x1e,
+ OP_STWL = 0x1f,
+
+ OP_COMBT = 0x20,
+ OP_COMIBT = 0x21,
+ OP_COMBF = 0x22,
+ OP_COMIBF = 0x23,
+ OP_CMPBDT = 0x27,
+ OP_ADDBT = 0x28,
+ OP_ADDIBT = 0x29,
+ OP_ADDBF = 0x2a,
+ OP_ADDIBF = 0x2b,
+ OP_CMPBDF = 0x2f,
+ OP_BVB = 0x30,
+ OP_BB = 0x31,
+ OP_MOVB = 0x32,
+ OP_MOVIB = 0x33,
+ OP_CMPIBD = 0x3b,
+
+ OP_COMICLR = 0x24,
+ OP_SUBI = 0x25,
+ OP_ADDIT = 0x2c,
+ OP_ADDI = 0x2d,
+
+ OP_BE = 0x38,
+ OP_BLE = 0x39,
+ OP_BL = 0x3a
+};
+
+
+/* Insert VALUE into INSN using R_FORMAT to determine exactly what
+ bits to change. */
+
+static inline int
+hppa_rebuild_insn (int insn, int value, int r_format)
+{
+ switch (r_format)
+ {
+ case 11:
+ return (insn & ~ 0x7ff) | low_sign_unext (value, 11);
+
+ case 12:
+ return (insn & ~ 0x1ffd) | re_assemble_12 (value);
+
+
+ case 10:
+ return (insn & ~ 0x3ff1) | re_assemble_14 (value & -8);
+
+ case -11:
+ return (insn & ~ 0x3ff9) | re_assemble_14 (value & -4);
+
+ case 14:
+ return (insn & ~ 0x3fff) | re_assemble_14 (value);
+
+
+ case -10:
+ return (insn & ~ 0xfff1) | re_assemble_16 (value & -8);
+
+ case -16:
+ return (insn & ~ 0xfff9) | re_assemble_16 (value & -4);
+
+ case 16:
+ return (insn & ~ 0xffff) | re_assemble_16 (value);
+
+
+ case 17:
+ return (insn & ~ 0x1f1ffd) | re_assemble_17 (value);
+
+ case 21:
+ return (insn & ~ 0x1fffff) | re_assemble_21 (value);
+
+ case 22:
+ return (insn & ~ 0x3ff1ffd) | re_assemble_22 (value);
+
+ case 32:
+ return value;
+
+ default:
+ abort ();
+ }
+ return insn;
+}
+
+#endif /* _LIBHPPA_H */
+/* Table of opcodes for the PA-RISC.
+ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000,
+ 2001, 2002, 2003, 2004, 2005
+ Free Software Foundation, Inc.
+
+ Contributed by the Center for Software Science at the
+ University of Utah (pa-gdb-bugs@cs.utah.edu).
+
+This file is part of GAS, the GNU Assembler, and GDB, the GNU disassembler.
+
+GAS/GDB is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 1, or (at your option)
+any later version.
+
+GAS/GDB is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GAS or GDB; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>. */
+
+#if !defined(__STDC__) && !defined(const)
+#define const
+#endif
+
+/*
+ * Structure of an opcode table entry.
+ */
+
+/* There are two kinds of delay slot nullification: normal which is
+ * controlled by the nullification bit, and conditional, which depends
+ * on the direction of the branch and its success or failure.
+ *
+ * NONE is unfortunately #defined in the hiux system include files.
+ * #undef it away.
+ */
+#undef NONE
+struct pa_opcode
+{
+ const char *name;
+ unsigned long int match; /* Bits that must be set... */
+ unsigned long int mask; /* ... in these bits. */
+ const char *args;
+ enum pa_arch arch;
+ char flags;
+};
+
+/* Enables strict matching. Opcodes with match errors are skipped
+ when this bit is set. */
+#define FLAG_STRICT 0x1
+
+/*
+ All hppa opcodes are 32 bits.
+
+ The match component is a mask saying which bits must match a
+ particular opcode in order for an instruction to be an instance
+ of that opcode.
+
+ The args component is a string containing one character for each operand of
+ the instruction. Characters used as a prefix allow any second character to
+ be used without conflicting with the main operand characters.
+
+ Bit positions in this description follow HP usage of lsb = 31,
+ "at" is lsb of field.
+
+ In the args field, the following characters must match exactly:
+
+ '+,() '
+
+ In the args field, the following characters are unused:
+
+ ' " - / 34 6789:; '
+ '@ C M [\] '
+ '` e g } '
+
+ Here are all the characters:
+
+ ' !"#$%&'()*+-,./0123456789:;<=>?'
+ '@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_'
+ '`abcdefghijklmnopqrstuvwxyz{|}~ '
+
+Kinds of operands:
+ x integer register field at 15.
+ b integer register field at 10.
+ t integer register field at 31.
+ a integer register field at 10 and 15 (for PERMH)
+ 5 5 bit immediate at 15.
+ s 2 bit space specifier at 17.
+ S 3 bit space specifier at 18.
+ V 5 bit immediate value at 31
+ i 11 bit immediate value at 31
+ j 14 bit immediate value at 31
+ k 21 bit immediate value at 31
+ l 16 bit immediate value at 31 (wide mode only, unusual encoding).
+ n nullification for branch instructions
+ N nullification for spop and copr instructions
+ w 12 bit branch displacement
+ W 17 bit branch displacement (PC relative)
+ X 22 bit branch displacement (PC relative)
+ z 17 bit branch displacement (just a number, not an address)
+
+Also these:
+
+ . 2 bit shift amount at 25
+ * 4 bit shift amount at 25
+ p 5 bit shift count at 26 (to support the SHD instruction) encoded as
+ 31-p
+ ~ 6 bit shift count at 20,22:26 encoded as 63-~.
+ P 5 bit bit position at 26
+ q 6 bit bit position at 20,22:26
+ T 5 bit field length at 31 (encoded as 32-T)
+ % 6 bit field length at 23,27:31 (variable extract/deposit)
+ | 6 bit field length at 19,27:31 (fixed extract/deposit)
+ A 13 bit immediate at 18 (to support the BREAK instruction)
+ ^ like b, but describes a control register
+ ! sar (cr11) register
+ D 26 bit immediate at 31 (to support the DIAG instruction)
+ $ 9 bit immediate at 28 (to support POPBTS)
+
+ v 3 bit Special Function Unit identifier at 25
+ O 20 bit Special Function Unit operation split between 15 bits at 20
+ and 5 bits at 31
+ o 15 bit Special Function Unit operation at 20
+ 2 22 bit Special Function Unit operation split between 17 bits at 20
+ and 5 bits at 31
+ 1 15 bit Special Function Unit operation split between 10 bits at 20
+ and 5 bits at 31
+ 0 10 bit Special Function Unit operation split between 5 bits at 20
+ and 5 bits at 31
+ u 3 bit coprocessor unit identifier at 25
+ F Source Floating Point Operand Format Completer encoded 2 bits at 20
+ I Source Floating Point Operand Format Completer encoded 1 bits at 20
+ (for 0xe format FP instructions)
+ G Destination Floating Point Operand Format Completer encoded 2 bits at 18
+ H Floating Point Operand Format at 26 for 'fmpyadd' and 'fmpysub'
+ (very similar to 'F')
+
+ r 5 bit immediate value at 31 (for the break instruction)
+ (very similar to V above, except the value is unsigned instead of
+ low_sign_ext)
+ R 5 bit immediate value at 15 (for the ssm, rsm, probei instructions)
+ (same as r above, except the value is in a different location)
+ U 10 bit immediate value at 15 (for SSM, RSM on pa2.0)
+ Q 5 bit immediate value at 10 (a bit position specified in
+ the bb instruction. It's the same as r above, except the
+ value is in a different location)
+ B 5 bit immediate value at 10 (a bit position specified in
+ the bb instruction. Similar to Q, but 64 bit handling is
+ different.
+ Z %r1 -- implicit target of addil instruction.
+ L ,%r2 completer for new syntax branch
+ { Source format completer for fcnv
+ _ Destination format completer for fcnv
+ h cbit for fcmp
+ = gfx tests for ftest
+ d 14 bit offset for single precision FP long load/store.
+ # 14 bit offset for double precision FP load long/store.
+ J Yet another 14 bit offset for load/store with ma,mb completers.
+ K Yet another 14 bit offset for load/store with ma,mb completers.
+ y 16 bit offset for word aligned load/store (PA2.0 wide).
+ & 16 bit offset for dword aligned load/store (PA2.0 wide).
+ < 16 bit offset for load/store with ma,mb completers (PA2.0 wide).
+ > 16 bit offset for load/store with ma,mb completers (PA2.0 wide).
+ Y %sr0,%r31 -- implicit target of be,l instruction.
+ @ implicit immediate value of 0
+
+Completer operands all have 'c' as the prefix:
+
+ cx indexed load and store completer.
+ cX indexed load and store completer. Like cx, but emits a space
+ after in disassembler.
+ cm short load and store completer.
+ cM short load and store completer. Like cm, but emits a space
+ after in disassembler.
+ cq long load and store completer (like cm, but inserted into a
+ different location in the target instruction).
+ cs store bytes short completer.
+ cA store bytes short completer. Like cs, but emits a space
+ after in disassembler.
+ ce long load/store completer for LDW/STW with a different encoding
+ than the others
+ cc load cache control hint
+ cd load and clear cache control hint
+ cC store cache control hint
+ co ordered access
+
+ cp branch link and push completer
+ cP branch pop completer
+ cl branch link completer
+ cg branch gate completer
+
+ cw read/write completer for PROBE
+ cW wide completer for MFCTL
+ cL local processor completer for cache control
+ cZ System Control Completer (to support LPA, LHA, etc.)
+
+ ci correction completer for DCOR
+ ca add completer
+ cy 32 bit add carry completer
+ cY 64 bit add carry completer
+ cv signed overflow trap completer
+ ct trap on condition completer for ADDI, SUB
+ cT trap on condition completer for UADDCM
+ cb 32 bit borrow completer for SUB
+ cB 64 bit borrow completer for SUB
+
+ ch left/right half completer
+ cH signed/unsigned saturation completer
+ cS signed/unsigned completer at 21
+ cz zero/sign extension completer.
+ c* permutation completer
+
+Condition operands all have '?' as the prefix:
+
+ ?f Floating point compare conditions (encoded as 5 bits at 31)
+
+ ?a add conditions
+ ?A 64 bit add conditions
+ ?@ add branch conditions followed by nullify
+ ?d non-negated add branch conditions
+ ?D negated add branch conditions
+ ?w wide mode non-negated add branch conditions
+ ?W wide mode negated add branch conditions
+
+ ?s compare/subtract conditions
+ ?S 64 bit compare/subtract conditions
+ ?t non-negated compare and branch conditions
+ ?n 32 bit compare and branch conditions followed by nullify
+ ?N 64 bit compare and branch conditions followed by nullify
+ ?Q 64 bit compare and branch conditions for CMPIB instruction
+
+ ?l logical conditions
+ ?L 64 bit logical conditions
+
+ ?b branch on bit conditions
+ ?B 64 bit branch on bit conditions
+
+ ?x shift/extract/deposit conditions
+ ?X 64 bit shift/extract/deposit conditions
+ ?y shift/extract/deposit conditions followed by nullify for conditional
+ branches
+
+ ?u unit conditions
+ ?U 64 bit unit conditions
+
+Floating point registers all have 'f' as a prefix:
+
+ ft target register at 31
+ fT target register with L/R halves at 31
+ fa operand 1 register at 10
+ fA operand 1 register with L/R halves at 10
+ fX Same as fA, except prints a space before register during disasm
+ fb operand 2 register at 15
+ fB operand 2 register with L/R halves at 15
+ fC operand 3 register with L/R halves at 16:18,21:23
+ fe Like fT, but encoding is different.
+ fE Same as fe, except prints a space before register during disasm.
+ fx target register at 15 (only for PA 2.0 long format FLDD/FSTD).
+
+Float registers for fmpyadd and fmpysub:
+
+ fi mult operand 1 register at 10
+ fj mult operand 2 register at 15
+ fk mult target register at 20
+ fl add/sub operand register at 25
+ fm add/sub target register at 31
+
+*/
+
+
+#if 0
+/* List of characters not to put a space after. Note that
+ "," is included, as the "spopN" operations use literal
+ commas in their completer sections. */
+static const char *const completer_chars = ",CcY<>?!@+&U~FfGHINnOoZMadu|/=0123%e$m}";
+#endif
+
+/* The order of the opcodes in this table is significant:
+
+ * The assembler requires that all instances of the same mnemonic be
+ consecutive. If they aren't, the assembler will bomb at runtime.
+
+ * Immediate fields use pa_get_absolute_expression to parse the
+ string. It will generate a "bad expression" error if passed
+ a register name. Thus, register index variants of an opcode
+ need to precede immediate variants.
+
+ * The disassembler does not care about the order of the opcodes
+ except in cases where implicit addressing is used.
+
+ Here are the rules for ordering the opcodes of a mnemonic:
+
+ 1) Opcodes with FLAG_STRICT should precede opcodes without
+ FLAG_STRICT.
+
+ 2) Opcodes with FLAG_STRICT should be ordered as follows:
+ register index opcodes, short immediate opcodes, and finally
+ long immediate opcodes. When both pa10 and pa11 variants
+ of the same opcode are available, the pa10 opcode should
+ come first for correct architectural promotion.
+
+ 3) When implicit addressing is available for an opcode, the
+ implicit opcode should precede the explicit opcode.
+
+ 4) Opcodes without FLAG_STRICT should be ordered as follows:
+ register index opcodes, long immediate opcodes, and finally
+ short immediate opcodes. */
+
+static const struct pa_opcode pa_opcodes[] =
+{
+
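+/* Reading an entry, e.g. { "ldi", 0x34000000, 0xffe0c000, "j,x", pa10, 0}:
+   an instruction is an instance of "ldi" when (insn & 0xffe0c000) equals
+   0x34000000, and its operands follow the args string -- here a 14 bit
+   immediate value at 31 ("j") and an integer register field at 15 ("x").  */
+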
+/* Pseudo-instructions. */
+
+{ "ldi", 0x34000000, 0xffe00000, "l,x", pa20w, 0},/* ldo val(r0),r */
+{ "ldi", 0x34000000, 0xffe0c000, "j,x", pa10, 0},/* ldo val(r0),r */
+
+{ "cmpib", 0xec000000, 0xfc000000, "?Qn5,b,w", pa20, FLAG_STRICT},
+{ "cmpib", 0x84000000, 0xf4000000, "?nn5,b,w", pa10, FLAG_STRICT},
+{ "comib", 0x84000000, 0xfc000000, "?nn5,b,w", pa10, 0}, /* comib{tf}*/
+/* This entry is for the disassembler only. It will never be used by
+ assembler. */
+{ "comib", 0x8c000000, 0xfc000000, "?nn5,b,w", pa10, 0}, /* comib{tf}*/
+{ "cmpb", 0x9c000000, 0xdc000000, "?Nnx,b,w", pa20, FLAG_STRICT},
+{ "cmpb", 0x80000000, 0xf4000000, "?nnx,b,w", pa10, FLAG_STRICT},
+{ "comb", 0x80000000, 0xfc000000, "?nnx,b,w", pa10, 0}, /* comb{tf} */
+/* This entry is for the disassembler only. It will never be used by
+ assembler. */
+{ "comb", 0x88000000, 0xfc000000, "?nnx,b,w", pa10, 0}, /* comb{tf} */
+{ "addb", 0xa0000000, 0xf4000000, "?Wnx,b,w", pa20w, FLAG_STRICT},
+{ "addb", 0xa0000000, 0xfc000000, "?@nx,b,w", pa10, 0}, /* addb{tf} */
+/* This entry is for the disassembler only. It will never be used by
+ assembler. */
+{ "addb", 0xa8000000, 0xfc000000, "?@nx,b,w", pa10, 0},
+{ "addib", 0xa4000000, 0xf4000000, "?Wn5,b,w", pa20w, FLAG_STRICT},
+{ "addib", 0xa4000000, 0xfc000000, "?@n5,b,w", pa10, 0}, /* addib{tf}*/
+/* This entry is for the disassembler only. It will never be used by
+ assembler. */
+{ "addib", 0xac000000, 0xfc000000, "?@n5,b,w", pa10, 0}, /* addib{tf}*/
+{ "nop", 0x08000240, 0xffffffff, "", pa10, 0}, /* or 0,0,0 */
+{ "copy", 0x08000240, 0xffe0ffe0, "x,t", pa10, 0}, /* or r,0,t */
+{ "mtsar", 0x01601840, 0xffe0ffff, "x", pa10, 0}, /* mtctl r,cr11 */
+
+/* Loads and Stores for integer registers. */
+
+{ "ldd", 0x0c0000c0, 0xfc00d3c0, "cxccx(b),t", pa20, FLAG_STRICT},
+{ "ldd", 0x0c0000c0, 0xfc0013c0, "cxccx(s,b),t", pa20, FLAG_STRICT},
+{ "ldd", 0x0c0010e0, 0xfc1ff3e0, "cocc@(b),t", pa20, FLAG_STRICT},
+{ "ldd", 0x0c0010e0, 0xfc1f33e0, "cocc@(s,b),t", pa20, FLAG_STRICT},
+{ "ldd", 0x0c0010c0, 0xfc00d3c0, "cmcc5(b),t", pa20, FLAG_STRICT},
+{ "ldd", 0x0c0010c0, 0xfc0013c0, "cmcc5(s,b),t", pa20, FLAG_STRICT},
+{ "ldd", 0x50000000, 0xfc000002, "cq&(b),x", pa20w, FLAG_STRICT},
+{ "ldd", 0x50000000, 0xfc00c002, "cq#(b),x", pa20, FLAG_STRICT},
+{ "ldd", 0x50000000, 0xfc000002, "cq#(s,b),x", pa20, FLAG_STRICT},
+{ "ldw", 0x0c000080, 0xfc00dfc0, "cXx(b),t", pa10, FLAG_STRICT},
+{ "ldw", 0x0c000080, 0xfc001fc0, "cXx(s,b),t", pa10, FLAG_STRICT},
+{ "ldw", 0x0c000080, 0xfc00d3c0, "cxccx(b),t", pa11, FLAG_STRICT},
+{ "ldw", 0x0c000080, 0xfc0013c0, "cxccx(s,b),t", pa11, FLAG_STRICT},
+{ "ldw", 0x0c0010a0, 0xfc1ff3e0, "cocc@(b),t", pa20, FLAG_STRICT},
+{ "ldw", 0x0c0010a0, 0xfc1f33e0, "cocc@(s,b),t", pa20, FLAG_STRICT},
+{ "ldw", 0x0c001080, 0xfc00dfc0, "cM5(b),t", pa10, FLAG_STRICT},
+{ "ldw", 0x0c001080, 0xfc001fc0, "cM5(s,b),t", pa10, FLAG_STRICT},
+{ "ldw", 0x0c001080, 0xfc00d3c0, "cmcc5(b),t", pa11, FLAG_STRICT},
+{ "ldw", 0x0c001080, 0xfc0013c0, "cmcc5(s,b),t", pa11, FLAG_STRICT},
+{ "ldw", 0x4c000000, 0xfc000000, "ce<(b),x", pa20w, FLAG_STRICT},
+{ "ldw", 0x5c000004, 0xfc000006, "ce>(b),x", pa20w, FLAG_STRICT},
+{ "ldw", 0x48000000, 0xfc000000, "l(b),x", pa20w, FLAG_STRICT},
+{ "ldw", 0x5c000004, 0xfc00c006, "ceK(b),x", pa20, FLAG_STRICT},
+{ "ldw", 0x5c000004, 0xfc000006, "ceK(s,b),x", pa20, FLAG_STRICT},
+{ "ldw", 0x4c000000, 0xfc00c000, "ceJ(b),x", pa10, FLAG_STRICT},
+{ "ldw", 0x4c000000, 0xfc000000, "ceJ(s,b),x", pa10, FLAG_STRICT},
+{ "ldw", 0x48000000, 0xfc00c000, "j(b),x", pa10, 0},
+{ "ldw", 0x48000000, 0xfc000000, "j(s,b),x", pa10, 0},
+{ "ldh", 0x0c000040, 0xfc00dfc0, "cXx(b),t", pa10, FLAG_STRICT},
+{ "ldh", 0x0c000040, 0xfc001fc0, "cXx(s,b),t", pa10, FLAG_STRICT},
+{ "ldh", 0x0c000040, 0xfc00d3c0, "cxccx(b),t", pa11, FLAG_STRICT},
+{ "ldh", 0x0c000040, 0xfc0013c0, "cxccx(s,b),t", pa11, FLAG_STRICT},
+{ "ldh", 0x0c001060, 0xfc1ff3e0, "cocc@(b),t", pa20, FLAG_STRICT},
+{ "ldh", 0x0c001060, 0xfc1f33e0, "cocc@(s,b),t", pa20, FLAG_STRICT},
+{ "ldh", 0x0c001040, 0xfc00dfc0, "cM5(b),t", pa10, FLAG_STRICT},
+{ "ldh", 0x0c001040, 0xfc001fc0, "cM5(s,b),t", pa10, FLAG_STRICT},
+{ "ldh", 0x0c001040, 0xfc00d3c0, "cmcc5(b),t", pa11, FLAG_STRICT},
+{ "ldh", 0x0c001040, 0xfc0013c0, "cmcc5(s,b),t", pa11, FLAG_STRICT},
+{ "ldh", 0x44000000, 0xfc000000, "l(b),x", pa20w, FLAG_STRICT},
+{ "ldh", 0x44000000, 0xfc00c000, "j(b),x", pa10, 0},
+{ "ldh", 0x44000000, 0xfc000000, "j(s,b),x", pa10, 0},
+{ "ldb", 0x0c000000, 0xfc00dfc0, "cXx(b),t", pa10, FLAG_STRICT},
+{ "ldb", 0x0c000000, 0xfc001fc0, "cXx(s,b),t", pa10, FLAG_STRICT},
+{ "ldb", 0x0c000000, 0xfc00d3c0, "cxccx(b),t", pa11, FLAG_STRICT},
+{ "ldb", 0x0c000000, 0xfc0013c0, "cxccx(s,b),t", pa11, FLAG_STRICT},
+{ "ldb", 0x0c001020, 0xfc1ff3e0, "cocc@(b),t", pa20, FLAG_STRICT},
+{ "ldb", 0x0c001020, 0xfc1f33e0, "cocc@(s,b),t", pa20, FLAG_STRICT},
+{ "ldb", 0x0c001000, 0xfc00dfc0, "cM5(b),t", pa10, FLAG_STRICT},
+{ "ldb", 0x0c001000, 0xfc001fc0, "cM5(s,b),t", pa10, FLAG_STRICT},
+{ "ldb", 0x0c001000, 0xfc00d3c0, "cmcc5(b),t", pa11, FLAG_STRICT},
+{ "ldb", 0x0c001000, 0xfc0013c0, "cmcc5(s,b),t", pa11, FLAG_STRICT},
+{ "ldb", 0x40000000, 0xfc000000, "l(b),x", pa20w, FLAG_STRICT},
+{ "ldb", 0x40000000, 0xfc00c000, "j(b),x", pa10, 0},
+{ "ldb", 0x40000000, 0xfc000000, "j(s,b),x", pa10, 0},
+{ "std", 0x0c0012e0, 0xfc00f3ff, "cocCx,@(b)", pa20, FLAG_STRICT},
+{ "std", 0x0c0012e0, 0xfc0033ff, "cocCx,@(s,b)", pa20, FLAG_STRICT},
+{ "std", 0x0c0012c0, 0xfc00d3c0, "cmcCx,V(b)", pa20, FLAG_STRICT},
+{ "std", 0x0c0012c0, 0xfc0013c0, "cmcCx,V(s,b)", pa20, FLAG_STRICT},
+{ "std", 0x70000000, 0xfc000002, "cqx,&(b)", pa20w, FLAG_STRICT},
+{ "std", 0x70000000, 0xfc00c002, "cqx,#(b)", pa20, FLAG_STRICT},
+{ "std", 0x70000000, 0xfc000002, "cqx,#(s,b)", pa20, FLAG_STRICT},
+{ "stw", 0x0c0012a0, 0xfc00f3ff, "cocCx,@(b)", pa20, FLAG_STRICT},
+{ "stw", 0x0c0012a0, 0xfc0033ff, "cocCx,@(s,b)", pa20, FLAG_STRICT},
+{ "stw", 0x0c001280, 0xfc00dfc0, "cMx,V(b)", pa10, FLAG_STRICT},
+{ "stw", 0x0c001280, 0xfc001fc0, "cMx,V(s,b)", pa10, FLAG_STRICT},
+{ "stw", 0x0c001280, 0xfc00d3c0, "cmcCx,V(b)", pa11, FLAG_STRICT},
+{ "stw", 0x0c001280, 0xfc0013c0, "cmcCx,V(s,b)", pa11, FLAG_STRICT},
+{ "stw", 0x6c000000, 0xfc000000, "cex,<(b)", pa20w, FLAG_STRICT},
+{ "stw", 0x7c000004, 0xfc000006, "cex,>(b)", pa20w, FLAG_STRICT},
+{ "stw", 0x68000000, 0xfc000000, "x,l(b)", pa20w, FLAG_STRICT},
+{ "stw", 0x7c000004, 0xfc00c006, "cex,K(b)", pa20, FLAG_STRICT},
+{ "stw", 0x7c000004, 0xfc000006, "cex,K(s,b)", pa20, FLAG_STRICT},
+{ "stw", 0x6c000000, 0xfc00c000, "cex,J(b)", pa10, FLAG_STRICT},
+{ "stw", 0x6c000000, 0xfc000000, "cex,J(s,b)", pa10, FLAG_STRICT},
+{ "stw", 0x68000000, 0xfc00c000, "x,j(b)", pa10, 0},
+{ "stw", 0x68000000, 0xfc000000, "x,j(s,b)", pa10, 0},
+{ "sth", 0x0c001260, 0xfc00f3ff, "cocCx,@(b)", pa20, FLAG_STRICT},
+{ "sth", 0x0c001260, 0xfc0033ff, "cocCx,@(s,b)", pa20, FLAG_STRICT},
+{ "sth", 0x0c001240, 0xfc00dfc0, "cMx,V(b)", pa10, FLAG_STRICT},
+{ "sth", 0x0c001240, 0xfc001fc0, "cMx,V(s,b)", pa10, FLAG_STRICT},
+{ "sth", 0x0c001240, 0xfc00d3c0, "cmcCx,V(b)", pa11, FLAG_STRICT},
+{ "sth", 0x0c001240, 0xfc0013c0, "cmcCx,V(s,b)", pa11, FLAG_STRICT},
+{ "sth", 0x64000000, 0xfc000000, "x,l(b)", pa20w, FLAG_STRICT},
+{ "sth", 0x64000000, 0xfc00c000, "x,j(b)", pa10, 0},
+{ "sth", 0x64000000, 0xfc000000, "x,j(s,b)", pa10, 0},
+{ "stb", 0x0c001220, 0xfc00f3ff, "cocCx,@(b)", pa20, FLAG_STRICT},
+{ "stb", 0x0c001220, 0xfc0033ff, "cocCx,@(s,b)", pa20, FLAG_STRICT},
+{ "stb", 0x0c001200, 0xfc00dfc0, "cMx,V(b)", pa10, FLAG_STRICT},
+{ "stb", 0x0c001200, 0xfc001fc0, "cMx,V(s,b)", pa10, FLAG_STRICT},
+{ "stb", 0x0c001200, 0xfc00d3c0, "cmcCx,V(b)", pa11, FLAG_STRICT},
+{ "stb", 0x0c001200, 0xfc0013c0, "cmcCx,V(s,b)", pa11, FLAG_STRICT},
+{ "stb", 0x60000000, 0xfc000000, "x,l(b)", pa20w, FLAG_STRICT},
+{ "stb", 0x60000000, 0xfc00c000, "x,j(b)", pa10, 0},
+{ "stb", 0x60000000, 0xfc000000, "x,j(s,b)", pa10, 0},
+{ "ldwm", 0x4c000000, 0xfc00c000, "j(b),x", pa10, 0},
+{ "ldwm", 0x4c000000, 0xfc000000, "j(s,b),x", pa10, 0},
+{ "stwm", 0x6c000000, 0xfc00c000, "x,j(b)", pa10, 0},
+{ "stwm", 0x6c000000, 0xfc000000, "x,j(s,b)", pa10, 0},
+{ "ldwx", 0x0c000080, 0xfc00dfc0, "cXx(b),t", pa10, FLAG_STRICT},
+{ "ldwx", 0x0c000080, 0xfc001fc0, "cXx(s,b),t", pa10, FLAG_STRICT},
+{ "ldwx", 0x0c000080, 0xfc00d3c0, "cxccx(b),t", pa11, FLAG_STRICT},
+{ "ldwx", 0x0c000080, 0xfc0013c0, "cxccx(s,b),t", pa11, FLAG_STRICT},
+{ "ldwx", 0x0c000080, 0xfc00dfc0, "cXx(b),t", pa10, 0},
+{ "ldwx", 0x0c000080, 0xfc001fc0, "cXx(s,b),t", pa10, 0},
+{ "ldhx", 0x0c000040, 0xfc00dfc0, "cXx(b),t", pa10, FLAG_STRICT},
+{ "ldhx", 0x0c000040, 0xfc001fc0, "cXx(s,b),t", pa10, FLAG_STRICT},
+{ "ldhx", 0x0c000040, 0xfc00d3c0, "cxccx(b),t", pa11, FLAG_STRICT},
+{ "ldhx", 0x0c000040, 0xfc0013c0, "cxccx(s,b),t", pa11, FLAG_STRICT},
+{ "ldhx", 0x0c000040, 0xfc00dfc0, "cXx(b),t", pa10, 0},
+{ "ldhx", 0x0c000040, 0xfc001fc0, "cXx(s,b),t", pa10, 0},
+{ "ldbx", 0x0c000000, 0xfc00dfc0, "cXx(b),t", pa10, FLAG_STRICT},
+{ "ldbx", 0x0c000000, 0xfc001fc0, "cXx(s,b),t", pa10, FLAG_STRICT},
+{ "ldbx", 0x0c000000, 0xfc00d3c0, "cxccx(b),t", pa11, FLAG_STRICT},
+{ "ldbx", 0x0c000000, 0xfc0013c0, "cxccx(s,b),t", pa11, FLAG_STRICT},
+{ "ldbx", 0x0c000000, 0xfc00dfc0, "cXx(b),t", pa10, 0},
+{ "ldbx", 0x0c000000, 0xfc001fc0, "cXx(s,b),t", pa10, 0},
+{ "ldwa", 0x0c000180, 0xfc00dfc0, "cXx(b),t", pa10, FLAG_STRICT},
+{ "ldwa", 0x0c000180, 0xfc00d3c0, "cxccx(b),t", pa11, FLAG_STRICT},
+{ "ldwa", 0x0c0011a0, 0xfc1ff3e0, "cocc@(b),t", pa20, FLAG_STRICT},
+{ "ldwa", 0x0c001180, 0xfc00dfc0, "cM5(b),t", pa10, FLAG_STRICT},
+{ "ldwa", 0x0c001180, 0xfc00d3c0, "cmcc5(b),t", pa11, FLAG_STRICT},
+{ "ldcw", 0x0c0001c0, 0xfc00dfc0, "cXx(b),t", pa10, FLAG_STRICT},
+{ "ldcw", 0x0c0001c0, 0xfc001fc0, "cXx(s,b),t", pa10, FLAG_STRICT},
+{ "ldcw", 0x0c0001c0, 0xfc00d3c0, "cxcdx(b),t", pa11, FLAG_STRICT},
+{ "ldcw", 0x0c0001c0, 0xfc0013c0, "cxcdx(s,b),t", pa11, FLAG_STRICT},
+{ "ldcw", 0x0c0011c0, 0xfc00dfc0, "cM5(b),t", pa10, FLAG_STRICT},
+{ "ldcw", 0x0c0011c0, 0xfc001fc0, "cM5(s,b),t", pa10, FLAG_STRICT},
+{ "ldcw", 0x0c0011c0, 0xfc00d3c0, "cmcd5(b),t", pa11, FLAG_STRICT},
+{ "ldcw", 0x0c0011c0, 0xfc0013c0, "cmcd5(s,b),t", pa11, FLAG_STRICT},
+{ "stwa", 0x0c0013a0, 0xfc00d3ff, "cocCx,@(b)", pa20, FLAG_STRICT},
+{ "stwa", 0x0c001380, 0xfc00dfc0, "cMx,V(b)", pa10, FLAG_STRICT},
+{ "stwa", 0x0c001380, 0xfc00d3c0, "cmcCx,V(b)", pa11, FLAG_STRICT},
+{ "stby", 0x0c001300, 0xfc00dfc0, "cAx,V(b)", pa10, FLAG_STRICT},
+{ "stby", 0x0c001300, 0xfc001fc0, "cAx,V(s,b)", pa10, FLAG_STRICT},
+{ "stby", 0x0c001300, 0xfc00d3c0, "cscCx,V(b)", pa11, FLAG_STRICT},
+{ "stby", 0x0c001300, 0xfc0013c0, "cscCx,V(s,b)", pa11, FLAG_STRICT},
+{ "ldda", 0x0c000100, 0xfc00d3c0, "cxccx(b),t", pa20, FLAG_STRICT},
+{ "ldda", 0x0c001120, 0xfc1ff3e0, "cocc@(b),t", pa20, FLAG_STRICT},
+{ "ldda", 0x0c001100, 0xfc00d3c0, "cmcc5(b),t", pa20, FLAG_STRICT},
+{ "ldcd", 0x0c000140, 0xfc00d3c0, "cxcdx(b),t", pa20, FLAG_STRICT},
+{ "ldcd", 0x0c000140, 0xfc0013c0, "cxcdx(s,b),t", pa20, FLAG_STRICT},
+{ "ldcd", 0x0c001140, 0xfc00d3c0, "cmcd5(b),t", pa20, FLAG_STRICT},
+{ "ldcd", 0x0c001140, 0xfc0013c0, "cmcd5(s,b),t", pa20, FLAG_STRICT},
+{ "stda", 0x0c0013e0, 0xfc00f3ff, "cocCx,@(b)", pa20, FLAG_STRICT},
+{ "stda", 0x0c0013c0, 0xfc00d3c0, "cmcCx,V(b)", pa20, FLAG_STRICT},
+{ "ldwax", 0x0c000180, 0xfc00dfc0, "cXx(b),t", pa10, FLAG_STRICT},
+{ "ldwax", 0x0c000180, 0xfc00d3c0, "cxccx(b),t", pa11, FLAG_STRICT},
+{ "ldwax", 0x0c000180, 0xfc00dfc0, "cXx(b),t", pa10, 0},
+{ "ldcwx", 0x0c0001c0, 0xfc00dfc0, "cXx(b),t", pa10, FLAG_STRICT},
+{ "ldcwx", 0x0c0001c0, 0xfc001fc0, "cXx(s,b),t", pa10, FLAG_STRICT},
+{ "ldcwx", 0x0c0001c0, 0xfc00d3c0, "cxcdx(b),t", pa11, FLAG_STRICT},
+{ "ldcwx", 0x0c0001c0, 0xfc0013c0, "cxcdx(s,b),t", pa11, FLAG_STRICT},
+{ "ldcwx", 0x0c0001c0, 0xfc00dfc0, "cXx(b),t", pa10, 0},
+{ "ldcwx", 0x0c0001c0, 0xfc001fc0, "cXx(s,b),t", pa10, 0},
+{ "ldws", 0x0c001080, 0xfc00dfc0, "cM5(b),t", pa10, FLAG_STRICT},
+{ "ldws", 0x0c001080, 0xfc001fc0, "cM5(s,b),t", pa10, FLAG_STRICT},
+{ "ldws", 0x0c001080, 0xfc00d3c0, "cmcc5(b),t", pa11, FLAG_STRICT},
+{ "ldws", 0x0c001080, 0xfc0013c0, "cmcc5(s,b),t", pa11, FLAG_STRICT},
+{ "ldws", 0x0c001080, 0xfc00dfc0, "cM5(b),t", pa10, 0},
+{ "ldws", 0x0c001080, 0xfc001fc0, "cM5(s,b),t", pa10, 0},
+{ "ldhs", 0x0c001040, 0xfc00dfc0, "cM5(b),t", pa10, FLAG_STRICT},
+{ "ldhs", 0x0c001040, 0xfc001fc0, "cM5(s,b),t", pa10, FLAG_STRICT},
+{ "ldhs", 0x0c001040, 0xfc00d3c0, "cmcc5(b),t", pa11, FLAG_STRICT},
+{ "ldhs", 0x0c001040, 0xfc0013c0, "cmcc5(s,b),t", pa11, FLAG_STRICT},
+{ "ldhs", 0x0c001040, 0xfc00dfc0, "cM5(b),t", pa10, 0},
+{ "ldhs", 0x0c001040, 0xfc001fc0, "cM5(s,b),t", pa10, 0},
+{ "ldbs", 0x0c001000, 0xfc00dfc0, "cM5(b),t", pa10, FLAG_STRICT},
+{ "ldbs", 0x0c001000, 0xfc001fc0, "cM5(s,b),t", pa10, FLAG_STRICT},
+{ "ldbs", 0x0c001000, 0xfc00d3c0, "cmcc5(b),t", pa11, FLAG_STRICT},
+{ "ldbs", 0x0c001000, 0xfc0013c0, "cmcc5(s,b),t", pa11, FLAG_STRICT},
+{ "ldbs", 0x0c001000, 0xfc00dfc0, "cM5(b),t", pa10, 0},
+{ "ldbs", 0x0c001000, 0xfc001fc0, "cM5(s,b),t", pa10, 0},
+{ "ldwas", 0x0c001180, 0xfc00dfc0, "cM5(b),t", pa10, FLAG_STRICT},
+{ "ldwas", 0x0c001180, 0xfc00d3c0, "cmcc5(b),t", pa11, FLAG_STRICT},
+{ "ldwas", 0x0c001180, 0xfc00dfc0, "cM5(b),t", pa10, 0},
+{ "ldcws", 0x0c0011c0, 0xfc00dfc0, "cM5(b),t", pa10, FLAG_STRICT},
+{ "ldcws", 0x0c0011c0, 0xfc001fc0, "cM5(s,b),t", pa10, FLAG_STRICT},
+{ "ldcws", 0x0c0011c0, 0xfc00d3c0, "cmcd5(b),t", pa11, FLAG_STRICT},
+{ "ldcws", 0x0c0011c0, 0xfc0013c0, "cmcd5(s,b),t", pa11, FLAG_STRICT},
+{ "ldcws", 0x0c0011c0, 0xfc00dfc0, "cM5(b),t", pa10, 0},
+{ "ldcws", 0x0c0011c0, 0xfc001fc0, "cM5(s,b),t", pa10, 0},
+{ "stws", 0x0c001280, 0xfc00dfc0, "cMx,V(b)", pa10, FLAG_STRICT},
+{ "stws", 0x0c001280, 0xfc001fc0, "cMx,V(s,b)", pa10, FLAG_STRICT},
+{ "stws", 0x0c001280, 0xfc00d3c0, "cmcCx,V(b)", pa11, FLAG_STRICT},
+{ "stws", 0x0c001280, 0xfc0013c0, "cmcCx,V(s,b)", pa11, FLAG_STRICT},
+{ "stws", 0x0c001280, 0xfc00dfc0, "cMx,V(b)", pa10, 0},
+{ "stws", 0x0c001280, 0xfc001fc0, "cMx,V(s,b)", pa10, 0},
+{ "sths", 0x0c001240, 0xfc00dfc0, "cMx,V(b)", pa10, FLAG_STRICT},
+{ "sths", 0x0c001240, 0xfc001fc0, "cMx,V(s,b)", pa10, FLAG_STRICT},
+{ "sths", 0x0c001240, 0xfc00d3c0, "cmcCx,V(b)", pa11, FLAG_STRICT},
+{ "sths", 0x0c001240, 0xfc0013c0, "cmcCx,V(s,b)", pa11, FLAG_STRICT},
+{ "sths", 0x0c001240, 0xfc00dfc0, "cMx,V(b)", pa10, 0},
+{ "sths", 0x0c001240, 0xfc001fc0, "cMx,V(s,b)", pa10, 0},
+{ "stbs", 0x0c001200, 0xfc00dfc0, "cMx,V(b)", pa10, FLAG_STRICT},
+{ "stbs", 0x0c001200, 0xfc001fc0, "cMx,V(s,b)", pa10, FLAG_STRICT},
+{ "stbs", 0x0c001200, 0xfc00d3c0, "cmcCx,V(b)", pa11, FLAG_STRICT},
+{ "stbs", 0x0c001200, 0xfc0013c0, "cmcCx,V(s,b)", pa11, FLAG_STRICT},
+{ "stbs", 0x0c001200, 0xfc00dfc0, "cMx,V(b)", pa10, 0},
+{ "stbs", 0x0c001200, 0xfc001fc0, "cMx,V(s,b)", pa10, 0},
+{ "stwas", 0x0c001380, 0xfc00dfc0, "cMx,V(b)", pa10, FLAG_STRICT},
+{ "stwas", 0x0c001380, 0xfc00d3c0, "cmcCx,V(b)", pa11, FLAG_STRICT},
+{ "stwas", 0x0c001380, 0xfc00dfc0, "cMx,V(b)", pa10, 0},
+{ "stdby", 0x0c001340, 0xfc00d3c0, "cscCx,V(b)", pa20, FLAG_STRICT},
+{ "stdby", 0x0c001340, 0xfc0013c0, "cscCx,V(s,b)", pa20, FLAG_STRICT},
+{ "stbys", 0x0c001300, 0xfc00dfc0, "cAx,V(b)", pa10, FLAG_STRICT},
+{ "stbys", 0x0c001300, 0xfc001fc0, "cAx,V(s,b)", pa10, FLAG_STRICT},
+{ "stbys", 0x0c001300, 0xfc00d3c0, "cscCx,V(b)", pa11, FLAG_STRICT},
+{ "stbys", 0x0c001300, 0xfc0013c0, "cscCx,V(s,b)", pa11, FLAG_STRICT},
+{ "stbys", 0x0c001300, 0xfc00dfc0, "cAx,V(b)", pa10, 0},
+{ "stbys", 0x0c001300, 0xfc001fc0, "cAx,V(s,b)", pa10, 0},
+
+/* Immediate instructions. */
+{ "ldo", 0x34000000, 0xfc000000, "l(b),x", pa20w, 0},
+{ "ldo", 0x34000000, 0xfc00c000, "j(b),x", pa10, 0},
+{ "ldil", 0x20000000, 0xfc000000, "k,b", pa10, 0},
+{ "addil", 0x28000000, 0xfc000000, "k,b,Z", pa10, 0},
+{ "addil", 0x28000000, 0xfc000000, "k,b", pa10, 0},
+
+/* Branching instructions. */
+{ "b", 0xe8008000, 0xfc00e000, "cpnXL", pa20, FLAG_STRICT},
+{ "b", 0xe800a000, 0xfc00e000, "clnXL", pa20, FLAG_STRICT},
+{ "b", 0xe8000000, 0xfc00e000, "clnW,b", pa10, FLAG_STRICT},
+{ "b", 0xe8002000, 0xfc00e000, "cgnW,b", pa10, FLAG_STRICT},
+{ "b", 0xe8000000, 0xffe0e000, "nW", pa10, 0}, /* b,l foo,r0 */
+{ "bl", 0xe8000000, 0xfc00e000, "nW,b", pa10, 0},
+{ "gate", 0xe8002000, 0xfc00e000, "nW,b", pa10, 0},
+{ "blr", 0xe8004000, 0xfc00e001, "nx,b", pa10, 0},
+{ "bv", 0xe800c000, 0xfc00fffd, "nx(b)", pa10, 0},
+{ "bv", 0xe800c000, 0xfc00fffd, "n(b)", pa10, 0},
+{ "bve", 0xe800f001, 0xfc1ffffd, "cpn(b)L", pa20, FLAG_STRICT},
+{ "bve", 0xe800f000, 0xfc1ffffd, "cln(b)L", pa20, FLAG_STRICT},
+{ "bve", 0xe800d001, 0xfc1ffffd, "cPn(b)", pa20, FLAG_STRICT},
+{ "bve", 0xe800d000, 0xfc1ffffd, "n(b)", pa20, FLAG_STRICT},
+{ "be", 0xe4000000, 0xfc000000, "clnz(S,b),Y", pa10, FLAG_STRICT},
+{ "be", 0xe4000000, 0xfc000000, "clnz(b),Y", pa10, FLAG_STRICT},
+{ "be", 0xe0000000, 0xfc000000, "nz(S,b)", pa10, 0},
+{ "be", 0xe0000000, 0xfc000000, "nz(b)", pa10, 0},
+{ "ble", 0xe4000000, 0xfc000000, "nz(S,b)", pa10, 0},
+{ "movb", 0xc8000000, 0xfc000000, "?ynx,b,w", pa10, 0},
+{ "movib", 0xcc000000, 0xfc000000, "?yn5,b,w", pa10, 0},
+{ "combt", 0x80000000, 0xfc000000, "?tnx,b,w", pa10, 0},
+{ "combf", 0x88000000, 0xfc000000, "?tnx,b,w", pa10, 0},
+{ "comibt", 0x84000000, 0xfc000000, "?tn5,b,w", pa10, 0},
+{ "comibf", 0x8c000000, 0xfc000000, "?tn5,b,w", pa10, 0},
+{ "addbt", 0xa0000000, 0xfc000000, "?dnx,b,w", pa10, 0},
+{ "addbf", 0xa8000000, 0xfc000000, "?dnx,b,w", pa10, 0},
+{ "addibt", 0xa4000000, 0xfc000000, "?dn5,b,w", pa10, 0},
+{ "addibf", 0xac000000, 0xfc000000, "?dn5,b,w", pa10, 0},
+{ "bb", 0xc0004000, 0xffe06000, "?bnx,!,w", pa10, FLAG_STRICT},
+{ "bb", 0xc0006000, 0xffe06000, "?Bnx,!,w", pa20, FLAG_STRICT},
+{ "bb", 0xc4004000, 0xfc006000, "?bnx,Q,w", pa10, FLAG_STRICT},
+{ "bb", 0xc4004000, 0xfc004000, "?Bnx,B,w", pa20, FLAG_STRICT},
+{ "bvb", 0xc0004000, 0xffe04000, "?bnx,w", pa10, 0},
+{ "clrbts", 0xe8004005, 0xffffffff, "", pa20, FLAG_STRICT},
+{ "popbts", 0xe8004005, 0xfffff007, "$", pa20, FLAG_STRICT},
+{ "pushnom", 0xe8004001, 0xffffffff, "", pa20, FLAG_STRICT},
+{ "pushbts", 0xe8004001, 0xffe0ffff, "x", pa20, FLAG_STRICT},
+
+/* Computation Instructions. */
+
+{ "cmpclr", 0x080008a0, 0xfc000fe0, "?Sx,b,t", pa20, FLAG_STRICT},
+{ "cmpclr", 0x08000880, 0xfc000fe0, "?sx,b,t", pa10, FLAG_STRICT},
+{ "comclr", 0x08000880, 0xfc000fe0, "?sx,b,t", pa10, 0},
+{ "or", 0x08000260, 0xfc000fe0, "?Lx,b,t", pa20, FLAG_STRICT},
+{ "or", 0x08000240, 0xfc000fe0, "?lx,b,t", pa10, 0},
+{ "xor", 0x080002a0, 0xfc000fe0, "?Lx,b,t", pa20, FLAG_STRICT},
+{ "xor", 0x08000280, 0xfc000fe0, "?lx,b,t", pa10, 0},
+{ "and", 0x08000220, 0xfc000fe0, "?Lx,b,t", pa20, FLAG_STRICT},
+{ "and", 0x08000200, 0xfc000fe0, "?lx,b,t", pa10, 0},
+{ "andcm", 0x08000020, 0xfc000fe0, "?Lx,b,t", pa20, FLAG_STRICT},
+{ "andcm", 0x08000000, 0xfc000fe0, "?lx,b,t", pa10, 0},
+{ "uxor", 0x080003a0, 0xfc000fe0, "?Ux,b,t", pa20, FLAG_STRICT},
+{ "uxor", 0x08000380, 0xfc000fe0, "?ux,b,t", pa10, 0},
+{ "uaddcm", 0x080009a0, 0xfc000fa0, "cT?Ux,b,t", pa20, FLAG_STRICT},
+{ "uaddcm", 0x08000980, 0xfc000fa0, "cT?ux,b,t", pa10, FLAG_STRICT},
+{ "uaddcm", 0x08000980, 0xfc000fe0, "?ux,b,t", pa10, 0},
+{ "uaddcmt", 0x080009c0, 0xfc000fe0, "?ux,b,t", pa10, 0},
+{ "dcor", 0x08000ba0, 0xfc1f0fa0, "ci?Ub,t", pa20, FLAG_STRICT},
+{ "dcor", 0x08000b80, 0xfc1f0fa0, "ci?ub,t", pa10, FLAG_STRICT},
+{ "dcor", 0x08000b80, 0xfc1f0fe0, "?ub,t", pa10, 0},
+{ "idcor", 0x08000bc0, 0xfc1f0fe0, "?ub,t", pa10, 0},
+{ "addi", 0xb0000000, 0xfc000000, "ct?ai,b,x", pa10, FLAG_STRICT},
+{ "addi", 0xb4000000, 0xfc000000, "cv?ai,b,x", pa10, FLAG_STRICT},
+{ "addi", 0xb4000000, 0xfc000800, "?ai,b,x", pa10, 0},
+{ "addio", 0xb4000800, 0xfc000800, "?ai,b,x", pa10, 0},
+{ "addit", 0xb0000000, 0xfc000800, "?ai,b,x", pa10, 0},
+{ "addito", 0xb0000800, 0xfc000800, "?ai,b,x", pa10, 0},
+{ "add", 0x08000720, 0xfc0007e0, "cY?Ax,b,t", pa20, FLAG_STRICT},
+{ "add", 0x08000700, 0xfc0007e0, "cy?ax,b,t", pa10, FLAG_STRICT},
+{ "add", 0x08000220, 0xfc0003e0, "ca?Ax,b,t", pa20, FLAG_STRICT},
+{ "add", 0x08000200, 0xfc0003e0, "ca?ax,b,t", pa10, FLAG_STRICT},
+{ "add", 0x08000600, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "addl", 0x08000a00, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "addo", 0x08000e00, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "addc", 0x08000700, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "addco", 0x08000f00, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "sub", 0x080004e0, 0xfc0007e0, "ct?Sx,b,t", pa20, FLAG_STRICT},
+{ "sub", 0x080004c0, 0xfc0007e0, "ct?sx,b,t", pa10, FLAG_STRICT},
+{ "sub", 0x08000520, 0xfc0007e0, "cB?Sx,b,t", pa20, FLAG_STRICT},
+{ "sub", 0x08000500, 0xfc0007e0, "cb?sx,b,t", pa10, FLAG_STRICT},
+{ "sub", 0x08000420, 0xfc0007e0, "cv?Sx,b,t", pa20, FLAG_STRICT},
+{ "sub", 0x08000400, 0xfc0007e0, "cv?sx,b,t", pa10, FLAG_STRICT},
+{ "sub", 0x08000400, 0xfc000fe0, "?sx,b,t", pa10, 0},
+{ "subo", 0x08000c00, 0xfc000fe0, "?sx,b,t", pa10, 0},
+{ "subb", 0x08000500, 0xfc000fe0, "?sx,b,t", pa10, 0},
+{ "subbo", 0x08000d00, 0xfc000fe0, "?sx,b,t", pa10, 0},
+{ "subt", 0x080004c0, 0xfc000fe0, "?sx,b,t", pa10, 0},
+{ "subto", 0x08000cc0, 0xfc000fe0, "?sx,b,t", pa10, 0},
+{ "ds", 0x08000440, 0xfc000fe0, "?sx,b,t", pa10, 0},
+{ "subi", 0x94000000, 0xfc000000, "cv?si,b,x", pa10, FLAG_STRICT},
+{ "subi", 0x94000000, 0xfc000800, "?si,b,x", pa10, 0},
+{ "subio", 0x94000800, 0xfc000800, "?si,b,x", pa10, 0},
+{ "cmpiclr", 0x90000800, 0xfc000800, "?Si,b,x", pa20, FLAG_STRICT},
+{ "cmpiclr", 0x90000000, 0xfc000800, "?si,b,x", pa10, FLAG_STRICT},
+{ "comiclr", 0x90000000, 0xfc000800, "?si,b,x", pa10, 0},
+{ "shladd", 0x08000220, 0xfc000320, "ca?Ax,.,b,t", pa20, FLAG_STRICT},
+{ "shladd", 0x08000200, 0xfc000320, "ca?ax,.,b,t", pa10, FLAG_STRICT},
+{ "sh1add", 0x08000640, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "sh1addl", 0x08000a40, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "sh1addo", 0x08000e40, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "sh2add", 0x08000680, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "sh2addl", 0x08000a80, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "sh2addo", 0x08000e80, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "sh3add", 0x080006c0, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "sh3addl", 0x08000ac0, 0xfc000fe0, "?ax,b,t", pa10, 0},
+{ "sh3addo", 0x08000ec0, 0xfc000fe0, "?ax,b,t", pa10, 0},
+
+/* Subword Operation Instructions. */
+
+{ "hadd", 0x08000300, 0xfc00ff20, "cHx,b,t", pa20, FLAG_STRICT},
+{ "havg", 0x080002c0, 0xfc00ffe0, "x,b,t", pa20, FLAG_STRICT},
+{ "hshl", 0xf8008800, 0xffe0fc20, "x,*,t", pa20, FLAG_STRICT},
+{ "hshladd", 0x08000700, 0xfc00ff20, "x,.,b,t", pa20, FLAG_STRICT},
+{ "hshr", 0xf800c800, 0xfc1ff820, "cSb,*,t", pa20, FLAG_STRICT},
+{ "hshradd", 0x08000500, 0xfc00ff20, "x,.,b,t", pa20, FLAG_STRICT},
+{ "hsub", 0x08000100, 0xfc00ff20, "cHx,b,t", pa20, FLAG_STRICT},
+{ "mixh", 0xf8008400, 0xfc009fe0, "chx,b,t", pa20, FLAG_STRICT},
+{ "mixw", 0xf8008000, 0xfc009fe0, "chx,b,t", pa20, FLAG_STRICT},
+{ "permh", 0xf8000000, 0xfc009020, "c*a,t", pa20, FLAG_STRICT},
+
+
+/* Extract and Deposit Instructions. */
+
+{ "shrpd", 0xd0000200, 0xfc001fe0, "?Xx,b,!,t", pa20, FLAG_STRICT},
+{ "shrpd", 0xd0000400, 0xfc001400, "?Xx,b,~,t", pa20, FLAG_STRICT},
+{ "shrpw", 0xd0000000, 0xfc001fe0, "?xx,b,!,t", pa10, FLAG_STRICT},
+{ "shrpw", 0xd0000800, 0xfc001c00, "?xx,b,p,t", pa10, FLAG_STRICT},
+{ "vshd", 0xd0000000, 0xfc001fe0, "?xx,b,t", pa10, 0},
+{ "shd", 0xd0000800, 0xfc001c00, "?xx,b,p,t", pa10, 0},
+{ "extrd", 0xd0001200, 0xfc001ae0, "cS?Xb,!,%,x", pa20, FLAG_STRICT},
+{ "extrd", 0xd8000000, 0xfc000000, "cS?Xb,q,|,x", pa20, FLAG_STRICT},
+{ "extrw", 0xd0001000, 0xfc001be0, "cS?xb,!,T,x", pa10, FLAG_STRICT},
+{ "extrw", 0xd0001800, 0xfc001800, "cS?xb,P,T,x", pa10, FLAG_STRICT},
+{ "vextru", 0xd0001000, 0xfc001fe0, "?xb,T,x", pa10, 0},
+{ "vextrs", 0xd0001400, 0xfc001fe0, "?xb,T,x", pa10, 0},
+{ "extru", 0xd0001800, 0xfc001c00, "?xb,P,T,x", pa10, 0},
+{ "extrs", 0xd0001c00, 0xfc001c00, "?xb,P,T,x", pa10, 0},
+{ "depd", 0xd4000200, 0xfc001ae0, "cz?Xx,!,%,b", pa20, FLAG_STRICT},
+{ "depd", 0xf0000000, 0xfc000000, "cz?Xx,~,|,b", pa20, FLAG_STRICT},
+{ "depdi", 0xd4001200, 0xfc001ae0, "cz?X5,!,%,b", pa20, FLAG_STRICT},
+{ "depdi", 0xf4000000, 0xfc000000, "cz?X5,~,|,b", pa20, FLAG_STRICT},
+{ "depw", 0xd4000000, 0xfc001be0, "cz?xx,!,T,b", pa10, FLAG_STRICT},
+{ "depw", 0xd4000800, 0xfc001800, "cz?xx,p,T,b", pa10, FLAG_STRICT},
+{ "depwi", 0xd4001000, 0xfc001be0, "cz?x5,!,T,b", pa10, FLAG_STRICT},
+{ "depwi", 0xd4001800, 0xfc001800, "cz?x5,p,T,b", pa10, FLAG_STRICT},
+{ "zvdep", 0xd4000000, 0xfc001fe0, "?xx,T,b", pa10, 0},
+{ "vdep", 0xd4000400, 0xfc001fe0, "?xx,T,b", pa10, 0},
+{ "zdep", 0xd4000800, 0xfc001c00, "?xx,p,T,b", pa10, 0},
+{ "dep", 0xd4000c00, 0xfc001c00, "?xx,p,T,b", pa10, 0},
+{ "zvdepi", 0xd4001000, 0xfc001fe0, "?x5,T,b", pa10, 0},
+{ "vdepi", 0xd4001400, 0xfc001fe0, "?x5,T,b", pa10, 0},
+{ "zdepi", 0xd4001800, 0xfc001c00, "?x5,p,T,b", pa10, 0},
+{ "depi", 0xd4001c00, 0xfc001c00, "?x5,p,T,b", pa10, 0},
+
+/* System Control Instructions. */
+
+{ "break", 0x00000000, 0xfc001fe0, "r,A", pa10, 0},
+{ "rfi", 0x00000c00, 0xffffff1f, "cr", pa10, FLAG_STRICT},
+{ "rfi", 0x00000c00, 0xffffffff, "", pa10, 0},
+{ "rfir", 0x00000ca0, 0xffffffff, "", pa11, 0},
+{ "ssm", 0x00000d60, 0xfc00ffe0, "U,t", pa20, FLAG_STRICT},
+{ "ssm", 0x00000d60, 0xffe0ffe0, "R,t", pa10, 0},
+{ "rsm", 0x00000e60, 0xfc00ffe0, "U,t", pa20, FLAG_STRICT},
+{ "rsm", 0x00000e60, 0xffe0ffe0, "R,t", pa10, 0},
+{ "mtsm", 0x00001860, 0xffe0ffff, "x", pa10, 0},
+{ "ldsid", 0x000010a0, 0xfc1fffe0, "(b),t", pa10, 0},
+{ "ldsid", 0x000010a0, 0xfc1f3fe0, "(s,b),t", pa10, 0},
+{ "mtsp", 0x00001820, 0xffe01fff, "x,S", pa10, 0},
+{ "mtctl", 0x00001840, 0xfc00ffff, "x,^", pa10, 0},
+{ "mtsarcm", 0x016018C0, 0xffe0ffff, "x", pa20, FLAG_STRICT},
+{ "mfia", 0x000014A0, 0xffffffe0, "t", pa20, FLAG_STRICT},
+{ "mfsp", 0x000004a0, 0xffff1fe0, "S,t", pa10, 0},
+{ "mfctl", 0x016048a0, 0xffffffe0, "cW!,t", pa20, FLAG_STRICT},
+{ "mfctl", 0x000008a0, 0xfc1fffe0, "^,t", pa10, 0},
+{ "sync", 0x00000400, 0xffffffff, "", pa10, 0},
+{ "syncdma", 0x00100400, 0xffffffff, "", pa10, 0},
+{ "probe", 0x04001180, 0xfc00ffa0, "cw(b),x,t", pa10, FLAG_STRICT},
+{ "probe", 0x04001180, 0xfc003fa0, "cw(s,b),x,t", pa10, FLAG_STRICT},
+{ "probei", 0x04003180, 0xfc00ffa0, "cw(b),R,t", pa10, FLAG_STRICT},
+{ "probei", 0x04003180, 0xfc003fa0, "cw(s,b),R,t", pa10, FLAG_STRICT},
+{ "prober", 0x04001180, 0xfc00ffe0, "(b),x,t", pa10, 0},
+{ "prober", 0x04001180, 0xfc003fe0, "(s,b),x,t", pa10, 0},
+{ "proberi", 0x04003180, 0xfc00ffe0, "(b),R,t", pa10, 0},
+{ "proberi", 0x04003180, 0xfc003fe0, "(s,b),R,t", pa10, 0},
+{ "probew", 0x040011c0, 0xfc00ffe0, "(b),x,t", pa10, 0},
+{ "probew", 0x040011c0, 0xfc003fe0, "(s,b),x,t", pa10, 0},
+{ "probewi", 0x040031c0, 0xfc00ffe0, "(b),R,t", pa10, 0},
+{ "probewi", 0x040031c0, 0xfc003fe0, "(s,b),R,t", pa10, 0},
+{ "lpa", 0x04001340, 0xfc00ffc0, "cZx(b),t", pa10, 0},
+{ "lpa", 0x04001340, 0xfc003fc0, "cZx(s,b),t", pa10, 0},
+{ "lci", 0x04001300, 0xfc00ffe0, "x(b),t", pa11, 0},
+{ "lci", 0x04001300, 0xfc003fe0, "x(s,b),t", pa11, 0},
+{ "pdtlb", 0x04001600, 0xfc00ffdf, "cLcZx(b)", pa20, FLAG_STRICT},
+{ "pdtlb", 0x04001600, 0xfc003fdf, "cLcZx(s,b)", pa20, FLAG_STRICT},
+{ "pdtlb", 0x04001600, 0xfc1fffdf, "cLcZ@(b)", pa20, FLAG_STRICT},
+{ "pdtlb", 0x04001600, 0xfc1f3fdf, "cLcZ@(s,b)", pa20, FLAG_STRICT},
+{ "pdtlb", 0x04001200, 0xfc00ffdf, "cZx(b)", pa10, 0},
+{ "pdtlb", 0x04001200, 0xfc003fdf, "cZx(s,b)", pa10, 0},
+{ "pitlb", 0x04000600, 0xfc001fdf, "cLcZx(S,b)", pa20, FLAG_STRICT},
+{ "pitlb", 0x04000600, 0xfc1f1fdf, "cLcZ@(S,b)", pa20, FLAG_STRICT},
+{ "pitlb", 0x04000200, 0xfc001fdf, "cZx(S,b)", pa10, 0},
+{ "pdtlbe", 0x04001240, 0xfc00ffdf, "cZx(b)", pa10, 0},
+{ "pdtlbe", 0x04001240, 0xfc003fdf, "cZx(s,b)", pa10, 0},
+{ "pitlbe", 0x04000240, 0xfc001fdf, "cZx(S,b)", pa10, 0},
+{ "idtlba", 0x04001040, 0xfc00ffff, "x,(b)", pa10, 0},
+{ "idtlba", 0x04001040, 0xfc003fff, "x,(s,b)", pa10, 0},
+{ "iitlba", 0x04000040, 0xfc001fff, "x,(S,b)", pa10, 0},
+{ "idtlbp", 0x04001000, 0xfc00ffff, "x,(b)", pa10, 0},
+{ "idtlbp", 0x04001000, 0xfc003fff, "x,(s,b)", pa10, 0},
+{ "iitlbp", 0x04000000, 0xfc001fff, "x,(S,b)", pa10, 0},
+{ "pdc", 0x04001380, 0xfc00ffdf, "cZx(b)", pa10, 0},
+{ "pdc", 0x04001380, 0xfc003fdf, "cZx(s,b)", pa10, 0},
+{ "fdc", 0x04001280, 0xfc00ffdf, "cZx(b)", pa10, FLAG_STRICT},
+{ "fdc", 0x04001280, 0xfc003fdf, "cZx(s,b)", pa10, FLAG_STRICT},
+{ "fdc", 0x04003280, 0xfc00ffff, "5(b)", pa20, FLAG_STRICT},
+{ "fdc", 0x04003280, 0xfc003fff, "5(s,b)", pa20, FLAG_STRICT},
+{ "fdc", 0x04001280, 0xfc00ffdf, "cZx(b)", pa10, 0},
+{ "fdc", 0x04001280, 0xfc003fdf, "cZx(s,b)", pa10, 0},
+{ "fic", 0x040013c0, 0xfc00dfdf, "cZx(b)", pa20, FLAG_STRICT},
+{ "fic", 0x04000280, 0xfc001fdf, "cZx(S,b)", pa10, 0},
+{ "fdce", 0x040012c0, 0xfc00ffdf, "cZx(b)", pa10, 0},
+{ "fdce", 0x040012c0, 0xfc003fdf, "cZx(s,b)", pa10, 0},
+{ "fice", 0x040002c0, 0xfc001fdf, "cZx(S,b)", pa10, 0},
+{ "diag", 0x14000000, 0xfc000000, "D", pa10, 0},
+{ "idtlbt", 0x04001800, 0xfc00ffff, "x,b", pa20, FLAG_STRICT},
+{ "iitlbt", 0x04000800, 0xfc00ffff, "x,b", pa20, FLAG_STRICT},
+
+/* These may be specific to certain versions of the PA.  Joel claimed
+   they were 72000 (7200?) specific.  However, I'm almost certain that
+   mtcpu/mfcpu were undocumented but available in the older 700 machines.  */
+{ "mtcpu", 0x14001600, 0xfc00ffff, "x,^", pa10, 0},
+{ "mfcpu", 0x14001A00, 0xfc00ffff, "^,x", pa10, 0},
+{ "tocen", 0x14403600, 0xffffffff, "", pa10, 0},
+{ "tocdis", 0x14401620, 0xffffffff, "", pa10, 0},
+{ "shdwgr", 0x14402600, 0xffffffff, "", pa10, 0},
+{ "grshdw", 0x14400620, 0xffffffff, "", pa10, 0},
+
+/* gfw and gfr are not in the HP PA 1.1 manual, but they are in either
+ the Timex FPU or the Mustang ERS (not sure which) manual. */
+{ "gfw", 0x04001680, 0xfc00ffdf, "cZx(b)", pa11, 0},
+{ "gfw", 0x04001680, 0xfc003fdf, "cZx(s,b)", pa11, 0},
+{ "gfr", 0x04001a80, 0xfc00ffdf, "cZx(b)", pa11, 0},
+{ "gfr", 0x04001a80, 0xfc003fdf, "cZx(s,b)", pa11, 0},
+
+/* Floating Point Coprocessor Instructions. */
+
+{ "fldw", 0x24000000, 0xfc00df80, "cXx(b),fT", pa10, FLAG_STRICT},
+{ "fldw", 0x24000000, 0xfc001f80, "cXx(s,b),fT", pa10, FLAG_STRICT},
+{ "fldw", 0x24000000, 0xfc00d380, "cxccx(b),fT", pa11, FLAG_STRICT},
+{ "fldw", 0x24000000, 0xfc001380, "cxccx(s,b),fT", pa11, FLAG_STRICT},
+{ "fldw", 0x24001020, 0xfc1ff3a0, "cocc@(b),fT", pa20, FLAG_STRICT},
+{ "fldw", 0x24001020, 0xfc1f33a0, "cocc@(s,b),fT", pa20, FLAG_STRICT},
+{ "fldw", 0x24001000, 0xfc00df80, "cM5(b),fT", pa10, FLAG_STRICT},
+{ "fldw", 0x24001000, 0xfc001f80, "cM5(s,b),fT", pa10, FLAG_STRICT},
+{ "fldw", 0x24001000, 0xfc00d380, "cmcc5(b),fT", pa11, FLAG_STRICT},
+{ "fldw", 0x24001000, 0xfc001380, "cmcc5(s,b),fT", pa11, FLAG_STRICT},
+{ "fldw", 0x5c000000, 0xfc000004, "y(b),fe", pa20w, FLAG_STRICT},
+{ "fldw", 0x58000000, 0xfc000000, "cJy(b),fe", pa20w, FLAG_STRICT},
+{ "fldw", 0x5c000000, 0xfc00c004, "d(b),fe", pa20, FLAG_STRICT},
+{ "fldw", 0x5c000000, 0xfc000004, "d(s,b),fe", pa20, FLAG_STRICT},
+{ "fldw", 0x58000000, 0xfc00c000, "cJd(b),fe", pa20, FLAG_STRICT},
+{ "fldw", 0x58000000, 0xfc000000, "cJd(s,b),fe", pa20, FLAG_STRICT},
+{ "fldd", 0x2c000000, 0xfc00dfc0, "cXx(b),ft", pa10, FLAG_STRICT},
+{ "fldd", 0x2c000000, 0xfc001fc0, "cXx(s,b),ft", pa10, FLAG_STRICT},
+{ "fldd", 0x2c000000, 0xfc00d3c0, "cxccx(b),ft", pa11, FLAG_STRICT},
+{ "fldd", 0x2c000000, 0xfc0013c0, "cxccx(s,b),ft", pa11, FLAG_STRICT},
+{ "fldd", 0x2c001020, 0xfc1ff3e0, "cocc@(b),ft", pa20, FLAG_STRICT},
+{ "fldd", 0x2c001020, 0xfc1f33e0, "cocc@(s,b),ft", pa20, FLAG_STRICT},
+{ "fldd", 0x2c001000, 0xfc00dfc0, "cM5(b),ft", pa10, FLAG_STRICT},
+{ "fldd", 0x2c001000, 0xfc001fc0, "cM5(s,b),ft", pa10, FLAG_STRICT},
+{ "fldd", 0x2c001000, 0xfc00d3c0, "cmcc5(b),ft", pa11, FLAG_STRICT},
+{ "fldd", 0x2c001000, 0xfc0013c0, "cmcc5(s,b),ft", pa11, FLAG_STRICT},
+{ "fldd", 0x50000002, 0xfc000002, "cq&(b),fx", pa20w, FLAG_STRICT},
+{ "fldd", 0x50000002, 0xfc00c002, "cq#(b),fx", pa20, FLAG_STRICT},
+{ "fldd", 0x50000002, 0xfc000002, "cq#(s,b),fx", pa20, FLAG_STRICT},
+{ "fstw", 0x24000200, 0xfc00df80, "cXfT,x(b)", pa10, FLAG_STRICT},
+{ "fstw", 0x24000200, 0xfc001f80, "cXfT,x(s,b)", pa10, FLAG_STRICT},
+{ "fstw", 0x24000200, 0xfc00d380, "cxcCfT,x(b)", pa11, FLAG_STRICT},
+{ "fstw", 0x24000200, 0xfc001380, "cxcCfT,x(s,b)", pa11, FLAG_STRICT},
+{ "fstw", 0x24001220, 0xfc1ff3a0, "cocCfT,@(b)", pa20, FLAG_STRICT},
+{ "fstw", 0x24001220, 0xfc1f33a0, "cocCfT,@(s,b)", pa20, FLAG_STRICT},
+{ "fstw", 0x24001200, 0xfc00df80, "cMfT,5(b)", pa10, FLAG_STRICT},
+{ "fstw", 0x24001200, 0xfc001f80, "cMfT,5(s,b)", pa10, FLAG_STRICT},
+{ "fstw", 0x24001200, 0xfc00d380, "cmcCfT,5(b)", pa11, FLAG_STRICT},
+{ "fstw", 0x24001200, 0xfc001380, "cmcCfT,5(s,b)", pa11, FLAG_STRICT},
+{ "fstw", 0x7c000000, 0xfc000004, "fE,y(b)", pa20w, FLAG_STRICT},
+{ "fstw", 0x78000000, 0xfc000000, "cJfE,y(b)", pa20w, FLAG_STRICT},
+{ "fstw", 0x7c000000, 0xfc00c004, "fE,d(b)", pa20, FLAG_STRICT},
+{ "fstw", 0x7c000000, 0xfc000004, "fE,d(s,b)", pa20, FLAG_STRICT},
+{ "fstw", 0x78000000, 0xfc00c000, "cJfE,d(b)", pa20, FLAG_STRICT},
+{ "fstw", 0x78000000, 0xfc000000, "cJfE,d(s,b)", pa20, FLAG_STRICT},
+{ "fstd", 0x2c000200, 0xfc00dfc0, "cXft,x(b)", pa10, FLAG_STRICT},
+{ "fstd", 0x2c000200, 0xfc001fc0, "cXft,x(s,b)", pa10, FLAG_STRICT},
+{ "fstd", 0x2c000200, 0xfc00d3c0, "cxcCft,x(b)", pa11, FLAG_STRICT},
+{ "fstd", 0x2c000200, 0xfc0013c0, "cxcCft,x(s,b)", pa11, FLAG_STRICT},
+{ "fstd", 0x2c001220, 0xfc1ff3e0, "cocCft,@(b)", pa20, FLAG_STRICT},
+{ "fstd", 0x2c001220, 0xfc1f33e0, "cocCft,@(s,b)", pa20, FLAG_STRICT},
+{ "fstd", 0x2c001200, 0xfc00dfc0, "cMft,5(b)", pa10, FLAG_STRICT},
+{ "fstd", 0x2c001200, 0xfc001fc0, "cMft,5(s,b)", pa10, FLAG_STRICT},
+{ "fstd", 0x2c001200, 0xfc00d3c0, "cmcCft,5(b)", pa11, FLAG_STRICT},
+{ "fstd", 0x2c001200, 0xfc0013c0, "cmcCft,5(s,b)", pa11, FLAG_STRICT},
+{ "fstd", 0x70000002, 0xfc000002, "cqfx,&(b)", pa20w, FLAG_STRICT},
+{ "fstd", 0x70000002, 0xfc00c002, "cqfx,#(b)", pa20, FLAG_STRICT},
+{ "fstd", 0x70000002, 0xfc000002, "cqfx,#(s,b)", pa20, FLAG_STRICT},
+{ "fldwx", 0x24000000, 0xfc00df80, "cXx(b),fT", pa10, FLAG_STRICT},
+{ "fldwx", 0x24000000, 0xfc001f80, "cXx(s,b),fT", pa10, FLAG_STRICT},
+{ "fldwx", 0x24000000, 0xfc00d380, "cxccx(b),fT", pa11, FLAG_STRICT},
+{ "fldwx", 0x24000000, 0xfc001380, "cxccx(s,b),fT", pa11, FLAG_STRICT},
+{ "fldwx", 0x24000000, 0xfc00df80, "cXx(b),fT", pa10, 0},
+{ "fldwx", 0x24000000, 0xfc001f80, "cXx(s,b),fT", pa10, 0},
+{ "flddx", 0x2c000000, 0xfc00dfc0, "cXx(b),ft", pa10, FLAG_STRICT},
+{ "flddx", 0x2c000000, 0xfc001fc0, "cXx(s,b),ft", pa10, FLAG_STRICT},
+{ "flddx", 0x2c000000, 0xfc00d3c0, "cxccx(b),ft", pa11, FLAG_STRICT},
+{ "flddx", 0x2c000000, 0xfc0013c0, "cxccx(s,b),ft", pa11, FLAG_STRICT},
+{ "flddx", 0x2c000000, 0xfc00dfc0, "cXx(b),ft", pa10, 0},
+{ "flddx", 0x2c000000, 0xfc001fc0, "cXx(s,b),ft", pa10, 0},
+{ "fstwx", 0x24000200, 0xfc00df80, "cxfT,x(b)", pa10, FLAG_STRICT},
+{ "fstwx", 0x24000200, 0xfc001f80, "cxfT,x(s,b)", pa10, FLAG_STRICT},
+{ "fstwx", 0x24000200, 0xfc00d380, "cxcCfT,x(b)", pa11, FLAG_STRICT},
+{ "fstwx", 0x24000200, 0xfc001380, "cxcCfT,x(s,b)", pa11, FLAG_STRICT},
+{ "fstwx", 0x24000200, 0xfc00df80, "cxfT,x(b)", pa10, 0},
+{ "fstwx", 0x24000200, 0xfc001f80, "cxfT,x(s,b)", pa10, 0},
+{ "fstdx", 0x2c000200, 0xfc00dfc0, "cxft,x(b)", pa10, FLAG_STRICT},
+{ "fstdx", 0x2c000200, 0xfc001fc0, "cxft,x(s,b)", pa10, FLAG_STRICT},
+{ "fstdx", 0x2c000200, 0xfc00d3c0, "cxcCft,x(b)", pa11, FLAG_STRICT},
+{ "fstdx", 0x2c000200, 0xfc0013c0, "cxcCft,x(s,b)", pa11, FLAG_STRICT},
+{ "fstdx", 0x2c000200, 0xfc00dfc0, "cxft,x(b)", pa10, 0},
+{ "fstdx", 0x2c000200, 0xfc001fc0, "cxft,x(s,b)", pa10, 0},
+{ "fstqx", 0x3c000200, 0xfc00dfc0, "cxft,x(b)", pa10, 0},
+{ "fstqx", 0x3c000200, 0xfc001fc0, "cxft,x(s,b)", pa10, 0},
+{ "fldws", 0x24001000, 0xfc00df80, "cm5(b),fT", pa10, FLAG_STRICT},
+{ "fldws", 0x24001000, 0xfc001f80, "cm5(s,b),fT", pa10, FLAG_STRICT},
+{ "fldws", 0x24001000, 0xfc00d380, "cmcc5(b),fT", pa11, FLAG_STRICT},
+{ "fldws", 0x24001000, 0xfc001380, "cmcc5(s,b),fT", pa11, FLAG_STRICT},
+{ "fldws", 0x24001000, 0xfc00df80, "cm5(b),fT", pa10, 0},
+{ "fldws", 0x24001000, 0xfc001f80, "cm5(s,b),fT", pa10, 0},
+{ "fldds", 0x2c001000, 0xfc00dfc0, "cm5(b),ft", pa10, FLAG_STRICT},
+{ "fldds", 0x2c001000, 0xfc001fc0, "cm5(s,b),ft", pa10, FLAG_STRICT},
+{ "fldds", 0x2c001000, 0xfc00d3c0, "cmcc5(b),ft", pa11, FLAG_STRICT},
+{ "fldds", 0x2c001000, 0xfc0013c0, "cmcc5(s,b),ft", pa11, FLAG_STRICT},
+{ "fldds", 0x2c001000, 0xfc00dfc0, "cm5(b),ft", pa10, 0},
+{ "fldds", 0x2c001000, 0xfc001fc0, "cm5(s,b),ft", pa10, 0},
+{ "fstws", 0x24001200, 0xfc00df80, "cmfT,5(b)", pa10, FLAG_STRICT},
+{ "fstws", 0x24001200, 0xfc001f80, "cmfT,5(s,b)", pa10, FLAG_STRICT},
+{ "fstws", 0x24001200, 0xfc00d380, "cmcCfT,5(b)", pa11, FLAG_STRICT},
+{ "fstws", 0x24001200, 0xfc001380, "cmcCfT,5(s,b)", pa11, FLAG_STRICT},
+{ "fstws", 0x24001200, 0xfc00df80, "cmfT,5(b)", pa10, 0},
+{ "fstws", 0x24001200, 0xfc001f80, "cmfT,5(s,b)", pa10, 0},
+{ "fstds", 0x2c001200, 0xfc00dfc0, "cmft,5(b)", pa10, FLAG_STRICT},
+{ "fstds", 0x2c001200, 0xfc001fc0, "cmft,5(s,b)", pa10, FLAG_STRICT},
+{ "fstds", 0x2c001200, 0xfc00d3c0, "cmcCft,5(b)", pa11, FLAG_STRICT},
+{ "fstds", 0x2c001200, 0xfc0013c0, "cmcCft,5(s,b)", pa11, FLAG_STRICT},
+{ "fstds", 0x2c001200, 0xfc00dfc0, "cmft,5(b)", pa10, 0},
+{ "fstds", 0x2c001200, 0xfc001fc0, "cmft,5(s,b)", pa10, 0},
+{ "fstqs", 0x3c001200, 0xfc00dfc0, "cmft,5(b)", pa10, 0},
+{ "fstqs", 0x3c001200, 0xfc001fc0, "cmft,5(s,b)", pa10, 0},
+{ "fadd", 0x30000600, 0xfc00e7e0, "Ffa,fb,fT", pa10, 0},
+{ "fadd", 0x38000600, 0xfc00e720, "IfA,fB,fT", pa10, 0},
+{ "fsub", 0x30002600, 0xfc00e7e0, "Ffa,fb,fT", pa10, 0},
+{ "fsub", 0x38002600, 0xfc00e720, "IfA,fB,fT", pa10, 0},
+{ "fmpy", 0x30004600, 0xfc00e7e0, "Ffa,fb,fT", pa10, 0},
+{ "fmpy", 0x38004600, 0xfc00e720, "IfA,fB,fT", pa10, 0},
+{ "fdiv", 0x30006600, 0xfc00e7e0, "Ffa,fb,fT", pa10, 0},
+{ "fdiv", 0x38006600, 0xfc00e720, "IfA,fB,fT", pa10, 0},
+{ "fsqrt", 0x30008000, 0xfc1fe7e0, "Ffa,fT", pa10, 0},
+{ "fsqrt", 0x38008000, 0xfc1fe720, "FfA,fT", pa10, 0},
+{ "fabs", 0x30006000, 0xfc1fe7e0, "Ffa,fT", pa10, 0},
+{ "fabs", 0x38006000, 0xfc1fe720, "FfA,fT", pa10, 0},
+{ "frem", 0x30008600, 0xfc00e7e0, "Ffa,fb,fT", pa10, 0},
+{ "frem", 0x38008600, 0xfc00e720, "FfA,fB,fT", pa10, 0},
+{ "frnd", 0x3000a000, 0xfc1fe7e0, "Ffa,fT", pa10, 0},
+{ "frnd", 0x3800a000, 0xfc1fe720, "FfA,fT", pa10, 0},
+{ "fcpy", 0x30004000, 0xfc1fe7e0, "Ffa,fT", pa10, 0},
+{ "fcpy", 0x38004000, 0xfc1fe720, "FfA,fT", pa10, 0},
+{ "fcnvff", 0x30000200, 0xfc1f87e0, "FGfa,fT", pa10, 0},
+{ "fcnvff", 0x38000200, 0xfc1f8720, "FGfA,fT", pa10, 0},
+{ "fcnvxf", 0x30008200, 0xfc1f87e0, "FGfa,fT", pa10, 0},
+{ "fcnvxf", 0x38008200, 0xfc1f8720, "FGfA,fT", pa10, 0},
+{ "fcnvfx", 0x30010200, 0xfc1f87e0, "FGfa,fT", pa10, 0},
+{ "fcnvfx", 0x38010200, 0xfc1f8720, "FGfA,fT", pa10, 0},
+{ "fcnvfxt", 0x30018200, 0xfc1f87e0, "FGfa,fT", pa10, 0},
+{ "fcnvfxt", 0x38018200, 0xfc1f8720, "FGfA,fT", pa10, 0},
+{ "fmpyfadd", 0xb8000000, 0xfc000020, "IfA,fB,fC,fT", pa20, FLAG_STRICT},
+{ "fmpynfadd", 0xb8000020, 0xfc000020, "IfA,fB,fC,fT", pa20, FLAG_STRICT},
+{ "fneg", 0x3000c000, 0xfc1fe7e0, "Ffa,fT", pa20, FLAG_STRICT},
+{ "fneg", 0x3800c000, 0xfc1fe720, "IfA,fT", pa20, FLAG_STRICT},
+{ "fnegabs", 0x3000e000, 0xfc1fe7e0, "Ffa,fT", pa20, FLAG_STRICT},
+{ "fnegabs", 0x3800e000, 0xfc1fe720, "IfA,fT", pa20, FLAG_STRICT},
+{ "fcnv", 0x30000200, 0xfc1c0720, "{_fa,fT", pa20, FLAG_STRICT},
+{ "fcnv", 0x38000200, 0xfc1c0720, "FGfA,fT", pa20, FLAG_STRICT},
+{ "fcmp", 0x30000400, 0xfc00e7e0, "F?ffa,fb", pa10, FLAG_STRICT},
+{ "fcmp", 0x38000400, 0xfc00e720, "I?ffA,fB", pa10, FLAG_STRICT},
+{ "fcmp", 0x30000400, 0xfc0007e0, "F?ffa,fb,h", pa20, FLAG_STRICT},
+{ "fcmp", 0x38000400, 0xfc000720, "I?ffA,fB,h", pa20, FLAG_STRICT},
+{ "fcmp", 0x30000400, 0xfc00e7e0, "F?ffa,fb", pa10, 0},
+{ "fcmp", 0x38000400, 0xfc00e720, "I?ffA,fB", pa10, 0},
+{ "xmpyu", 0x38004700, 0xfc00e720, "fX,fB,fT", pa11, 0},
+{ "fmpyadd", 0x18000000, 0xfc000000, "Hfi,fj,fk,fl,fm", pa11, 0},
+{ "fmpysub", 0x98000000, 0xfc000000, "Hfi,fj,fk,fl,fm", pa11, 0},
+{ "ftest", 0x30002420, 0xffffffff, "", pa10, FLAG_STRICT},
+{ "ftest", 0x30002420, 0xffffffe0, ",=", pa20, FLAG_STRICT},
+{ "ftest", 0x30000420, 0xffff1fff, "m", pa20, FLAG_STRICT},
+{ "fid", 0x30000000, 0xffffffff, "", pa11, 0},
+
+/* Performance Monitor Instructions. */
+
+{ "pmdis", 0x30000280, 0xffffffdf, "N", pa20, FLAG_STRICT},
+{ "pmenb", 0x30000680, 0xffffffff, "", pa20, FLAG_STRICT},
+
+/* Assist Instructions. */
+
+{ "spop0", 0x10000000, 0xfc000600, "v,ON", pa10, 0},
+{ "spop1", 0x10000200, 0xfc000600, "v,oNt", pa10, 0},
+{ "spop2", 0x10000400, 0xfc000600, "v,1Nb", pa10, 0},
+{ "spop3", 0x10000600, 0xfc000600, "v,0Nx,b", pa10, 0},
+{ "copr", 0x30000000, 0xfc000000, "u,2N", pa10, 0},
+{ "cldw", 0x24000000, 0xfc00de00, "ucXx(b),t", pa10, FLAG_STRICT},
+{ "cldw", 0x24000000, 0xfc001e00, "ucXx(s,b),t", pa10, FLAG_STRICT},
+{ "cldw", 0x24000000, 0xfc00d200, "ucxccx(b),t", pa11, FLAG_STRICT},
+{ "cldw", 0x24000000, 0xfc001200, "ucxccx(s,b),t", pa11, FLAG_STRICT},
+{ "cldw", 0x24001000, 0xfc00d200, "ucocc@(b),t", pa20, FLAG_STRICT},
+{ "cldw", 0x24001000, 0xfc001200, "ucocc@(s,b),t", pa20, FLAG_STRICT},
+{ "cldw", 0x24001000, 0xfc00de00, "ucM5(b),t", pa10, FLAG_STRICT},
+{ "cldw", 0x24001000, 0xfc001e00, "ucM5(s,b),t", pa10, FLAG_STRICT},
+{ "cldw", 0x24001000, 0xfc00d200, "ucmcc5(b),t", pa11, FLAG_STRICT},
+{ "cldw", 0x24001000, 0xfc001200, "ucmcc5(s,b),t", pa11, FLAG_STRICT},
+{ "cldd", 0x2c000000, 0xfc00de00, "ucXx(b),t", pa10, FLAG_STRICT},
+{ "cldd", 0x2c000000, 0xfc001e00, "ucXx(s,b),t", pa10, FLAG_STRICT},
+{ "cldd", 0x2c000000, 0xfc00d200, "ucxccx(b),t", pa11, FLAG_STRICT},
+{ "cldd", 0x2c000000, 0xfc001200, "ucxccx(s,b),t", pa11, FLAG_STRICT},
+{ "cldd", 0x2c001000, 0xfc00d200, "ucocc@(b),t", pa20, FLAG_STRICT},
+{ "cldd", 0x2c001000, 0xfc001200, "ucocc@(s,b),t", pa20, FLAG_STRICT},
+{ "cldd", 0x2c001000, 0xfc00de00, "ucM5(b),t", pa10, FLAG_STRICT},
+{ "cldd", 0x2c001000, 0xfc001e00, "ucM5(s,b),t", pa10, FLAG_STRICT},
+{ "cldd", 0x2c001000, 0xfc00d200, "ucmcc5(b),t", pa11, FLAG_STRICT},
+{ "cldd", 0x2c001000, 0xfc001200, "ucmcc5(s,b),t", pa11, FLAG_STRICT},
+{ "cstw", 0x24000200, 0xfc00de00, "ucXt,x(b)", pa10, FLAG_STRICT},
+{ "cstw", 0x24000200, 0xfc001e00, "ucXt,x(s,b)", pa10, FLAG_STRICT},
+{ "cstw", 0x24000200, 0xfc00d200, "ucxcCt,x(b)", pa11, FLAG_STRICT},
+{ "cstw", 0x24000200, 0xfc001200, "ucxcCt,x(s,b)", pa11, FLAG_STRICT},
+{ "cstw", 0x24001200, 0xfc00d200, "ucocCt,@(b)", pa20, FLAG_STRICT},
+{ "cstw", 0x24001200, 0xfc001200, "ucocCt,@(s,b)", pa20, FLAG_STRICT},
+{ "cstw", 0x24001200, 0xfc00de00, "ucMt,5(b)", pa10, FLAG_STRICT},
+{ "cstw", 0x24001200, 0xfc001e00, "ucMt,5(s,b)", pa10, FLAG_STRICT},
+{ "cstw", 0x24001200, 0xfc00d200, "ucmcCt,5(b)", pa11, FLAG_STRICT},
+{ "cstw", 0x24001200, 0xfc001200, "ucmcCt,5(s,b)", pa11, FLAG_STRICT},
+{ "cstd", 0x2c000200, 0xfc00de00, "ucXt,x(b)", pa10, FLAG_STRICT},
+{ "cstd", 0x2c000200, 0xfc001e00, "ucXt,x(s,b)", pa10, FLAG_STRICT},
+{ "cstd", 0x2c000200, 0xfc00d200, "ucxcCt,x(b)", pa11, FLAG_STRICT},
+{ "cstd", 0x2c000200, 0xfc001200, "ucxcCt,x(s,b)", pa11, FLAG_STRICT},
+{ "cstd", 0x2c001200, 0xfc00d200, "ucocCt,@(b)", pa20, FLAG_STRICT},
+{ "cstd", 0x2c001200, 0xfc001200, "ucocCt,@(s,b)", pa20, FLAG_STRICT},
+{ "cstd", 0x2c001200, 0xfc00de00, "ucMt,5(b)", pa10, FLAG_STRICT},
+{ "cstd", 0x2c001200, 0xfc001e00, "ucMt,5(s,b)", pa10, FLAG_STRICT},
+{ "cstd", 0x2c001200, 0xfc00d200, "ucmcCt,5(b)", pa11, FLAG_STRICT},
+{ "cstd", 0x2c001200, 0xfc001200, "ucmcCt,5(s,b)", pa11, FLAG_STRICT},
+{ "cldwx", 0x24000000, 0xfc00de00, "ucXx(b),t", pa10, FLAG_STRICT},
+{ "cldwx", 0x24000000, 0xfc001e00, "ucXx(s,b),t", pa10, FLAG_STRICT},
+{ "cldwx", 0x24000000, 0xfc00d200, "ucxccx(b),t", pa11, FLAG_STRICT},
+{ "cldwx", 0x24000000, 0xfc001200, "ucxccx(s,b),t", pa11, FLAG_STRICT},
+{ "cldwx", 0x24000000, 0xfc00de00, "ucXx(b),t", pa10, 0},
+{ "cldwx", 0x24000000, 0xfc001e00, "ucXx(s,b),t", pa10, 0},
+{ "clddx", 0x2c000000, 0xfc00de00, "ucXx(b),t", pa10, FLAG_STRICT},
+{ "clddx", 0x2c000000, 0xfc001e00, "ucXx(s,b),t", pa10, FLAG_STRICT},
+{ "clddx", 0x2c000000, 0xfc00d200, "ucxccx(b),t", pa11, FLAG_STRICT},
+{ "clddx", 0x2c000000, 0xfc001200, "ucxccx(s,b),t", pa11, FLAG_STRICT},
+{ "clddx", 0x2c000000, 0xfc00de00, "ucXx(b),t", pa10, 0},
+{ "clddx", 0x2c000000, 0xfc001e00, "ucXx(s,b),t", pa10, 0},
+{ "cstwx", 0x24000200, 0xfc00de00, "ucXt,x(b)", pa10, FLAG_STRICT},
+{ "cstwx", 0x24000200, 0xfc001e00, "ucXt,x(s,b)", pa10, FLAG_STRICT},
+{ "cstwx", 0x24000200, 0xfc00d200, "ucxcCt,x(b)", pa11, FLAG_STRICT},
+{ "cstwx", 0x24000200, 0xfc001200, "ucxcCt,x(s,b)", pa11, FLAG_STRICT},
+{ "cstwx", 0x24000200, 0xfc00de00, "ucXt,x(b)", pa10, 0},
+{ "cstwx", 0x24000200, 0xfc001e00, "ucXt,x(s,b)", pa10, 0},
+{ "cstdx", 0x2c000200, 0xfc00de00, "ucXt,x(b)", pa10, FLAG_STRICT},
+{ "cstdx", 0x2c000200, 0xfc001e00, "ucXt,x(s,b)", pa10, FLAG_STRICT},
+{ "cstdx", 0x2c000200, 0xfc00d200, "ucxcCt,x(b)", pa11, FLAG_STRICT},
+{ "cstdx", 0x2c000200, 0xfc001200, "ucxcCt,x(s,b)", pa11, FLAG_STRICT},
+{ "cstdx", 0x2c000200, 0xfc00de00, "ucXt,x(b)", pa10, 0},
+{ "cstdx", 0x2c000200, 0xfc001e00, "ucXt,x(s,b)", pa10, 0},
+{ "cldws", 0x24001000, 0xfc00de00, "ucM5(b),t", pa10, FLAG_STRICT},
+{ "cldws", 0x24001000, 0xfc001e00, "ucM5(s,b),t", pa10, FLAG_STRICT},
+{ "cldws", 0x24001000, 0xfc00d200, "ucmcc5(b),t", pa11, FLAG_STRICT},
+{ "cldws", 0x24001000, 0xfc001200, "ucmcc5(s,b),t", pa11, FLAG_STRICT},
+{ "cldws", 0x24001000, 0xfc00de00, "ucM5(b),t", pa10, 0},
+{ "cldws", 0x24001000, 0xfc001e00, "ucM5(s,b),t", pa10, 0},
+{ "cldds", 0x2c001000, 0xfc00de00, "ucM5(b),t", pa10, FLAG_STRICT},
+{ "cldds", 0x2c001000, 0xfc001e00, "ucM5(s,b),t", pa10, FLAG_STRICT},
+{ "cldds", 0x2c001000, 0xfc00d200, "ucmcc5(b),t", pa11, FLAG_STRICT},
+{ "cldds", 0x2c001000, 0xfc001200, "ucmcc5(s,b),t", pa11, FLAG_STRICT},
+{ "cldds", 0x2c001000, 0xfc00de00, "ucM5(b),t", pa10, 0},
+{ "cldds", 0x2c001000, 0xfc001e00, "ucM5(s,b),t", pa10, 0},
+{ "cstws", 0x24001200, 0xfc00de00, "ucMt,5(b)", pa10, FLAG_STRICT},
+{ "cstws", 0x24001200, 0xfc001e00, "ucMt,5(s,b)", pa10, FLAG_STRICT},
+{ "cstws", 0x24001200, 0xfc00d200, "ucmcCt,5(b)", pa11, FLAG_STRICT},
+{ "cstws", 0x24001200, 0xfc001200, "ucmcCt,5(s,b)", pa11, FLAG_STRICT},
+{ "cstws", 0x24001200, 0xfc00de00, "ucMt,5(b)", pa10, 0},
+{ "cstws", 0x24001200, 0xfc001e00, "ucMt,5(s,b)", pa10, 0},
+{ "cstds", 0x2c001200, 0xfc00de00, "ucMt,5(b)", pa10, FLAG_STRICT},
+{ "cstds", 0x2c001200, 0xfc001e00, "ucMt,5(s,b)", pa10, FLAG_STRICT},
+{ "cstds", 0x2c001200, 0xfc00d200, "ucmcCt,5(b)", pa11, FLAG_STRICT},
+{ "cstds", 0x2c001200, 0xfc001200, "ucmcCt,5(s,b)", pa11, FLAG_STRICT},
+{ "cstds", 0x2c001200, 0xfc00de00, "ucMt,5(b)", pa10, 0},
+{ "cstds", 0x2c001200, 0xfc001e00, "ucMt,5(s,b)", pa10, 0},
+
+/* More pseudo instructions which must follow the main table. */
+{ "call", 0xe800f000, 0xfc1ffffd, "n(b)", pa20, FLAG_STRICT},
+{ "call", 0xe800a000, 0xffe0e000, "nW", pa10, FLAG_STRICT},
+{ "ret", 0xe840d000, 0xfffffffd, "n", pa20, FLAG_STRICT},
+
+};
+
+#define NUMOPCODES ((sizeof pa_opcodes)/(sizeof pa_opcodes[0]))
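+
+/* print_insn_hppa below scans this table linearly and takes the first
+   matching entry, so the order of entries matters (hence the pseudo-ops
+   placed after the main table above).  */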
+
+/* SKV 12/18/92. Added some denotations for various operands. */
+
+#define PA_IMM11_AT_31 'i'
+#define PA_IMM14_AT_31 'j'
+#define PA_IMM21_AT_31 'k'
+#define PA_DISP12 'w'
+#define PA_DISP17 'W'
+
+#define N_HPPA_OPERAND_FORMATS 5
+
+/* Integer register names, indexed by the numbers which appear in the
+ opcodes. */
+static const char *const reg_names[] =
+{
+ "flags", "r1", "rp", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
+ "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
+ "r20", "r21", "r22", "r23", "r24", "r25", "r26", "dp", "ret0", "ret1",
+ "sp", "r31"
+};
+
+/* Floating point register names, indexed by the numbers which appear in the
+ opcodes. */
+static const char *const fp_reg_names[] =
+{
+ "fpsr", "fpe2", "fpe4", "fpe6",
+ "fr4", "fr5", "fr6", "fr7", "fr8",
+ "fr9", "fr10", "fr11", "fr12", "fr13", "fr14", "fr15",
+ "fr16", "fr17", "fr18", "fr19", "fr20", "fr21", "fr22", "fr23",
+ "fr24", "fr25", "fr26", "fr27", "fr28", "fr29", "fr30", "fr31"
+};
+
+typedef unsigned int CORE_ADDR;
+
+/* Get at various relevant fields of an instruction word. */
+
+#define MASK_5 0x1f
+#define MASK_10 0x3ff
+#define MASK_11 0x7ff
+#define MASK_14 0x3fff
+#define MASK_16 0xffff
+#define MASK_21 0x1fffff
+
+/* These macros get bit fields using HP's numbering (MSB = 0). */
+
+#define GET_FIELD(X, FROM, TO) \
+ ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
+
+#define GET_BIT(X, WHICH) \
+ GET_FIELD (X, WHICH, WHICH)
+
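+/* For example, GET_FIELD (insn, 0, 5) yields the 6-bit major opcode:
+   with insn == 0xe8000000 (the "bl" pattern above) this is
+   (0xe8000000 >> 26) & 0x3f == 0x3a.  */
+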
+/* Some of these have been converted to 2-d arrays because they
+ consume less storage this way. If the maintenance becomes a
+ problem, convert them back to const 1-d pointer arrays. */
+static const char *const control_reg[] =
+{
+ "rctr", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",
+ "pidr1", "pidr2", "ccr", "sar", "pidr3", "pidr4",
+ "iva", "eiem", "itmr", "pcsq", "pcoq", "iir", "isr",
+ "ior", "ipsw", "eirr", "tr0", "tr1", "tr2", "tr3",
+ "tr4", "tr5", "tr6", "tr7"
+};
+
+static const char *const compare_cond_names[] =
+{
+ "", ",=", ",<", ",<=", ",<<", ",<<=", ",sv", ",od",
+ ",tr", ",<>", ",>=", ",>", ",>>=", ",>>", ",nsv", ",ev"
+};
+static const char *const compare_cond_64_names[] =
+{
+ "", ",*=", ",*<", ",*<=", ",*<<", ",*<<=", ",*sv", ",*od",
+ ",*tr", ",*<>", ",*>=", ",*>", ",*>>=", ",*>>", ",*nsv", ",*ev"
+};
+static const char *const cmpib_cond_64_names[] =
+{
+ ",*<<", ",*=", ",*<", ",*<=", ",*>>=", ",*<>", ",*>=", ",*>"
+};
+static const char *const add_cond_names[] =
+{
+ "", ",=", ",<", ",<=", ",nuv", ",znv", ",sv", ",od",
+ ",tr", ",<>", ",>=", ",>", ",uv", ",vnz", ",nsv", ",ev"
+};
+static const char *const add_cond_64_names[] =
+{
+ "", ",*=", ",*<", ",*<=", ",*nuv", ",*znv", ",*sv", ",*od",
+ ",*tr", ",*<>", ",*>=", ",*>", ",*uv", ",*vnz", ",*nsv", ",*ev"
+};
+static const char *const wide_add_cond_names[] =
+{
+ "", ",=", ",<", ",<=", ",nuv", ",*=", ",*<", ",*<=",
+ ",tr", ",<>", ",>=", ",>", ",uv", ",*<>", ",*>=", ",*>"
+};
+static const char *const logical_cond_names[] =
+{
+ "", ",=", ",<", ",<=", 0, 0, 0, ",od",
+ ",tr", ",<>", ",>=", ",>", 0, 0, 0, ",ev"};
+static const char *const logical_cond_64_names[] =
+{
+ "", ",*=", ",*<", ",*<=", 0, 0, 0, ",*od",
+ ",*tr", ",*<>", ",*>=", ",*>", 0, 0, 0, ",*ev"};
+static const char *const unit_cond_names[] =
+{
+ "", ",swz", ",sbz", ",shz", ",sdc", ",swc", ",sbc", ",shc",
+ ",tr", ",nwz", ",nbz", ",nhz", ",ndc", ",nwc", ",nbc", ",nhc"
+};
+static const char *const unit_cond_64_names[] =
+{
+ "", ",*swz", ",*sbz", ",*shz", ",*sdc", ",*swc", ",*sbc", ",*shc",
+ ",*tr", ",*nwz", ",*nbz", ",*nhz", ",*ndc", ",*nwc", ",*nbc", ",*nhc"
+};
+static const char *const shift_cond_names[] =
+{
+ "", ",=", ",<", ",od", ",tr", ",<>", ",>=", ",ev"
+};
+static const char *const shift_cond_64_names[] =
+{
+ "", ",*=", ",*<", ",*od", ",*tr", ",*<>", ",*>=", ",*ev"
+};
+static const char *const bb_cond_64_names[] =
+{
+ ",*<", ",*>="
+};
+static const char *const index_compl_names[] = {"", ",m", ",s", ",sm"};
+static const char *const short_ldst_compl_names[] = {"", ",ma", "", ",mb"};
+static const char *const short_bytes_compl_names[] =
+{
+ "", ",b,m", ",e", ",e,m"
+};
+static const char *const float_format_names[] = {",sgl", ",dbl", "", ",quad"};
+static const char *const fcnv_fixed_names[] = {",w", ",dw", "", ",qw"};
+static const char *const fcnv_ufixed_names[] = {",uw", ",udw", "", ",uqw"};
+static const char *const float_comp_names[] =
+{
+ ",false?", ",false", ",?", ",!<=>", ",=", ",=t", ",?=", ",!<>",
+ ",!?>=", ",<", ",?<", ",!>=", ",!?>", ",<=", ",?<=", ",!>",
+ ",!?<=", ",>", ",?>", ",!<=", ",!?<", ",>=", ",?>=", ",!<",
+ ",!?=", ",<>", ",!=", ",!=t", ",!?", ",<=>", ",true?", ",true"
+};
+static const char *const signed_unsigned_names[] = {",u", ",s"};
+static const char *const mix_half_names[] = {",l", ",r"};
+static const char *const saturation_names[] = {",us", ",ss", 0, ""};
+static const char *const read_write_names[] = {",r", ",w"};
+static const char *const add_compl_names[] = { 0, "", ",l", ",tsv" };
+
+/* For a number of different instructions, form an index into a
+   completer name table.  */
+#define GET_COMPL(insn) (GET_FIELD (insn, 26, 26) | \
+ GET_FIELD (insn, 18, 18) << 1)
+
+#define GET_COND(insn) (GET_FIELD ((insn), 16, 18) + \
+ (GET_FIELD ((insn), 19, 19) ? 8 : 0))
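+
+/* GET_COND yields a 4-bit index: the 3-bit condition field plus the
+   negation bit as bit 3, which is why the *_cond_names tables above have
+   16 entries with the negated forms in the upper half.  */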
+
+/* Utility functions to print registers.  They are defined first so that
+   GCC's function inlining can do its job.  */
+
+#define fputs_filtered(STR,F) (*info->fprintf_func) (info->stream, "%s", STR)
+
+static void
+fput_reg (unsigned reg, disassemble_info *info)
+{
+ (*info->fprintf_func) (info->stream, "%s", reg ? reg_names[reg] : "r0");
+}
+
+static void
+fput_fp_reg (unsigned reg, disassemble_info *info)
+{
+ (*info->fprintf_func) (info->stream, "%s", reg ? fp_reg_names[reg] : "fr0");
+}
+
+static void
+fput_fp_reg_r (unsigned reg, disassemble_info *info)
+{
+ /* Special case floating point exception registers. */
+ if (reg < 4)
+ (*info->fprintf_func) (info->stream, "fpe%d", reg * 2 + 1);
+ else
+ (*info->fprintf_func) (info->stream, "%sR",
+ reg ? fp_reg_names[reg] : "fr0");
+}
+
+static void
+fput_creg (unsigned reg, disassemble_info *info)
+{
+ (*info->fprintf_func) (info->stream, "%s", control_reg[reg]);
+}
+
+/* Print constants with sign. */
+
+static void
+fput_const (unsigned num, disassemble_info *info)
+{
+ if ((int) num < 0)
+ (*info->fprintf_func) (info->stream, "-%x", - (int) num);
+ else
+ (*info->fprintf_func) (info->stream, "%x", num);
+}
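+
+/* For example, fput_const (0xfffffffc, info) prints "-4" and
+   fput_const (0x10, info) prints "10" (hex, with no 0x prefix).  */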
+
+/* Routines to extract constants of various sizes out of HPPA
+   instructions.  */
+
+/* Extract a 3-bit space register number from a be, ble, mtsp or mfsp. */
+static int
+extract_3 (unsigned word)
+{
+ return GET_FIELD (word, 18, 18) << 2 | GET_FIELD (word, 16, 17);
+}
+
+static int
+extract_5_load (unsigned word)
+{
+ return low_sign_extend (word >> 16 & MASK_5, 5);
+}
+
+/* Extract the immediate field from a st{bhw}s instruction. */
+
+static int
+extract_5_store (unsigned word)
+{
+ return low_sign_extend (word & MASK_5, 5);
+}
+
+/* Extract the immediate field from a break instruction. */
+
+static unsigned
+extract_5r_store (unsigned word)
+{
+ return (word & MASK_5);
+}
+
+/* Extract the immediate field from a {sr}sm instruction. */
+
+static unsigned
+extract_5R_store (unsigned word)
+{
+ return (word >> 16 & MASK_5);
+}
+
+/* Extract the 10 bit immediate field from a {sr}sm instruction. */
+
+static unsigned
+extract_10U_store (unsigned word)
+{
+ return (word >> 16 & MASK_10);
+}
+
+/* Extract the immediate field from a bb instruction. */
+
+static unsigned
+extract_5Q_store (unsigned word)
+{
+ return (word >> 21 & MASK_5);
+}
+
+/* Extract an 11 bit immediate field. */
+
+static int
+extract_11 (unsigned word)
+{
+ return low_sign_extend (word & MASK_11, 11);
+}
+
+/* Extract a 14 bit immediate field. */
+
+static int
+extract_14 (unsigned word)
+{
+ return low_sign_extend (word & MASK_14, 14);
+}
+
+/* Extract a 16 bit immediate field (PA2.0 wide only). */
+
+static int
+extract_16 (unsigned word)
+{
+ int m15, m0, m1;
+
+ m0 = GET_BIT (word, 16);
+ m1 = GET_BIT (word, 17);
+ m15 = GET_BIT (word, 31);
+ word = (word >> 1) & 0x1fff;
+ word = word | (m15 << 15) | ((m15 ^ m0) << 14) | ((m15 ^ m1) << 13);
+ return sign_extend (word, 16);
+}
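+
+/* In this encoding the sign comes from the low instruction bit (HP bit 31)
+   and HP bits 16/17 are stored XORed with it, so the top three bits of the
+   16-bit value are rebuilt from m15, m15 ^ m0 and m15 ^ m1 above before
+   sign extension.  */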
+
+/* Extract a 21 bit constant. */
+
+static int
+extract_21 (unsigned word)
+{
+ int val;
+
+ word &= MASK_21;
+ word <<= 11;
+ val = GET_FIELD (word, 20, 20);
+ val <<= 11;
+ val |= GET_FIELD (word, 9, 19);
+ val <<= 2;
+ val |= GET_FIELD (word, 5, 6);
+ val <<= 5;
+ val |= GET_FIELD (word, 0, 4);
+ val <<= 2;
+ val |= GET_FIELD (word, 7, 8);
+ return sign_extend (val, 21) << 11;
+}
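+
+/* The 21-bit immediate of ldil/addil is stored permuted in the instruction
+   word; the shifts above reassemble it in natural bit order and return it
+   already shifted left by 11, i.e. as the upper part of a 32-bit value.  */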
+
+/* Extract a 12 bit constant from branch instructions. */
+
+static int
+extract_12 (unsigned word)
+{
+ return sign_extend (GET_FIELD (word, 19, 28)
+ | GET_FIELD (word, 29, 29) << 10
+ | (word & 0x1) << 11, 12) << 2;
+}
+
+/* Extract a 17 bit constant from branch instructions, returning the
+ 19 bit signed value. */
+
+static int
+extract_17 (unsigned word)
+{
+ return sign_extend (GET_FIELD (word, 19, 28)
+ | GET_FIELD (word, 29, 29) << 10
+ | GET_FIELD (word, 11, 15) << 11
+ | (word & 0x1) << 16, 17) << 2;
+}
+
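+/* Extract a 22 bit constant from branch instructions, returning the
+   24 bit signed value.  */
+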
+static int
+extract_22 (unsigned word)
+{
+ return sign_extend (GET_FIELD (word, 19, 28)
+ | GET_FIELD (word, 29, 29) << 10
+ | GET_FIELD (word, 11, 15) << 11
+ | GET_FIELD (word, 6, 10) << 16
+ | (word & 0x1) << 21, 22) << 2;
+}
+
+/* Print one instruction. */
+
+int
+print_insn_hppa (bfd_vma memaddr, disassemble_info *info)
+{
+ bfd_byte buffer[4];
+ unsigned int insn, i;
+
+ {
+ int status =
+ (*info->read_memory_func) (memaddr, buffer, sizeof (buffer), info);
+ if (status != 0)
+ {
+ (*info->memory_error_func) (status, memaddr, info);
+ return -1;
+ }
+ }
+
+ insn = bfd_getb32 (buffer);
+
+ for (i = 0; i < NUMOPCODES; ++i)
+ {
+ const struct pa_opcode *opcode = &pa_opcodes[i];
+
+ if ((insn & opcode->mask) == opcode->match)
+ {
+ const char *s;
+#ifndef BFD64
+ if (opcode->arch == pa20w)
+ continue;
+#endif
+ (*info->fprintf_func) (info->stream, "%s", opcode->name);
+
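+	  /* These first argument specifiers (completers, FP formats,
+	     conditions, nullification and the like) handle their own
+	     spacing, so only emit the separating space for the rest.  */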
+ if (!strchr ("cfCY?-+nHNZFIuv{", opcode->args[0]))
+ (*info->fprintf_func) (info->stream, " ");
+ for (s = opcode->args; *s != '\0'; ++s)
+ {
+ switch (*s)
+ {
+ case 'x':
+ fput_reg (GET_FIELD (insn, 11, 15), info);
+ break;
+ case 'a':
+ case 'b':
+ fput_reg (GET_FIELD (insn, 6, 10), info);
+ break;
+ case '^':
+ fput_creg (GET_FIELD (insn, 6, 10), info);
+ break;
+ case 't':
+ fput_reg (GET_FIELD (insn, 27, 31), info);
+ break;
+
+ /* Handle floating point registers. */
+ case 'f':
+ switch (*++s)
+ {
+ case 't':
+ fput_fp_reg (GET_FIELD (insn, 27, 31), info);
+ break;
+ case 'T':
+ if (GET_FIELD (insn, 25, 25))
+ fput_fp_reg_r (GET_FIELD (insn, 27, 31), info);
+ else
+ fput_fp_reg (GET_FIELD (insn, 27, 31), info);
+ break;
+ case 'a':
+ if (GET_FIELD (insn, 25, 25))
+ fput_fp_reg_r (GET_FIELD (insn, 6, 10), info);
+ else
+ fput_fp_reg (GET_FIELD (insn, 6, 10), info);
+ break;
+
+		      /* 'fA' will not generate a space before the register
+			 name.  Normally that is fine, except that it
+			 causes problems with xmpyu, which has no FP format
+			 completer.  */
+ case 'X':
+ fputs_filtered (" ", info);
+ /* FALLTHRU */
+
+ case 'A':
+ if (GET_FIELD (insn, 24, 24))
+ fput_fp_reg_r (GET_FIELD (insn, 6, 10), info);
+ else
+ fput_fp_reg (GET_FIELD (insn, 6, 10), info);
+ break;
+ case 'b':
+ if (GET_FIELD (insn, 25, 25))
+ fput_fp_reg_r (GET_FIELD (insn, 11, 15), info);
+ else
+ fput_fp_reg (GET_FIELD (insn, 11, 15), info);
+ break;
+ case 'B':
+ if (GET_FIELD (insn, 19, 19))
+ fput_fp_reg_r (GET_FIELD (insn, 11, 15), info);
+ else
+ fput_fp_reg (GET_FIELD (insn, 11, 15), info);
+ break;
+ case 'C':
+ {
+ int reg = GET_FIELD (insn, 21, 22);
+ reg |= GET_FIELD (insn, 16, 18) << 2;
+ if (GET_FIELD (insn, 23, 23) != 0)
+ fput_fp_reg_r (reg, info);
+ else
+ fput_fp_reg (reg, info);
+ break;
+ }
+ case 'i':
+ {
+ int reg = GET_FIELD (insn, 6, 10);
+
+ reg |= (GET_FIELD (insn, 26, 26) << 4);
+ fput_fp_reg (reg, info);
+ break;
+ }
+ case 'j':
+ {
+ int reg = GET_FIELD (insn, 11, 15);
+
+ reg |= (GET_FIELD (insn, 26, 26) << 4);
+ fput_fp_reg (reg, info);
+ break;
+ }
+ case 'k':
+ {
+ int reg = GET_FIELD (insn, 27, 31);
+
+ reg |= (GET_FIELD (insn, 26, 26) << 4);
+ fput_fp_reg (reg, info);
+ break;
+ }
+ case 'l':
+ {
+ int reg = GET_FIELD (insn, 21, 25);
+
+ reg |= (GET_FIELD (insn, 26, 26) << 4);
+ fput_fp_reg (reg, info);
+ break;
+ }
+ case 'm':
+ {
+ int reg = GET_FIELD (insn, 16, 20);
+
+ reg |= (GET_FIELD (insn, 26, 26) << 4);
+ fput_fp_reg (reg, info);
+ break;
+ }
+
+		      /* 'fe' will not generate a space before the register
+			 name.  Normally that is fine, except that it
+			 causes problems with fstw fe,y(b), which has no FP
+			 format completer.  */
+ case 'E':
+ fputs_filtered (" ", info);
+ /* FALLTHRU */
+
+ case 'e':
+ if (GET_FIELD (insn, 30, 30))
+ fput_fp_reg_r (GET_FIELD (insn, 11, 15), info);
+ else
+ fput_fp_reg (GET_FIELD (insn, 11, 15), info);
+ break;
+ case 'x':
+ fput_fp_reg (GET_FIELD (insn, 11, 15), info);
+ break;
+ }
+ break;
+
+ case '5':
+ fput_const (extract_5_load (insn), info);
+ break;
+ case 's':
+ {
+ int space = GET_FIELD (insn, 16, 17);
+ /* Zero means implicit addressing, not use of sr0. */
+ if (space != 0)
+ (*info->fprintf_func) (info->stream, "sr%d", space);
+ }
+ break;
+
+ case 'S':
+ (*info->fprintf_func) (info->stream, "sr%d",
+ extract_3 (insn));
+ break;
+
+ /* Handle completers. */
+ case 'c':
+ switch (*++s)
+ {
+ case 'x':
+ (*info->fprintf_func)
+ (info->stream, "%s",
+ index_compl_names[GET_COMPL (insn)]);
+ break;
+ case 'X':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ index_compl_names[GET_COMPL (insn)]);
+ break;
+ case 'm':
+ (*info->fprintf_func)
+ (info->stream, "%s",
+ short_ldst_compl_names[GET_COMPL (insn)]);
+ break;
+ case 'M':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ short_ldst_compl_names[GET_COMPL (insn)]);
+ break;
+ case 'A':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ short_bytes_compl_names[GET_COMPL (insn)]);
+ break;
+ case 's':
+ (*info->fprintf_func)
+ (info->stream, "%s",
+ short_bytes_compl_names[GET_COMPL (insn)]);
+ break;
+ case 'c':
+ case 'C':
+ switch (GET_FIELD (insn, 20, 21))
+ {
+ case 1:
+ (*info->fprintf_func) (info->stream, ",bc ");
+ break;
+ case 2:
+ (*info->fprintf_func) (info->stream, ",sl ");
+ break;
+ default:
+ (*info->fprintf_func) (info->stream, " ");
+ }
+ break;
+ case 'd':
+ switch (GET_FIELD (insn, 20, 21))
+ {
+ case 1:
+ (*info->fprintf_func) (info->stream, ",co ");
+ break;
+ default:
+ (*info->fprintf_func) (info->stream, " ");
+ }
+ break;
+ case 'o':
+ (*info->fprintf_func) (info->stream, ",o");
+ break;
+ case 'g':
+ (*info->fprintf_func) (info->stream, ",gate");
+ break;
+ case 'p':
+ (*info->fprintf_func) (info->stream, ",l,push");
+ break;
+ case 'P':
+ (*info->fprintf_func) (info->stream, ",pop");
+ break;
+ case 'l':
+ case 'L':
+ (*info->fprintf_func) (info->stream, ",l");
+ break;
+ case 'w':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ read_write_names[GET_FIELD (insn, 25, 25)]);
+ break;
+ case 'W':
+ (*info->fprintf_func) (info->stream, ",w ");
+ break;
+ case 'r':
+ if (GET_FIELD (insn, 23, 26) == 5)
+ (*info->fprintf_func) (info->stream, ",r");
+ break;
+ case 'Z':
+ if (GET_FIELD (insn, 26, 26))
+ (*info->fprintf_func) (info->stream, ",m ");
+ else
+ (*info->fprintf_func) (info->stream, " ");
+ break;
+ case 'i':
+ if (GET_FIELD (insn, 25, 25))
+ (*info->fprintf_func) (info->stream, ",i");
+ break;
+ case 'z':
+ if (!GET_FIELD (insn, 21, 21))
+ (*info->fprintf_func) (info->stream, ",z");
+ break;
+ case 'a':
+ (*info->fprintf_func)
+ (info->stream, "%s",
+ add_compl_names[GET_FIELD (insn, 20, 21)]);
+ break;
+ case 'Y':
+ (*info->fprintf_func)
+ (info->stream, ",dc%s",
+ add_compl_names[GET_FIELD (insn, 20, 21)]);
+ break;
+ case 'y':
+ (*info->fprintf_func)
+ (info->stream, ",c%s",
+ add_compl_names[GET_FIELD (insn, 20, 21)]);
+ break;
+ case 'v':
+ if (GET_FIELD (insn, 20, 20))
+ (*info->fprintf_func) (info->stream, ",tsv");
+ break;
+ case 't':
+ (*info->fprintf_func) (info->stream, ",tc");
+ if (GET_FIELD (insn, 20, 20))
+ (*info->fprintf_func) (info->stream, ",tsv");
+ break;
+ case 'B':
+ (*info->fprintf_func) (info->stream, ",db");
+ if (GET_FIELD (insn, 20, 20))
+ (*info->fprintf_func) (info->stream, ",tsv");
+ break;
+ case 'b':
+ (*info->fprintf_func) (info->stream, ",b");
+ if (GET_FIELD (insn, 20, 20))
+ (*info->fprintf_func) (info->stream, ",tsv");
+ break;
+ case 'T':
+ if (GET_FIELD (insn, 25, 25))
+ (*info->fprintf_func) (info->stream, ",tc");
+ break;
+ case 'S':
+ /* EXTRD/W has a following condition. */
+ if (*(s + 1) == '?')
+ (*info->fprintf_func)
+ (info->stream, "%s",
+ signed_unsigned_names[GET_FIELD (insn, 21, 21)]);
+ else
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ signed_unsigned_names[GET_FIELD (insn, 21, 21)]);
+ break;
+ case 'h':
+ (*info->fprintf_func)
+ (info->stream, "%s",
+ mix_half_names[GET_FIELD (insn, 17, 17)]);
+ break;
+ case 'H':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ saturation_names[GET_FIELD (insn, 24, 25)]);
+ break;
+ case '*':
+ (*info->fprintf_func)
+ (info->stream, ",%d%d%d%d ",
+ GET_FIELD (insn, 17, 18), GET_FIELD (insn, 20, 21),
+ GET_FIELD (insn, 22, 23), GET_FIELD (insn, 24, 25));
+ break;
+
+ case 'q':
+ {
+ int m, a;
+
+ m = GET_FIELD (insn, 28, 28);
+ a = GET_FIELD (insn, 29, 29);
+
+ if (m && !a)
+ fputs_filtered (",ma ", info);
+ else if (m && a)
+ fputs_filtered (",mb ", info);
+ else
+ fputs_filtered (" ", info);
+ break;
+ }
+
+ case 'J':
+ {
+ int opc = GET_FIELD (insn, 0, 5);
+
+ if (opc == 0x16 || opc == 0x1e)
+ {
+ if (GET_FIELD (insn, 29, 29) == 0)
+ fputs_filtered (",ma ", info);
+ else
+ fputs_filtered (",mb ", info);
+ }
+ else
+ fputs_filtered (" ", info);
+ break;
+ }
+
+ case 'e':
+ {
+ int opc = GET_FIELD (insn, 0, 5);
+
+ if (opc == 0x13 || opc == 0x1b)
+ {
+ if (GET_FIELD (insn, 18, 18) == 1)
+ fputs_filtered (",mb ", info);
+ else
+ fputs_filtered (",ma ", info);
+ }
+ else if (opc == 0x17 || opc == 0x1f)
+ {
+ if (GET_FIELD (insn, 31, 31) == 1)
+ fputs_filtered (",ma ", info);
+ else
+ fputs_filtered (",mb ", info);
+ }
+ else
+ fputs_filtered (" ", info);
+
+ break;
+ }
+ }
+ break;
+
+ /* Handle conditions. */
+ case '?':
+ {
+ s++;
+ switch (*s)
+ {
+ case 'f':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ float_comp_names[GET_FIELD (insn, 27, 31)]);
+ break;
+
+ /* These four conditions are for the set of instructions
+ which distinguish true/false conditions by opcode
+ rather than by the 'f' bit (sigh): comb, comib,
+ addb, addib. */
+ case 't':
+ fputs_filtered
+ (compare_cond_names[GET_FIELD (insn, 16, 18)], info);
+ break;
+ case 'n':
+ fputs_filtered
+ (compare_cond_names[GET_FIELD (insn, 16, 18)
+ + GET_FIELD (insn, 4, 4) * 8],
+ info);
+ break;
+ case 'N':
+ fputs_filtered
+ (compare_cond_64_names[GET_FIELD (insn, 16, 18)
+ + GET_FIELD (insn, 2, 2) * 8],
+ info);
+ break;
+ case 'Q':
+ fputs_filtered
+ (cmpib_cond_64_names[GET_FIELD (insn, 16, 18)],
+ info);
+ break;
+ case '@':
+ fputs_filtered
+ (add_cond_names[GET_FIELD (insn, 16, 18)
+ + GET_FIELD (insn, 4, 4) * 8],
+ info);
+ break;
+ case 's':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ compare_cond_names[GET_COND (insn)]);
+ break;
+ case 'S':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ compare_cond_64_names[GET_COND (insn)]);
+ break;
+ case 'a':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ add_cond_names[GET_COND (insn)]);
+ break;
+ case 'A':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ add_cond_64_names[GET_COND (insn)]);
+ break;
+ case 'd':
+ (*info->fprintf_func)
+ (info->stream, "%s",
+ add_cond_names[GET_FIELD (insn, 16, 18)]);
+ break;
+
+ case 'W':
+ (*info->fprintf_func)
+ (info->stream, "%s",
+ wide_add_cond_names[GET_FIELD (insn, 16, 18) +
+ GET_FIELD (insn, 4, 4) * 8]);
+ break;
+
+ case 'l':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ logical_cond_names[GET_COND (insn)]);
+ break;
+ case 'L':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ logical_cond_64_names[GET_COND (insn)]);
+ break;
+ case 'u':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ unit_cond_names[GET_COND (insn)]);
+ break;
+ case 'U':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ unit_cond_64_names[GET_COND (insn)]);
+ break;
+ case 'y':
+ case 'x':
+ case 'b':
+ (*info->fprintf_func)
+ (info->stream, "%s",
+ shift_cond_names[GET_FIELD (insn, 16, 18)]);
+
+ /* If the next character in args is 'n', it will handle
+ putting out the space. */
+ if (s[1] != 'n')
+ (*info->fprintf_func) (info->stream, " ");
+ break;
+ case 'X':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ shift_cond_64_names[GET_FIELD (insn, 16, 18)]);
+ break;
+ case 'B':
+ (*info->fprintf_func)
+ (info->stream, "%s",
+ bb_cond_64_names[GET_FIELD (insn, 16, 16)]);
+
+ /* If the next character in args is 'n', it will handle
+ putting out the space. */
+ if (s[1] != 'n')
+ (*info->fprintf_func) (info->stream, " ");
+ break;
+ }
+ break;
+ }
+
+ case 'V':
+ fput_const (extract_5_store (insn), info);
+ break;
+ case 'r':
+ fput_const (extract_5r_store (insn), info);
+ break;
+ case 'R':
+ fput_const (extract_5R_store (insn), info);
+ break;
+ case 'U':
+ fput_const (extract_10U_store (insn), info);
+ break;
+ case 'B':
+ case 'Q':
+ fput_const (extract_5Q_store (insn), info);
+ break;
+ case 'i':
+ fput_const (extract_11 (insn), info);
+ break;
+ case 'j':
+ fput_const (extract_14 (insn), info);
+ break;
+ case 'k':
+ fputs_filtered ("L%", info);
+ fput_const (extract_21 (insn), info);
+ break;
+ case '<':
+ case 'l':
+ /* 16-bit long disp., PA2.0 wide only. */
+ fput_const (extract_16 (insn), info);
+ break;
+ case 'n':
+ if (insn & 0x2)
+ (*info->fprintf_func) (info->stream, ",n ");
+ else
+ (*info->fprintf_func) (info->stream, " ");
+ break;
+ case 'N':
+ if ((insn & 0x20) && s[1])
+ (*info->fprintf_func) (info->stream, ",n ");
+ else if (insn & 0x20)
+ (*info->fprintf_func) (info->stream, ",n");
+ else if (s[1])
+ (*info->fprintf_func) (info->stream, " ");
+ break;
+ case 'w':
+ (*info->print_address_func)
+ (memaddr + 8 + extract_12 (insn), info);
+ break;
+ case 'W':
+ /* 17 bit PC-relative branch. */
+ (*info->print_address_func)
+ ((memaddr + 8 + extract_17 (insn)), info);
+ break;
+ case 'z':
+ /* 17 bit displacement. This is an offset from a register
+ so it gets disassembled as just a number, not any sort
+ of address. */
+ fput_const (extract_17 (insn), info);
+ break;
+
+ case 'Z':
+ /* addil %r1 implicit output. */
+ fputs_filtered ("r1", info);
+ break;
+
+ case 'Y':
+ /* be,l %sr0,%r31 implicit output. */
+ fputs_filtered ("sr0,r31", info);
+ break;
+
+ case '@':
+ (*info->fprintf_func) (info->stream, "0");
+ break;
+
+ case '.':
+ (*info->fprintf_func) (info->stream, "%d",
+ GET_FIELD (insn, 24, 25));
+ break;
+ case '*':
+ (*info->fprintf_func) (info->stream, "%d",
+ GET_FIELD (insn, 22, 25));
+ break;
+ case '!':
+ fputs_filtered ("sar", info);
+ break;
+ case 'p':
+ (*info->fprintf_func) (info->stream, "%d",
+ 31 - GET_FIELD (insn, 22, 26));
+ break;
+ case '~':
+ {
+ int num;
+ num = GET_FIELD (insn, 20, 20) << 5;
+ num |= GET_FIELD (insn, 22, 26);
+ (*info->fprintf_func) (info->stream, "%d", 63 - num);
+ break;
+ }
+ case 'P':
+ (*info->fprintf_func) (info->stream, "%d",
+ GET_FIELD (insn, 22, 26));
+ break;
+ case 'q':
+ {
+ int num;
+ num = GET_FIELD (insn, 20, 20) << 5;
+ num |= GET_FIELD (insn, 22, 26);
+ (*info->fprintf_func) (info->stream, "%d", num);
+ break;
+ }
+ case 'T':
+ (*info->fprintf_func) (info->stream, "%d",
+ 32 - GET_FIELD (insn, 27, 31));
+ break;
+ case '%':
+ {
+ int num;
+ num = (GET_FIELD (insn, 23, 23) + 1) * 32;
+ num -= GET_FIELD (insn, 27, 31);
+ (*info->fprintf_func) (info->stream, "%d", num);
+ break;
+ }
+ case '|':
+ {
+ int num;
+ num = (GET_FIELD (insn, 19, 19) + 1) * 32;
+ num -= GET_FIELD (insn, 27, 31);
+ (*info->fprintf_func) (info->stream, "%d", num);
+ break;
+ }
+ case '$':
+ fput_const (GET_FIELD (insn, 20, 28), info);
+ break;
+ case 'A':
+ fput_const (GET_FIELD (insn, 6, 18), info);
+ break;
+ case 'D':
+ fput_const (GET_FIELD (insn, 6, 31), info);
+ break;
+ case 'v':
+ (*info->fprintf_func) (info->stream, ",%d",
+ GET_FIELD (insn, 23, 25));
+ break;
+ case 'O':
+ fput_const ((GET_FIELD (insn, 6,20) << 5 |
+ GET_FIELD (insn, 27, 31)), info);
+ break;
+ case 'o':
+ fput_const (GET_FIELD (insn, 6, 20), info);
+ break;
+ case '2':
+ fput_const ((GET_FIELD (insn, 6, 22) << 5 |
+ GET_FIELD (insn, 27, 31)), info);
+ break;
+ case '1':
+ fput_const ((GET_FIELD (insn, 11, 20) << 5 |
+ GET_FIELD (insn, 27, 31)), info);
+ break;
+ case '0':
+ fput_const ((GET_FIELD (insn, 16, 20) << 5 |
+ GET_FIELD (insn, 27, 31)), info);
+ break;
+ case 'u':
+ (*info->fprintf_func) (info->stream, ",%d",
+ GET_FIELD (insn, 23, 25));
+ break;
+ case 'F':
+ /* If no destination completer and not before a completer
+ for fcmp, need a space here. */
+ if (s[1] == 'G' || s[1] == '?')
+ fputs_filtered
+ (float_format_names[GET_FIELD (insn, 19, 20)], info);
+ else
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ float_format_names[GET_FIELD (insn, 19, 20)]);
+ break;
+ case 'G':
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ float_format_names[GET_FIELD (insn, 17, 18)]);
+ break;
+ case 'H':
+ if (GET_FIELD (insn, 26, 26) == 1)
+ (*info->fprintf_func) (info->stream, "%s ",
+ float_format_names[0]);
+ else
+ (*info->fprintf_func) (info->stream, "%s ",
+ float_format_names[1]);
+ break;
+ case 'I':
+ /* If no destination completer and not before a completer
+ for fcmp, need a space here. */
+ if (s[1] == '?')
+ fputs_filtered
+ (float_format_names[GET_FIELD (insn, 20, 20)], info);
+ else
+ (*info->fprintf_func)
+ (info->stream, "%s ",
+ float_format_names[GET_FIELD (insn, 20, 20)]);
+ break;
+
+ case 'J':
+ fput_const (extract_14 (insn), info);
+ break;
+
+ case '#':
+ {
+ int sign = GET_FIELD (insn, 31, 31);
+ int imm10 = GET_FIELD (insn, 18, 27);
+ int disp;
+
+ if (sign)
+ disp = (-1 << 10) | imm10;
+ else
+ disp = imm10;
+
+ disp <<= 3;
+ fput_const (disp, info);
+ break;
+ }
+ case 'K':
+ case 'd':
+ {
+ int sign = GET_FIELD (insn, 31, 31);
+ int imm11 = GET_FIELD (insn, 18, 28);
+ int disp;
+
+ if (sign)
+ disp = (-1 << 11) | imm11;
+ else
+ disp = imm11;
+
+ disp <<= 2;
+ fput_const (disp, info);
+ break;
+ }
+
+ case '>':
+ case 'y':
+ {
+ /* 16-bit long disp., PA2.0 wide only. */
+ int disp = extract_16 (insn);
+ disp &= ~3;
+ fput_const (disp, info);
+ break;
+ }
+
+ case '&':
+ {
+ /* 16-bit long disp., PA2.0 wide only. */
+ int disp = extract_16 (insn);
+ disp &= ~7;
+ fput_const (disp, info);
+ break;
+ }
+
+ case '_':
+ break; /* Dealt with by '{' */
+
+ case '{':
+ {
+ int sub = GET_FIELD (insn, 14, 16);
+ int df = GET_FIELD (insn, 17, 18);
+ int sf = GET_FIELD (insn, 19, 20);
+ const char * const * source = float_format_names;
+ const char * const * dest = float_format_names;
+ const char *t = "";
+
+ if (sub == 4)
+ {
+ fputs_filtered (",UND ", info);
+ break;
+ }
+ if ((sub & 3) == 3)
+ t = ",t";
+ if ((sub & 3) == 1)
+ source = sub & 4 ? fcnv_ufixed_names : fcnv_fixed_names;
+ if (sub & 2)
+ dest = sub & 4 ? fcnv_ufixed_names : fcnv_fixed_names;
+
+ (*info->fprintf_func) (info->stream, "%s%s%s ",
+ t, source[sf], dest[df]);
+ break;
+ }
+
+ case 'm':
+ {
+ int y = GET_FIELD (insn, 16, 18);
+
+ if (y != 1)
+ fput_const ((y ^ 1) - 1, info);
+ }
+ break;
+
+ case 'h':
+ {
+ int cbit;
+
+ cbit = GET_FIELD (insn, 16, 18);
+
+ if (cbit > 0)
+ (*info->fprintf_func) (info->stream, ",%d", cbit - 1);
+ break;
+ }
+
+ case '=':
+ {
+ int cond = GET_FIELD (insn, 27, 31);
+
+ switch (cond)
+ {
+ case 0: fputs_filtered (" ", info); break;
+ case 1: fputs_filtered ("acc ", info); break;
+ case 2: fputs_filtered ("rej ", info); break;
+ case 5: fputs_filtered ("acc8 ", info); break;
+ case 6: fputs_filtered ("rej8 ", info); break;
+ case 9: fputs_filtered ("acc6 ", info); break;
+ case 13: fputs_filtered ("acc4 ", info); break;
+ case 17: fputs_filtered ("acc2 ", info); break;
+ default: break;
+ }
+ break;
+ }
+
+ case 'X':
+ (*info->print_address_func)
+ (memaddr + 8 + extract_22 (insn), info);
+ break;
+ case 'L':
+ fputs_filtered (",rp", info);
+ break;
+ default:
+ (*info->fprintf_func) (info->stream, "%c", *s);
+ break;
+ }
+ }
+ return sizeof (insn);
+ }
+ }
+ (*info->fprintf_func) (info->stream, "#%8x", insn);
+ return sizeof (insn);
+}
diff --git a/disas/i386.c b/disas/i386.c
index 57145d0a6b..07f871fd64 100644
--- a/disas/i386.c
+++ b/disas/i386.c
@@ -682,6 +682,7 @@ fetch_data(struct disassemble_info *info, bfd_byte *addr)
#define PREGRP104 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 104 } }
#define PREGRP105 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 105 } }
#define PREGRP106 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 106 } }
+#define PREGRP107 NULL, { { NULL, USE_PREFIX_USER_TABLE }, { NULL, 107 } }
#define X86_64_0 NULL, { { NULL, X86_64_SPECIAL }, { NULL, 0 } }
#define X86_64_1 NULL, { { NULL, X86_64_SPECIAL }, { NULL, 1 } }
@@ -1247,7 +1248,7 @@ static const struct dis386 dis386_twobyte[] = {
{ "ud2b", { XX } },
{ GRP8 },
{ "btcS", { Ev, Gv } },
- { "bsfS", { Gv, Ev } },
+ { PREGRP107 },
{ PREGRP36 },
{ "movs{bR|x|bR|x}", { Gv, Eb } },
{ "movs{wR|x|wR|x}", { Gv, Ew } }, /* yes, there really is movsww ! */
@@ -1431,7 +1432,7 @@ static const unsigned char twobyte_uses_REPZ_prefix[256] = {
/* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
/* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
/* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
- /* b0 */ 0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0, /* bf */
+ /* b0 */ 0,0,0,0,0,0,0,0,1,0,0,0,1,1,0,0, /* bf */
/* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
/* d0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */
/* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */
@@ -2800,6 +2801,13 @@ static const struct dis386 prefix_user_table[][4] = {
{ "shrxS", { Gv, Ev, Bv } },
},
+ /* PREGRP107 */
+ {
+ { "bsfS", { Gv, Ev } },
+ { "tzcntS", { Gv, Ev } },
+ { "bsfS", { Gv, Ev } },
+ { "(bad)", { XX } },
+ },
};
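As an aside (an illustration, not part of the patch itself): rows of prefix_user_table are selected by the mandatory prefix in effect, conventionally entry 0 for no prefix, entry 1 for F3 (REPZ), entry 2 for 66 and entry 3 for F2. The new PREGRP107 row therefore prints tzcnt only for F3 0F BC and keeps printing bsf otherwise, which is also why the twobyte_uses_REPZ_prefix entry for 0xbc is flipped to 1 above. A minimal, hypothetical sketch of that selection:

    #include <stdio.h>

    /* Hypothetical helper mirroring the PREGRP107 row: not QEMU code, just the
       prefix-to-mnemonic selection for opcode 0F BC spelled out. */
    static const char *opcode_0fbc_name(int has_f3, int has_66, int has_f2)
    {
        if (has_f3) {
            return "tzcnt";     /* F3 0F BC: entry 1 */
        }
        if (has_f2) {
            return "(bad)";     /* F2 0F BC: entry 3, undefined here */
        }
        (void)has_66;           /* 66 0F BC: entry 2, still bsf (16-bit operands) */
        return "bsf";           /* no prefix: entry 0 */
    }

    int main(void)
    {
        printf("%s / %s\n", opcode_0fbc_name(0, 0, 0), opcode_0fbc_name(1, 0, 0));
        return 0;
    }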
static const struct dis386 x86_64_table[][2] = {
diff --git a/disas/nios2.c b/disas/nios2.c
new file mode 100644
index 0000000000..b342936d21
--- /dev/null
+++ b/disas/nios2.c
@@ -0,0 +1,3534 @@
+/* Nios II opcode library for QEMU.
+ Copyright (C) 2012-2016 Free Software Foundation, Inc.
+ Contributed by Nigel Gray (ngray@altera.com).
+ Contributed by Mentor Graphics, Inc.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+/* This file resembles a concatenation of the following files from
+ binutils:
+
+ include/opcode/nios2.h
+ include/opcode/nios2r1.h
+ include/opcode/nios2r2.h
+ opcodes/nios2-opc.c
+ opcodes/nios2-dis.c
+
+ It has been derived from the original patches which have been
+ relicensed by the contributors as GPL version 2 for inclusion
+ in QEMU. */
+
+#ifndef _NIOS2_H_
+#define _NIOS2_H_
+
+/*#include "bfd.h"*/
+#include "qemu/osdep.h"
+#include "disas/bfd.h"
+
+
+/****************************************************************************
+ * This file contains structures, bit masks and shift counts used
+ * by the GNU toolchain to define the Nios II instruction set and
+ * access various opcode fields.
+ ****************************************************************************/
+
+/* Instruction encoding formats. */
+enum iw_format_type {
+ /* R1 formats. */
+ iw_i_type,
+ iw_r_type,
+ iw_j_type,
+ iw_custom_type,
+
+ /* 32-bit R2 formats. */
+ iw_L26_type,
+ iw_F2I16_type,
+ iw_F2X4I12_type,
+ iw_F1X4I12_type,
+ iw_F1X4L17_type,
+ iw_F3X6L5_type,
+ iw_F2X6L10_type,
+ iw_F3X6_type,
+ iw_F3X8_type,
+
+ /* 16-bit R2 formats. */
+ iw_I10_type,
+ iw_T1I7_type,
+ iw_T2I4_type,
+ iw_T1X1I6_type,
+ iw_X1I7_type,
+ iw_L5I4X1_type,
+ iw_T2X1L3_type,
+ iw_T2X1I3_type,
+ iw_T3X1_type,
+ iw_T2X3_type,
+ iw_F1X1_type,
+ iw_X2L5_type,
+ iw_F1I5_type,
+ iw_F2_type
+};
+
+/* Identify different overflow situations for error messages. */
+enum overflow_type
+{
+ call_target_overflow = 0,
+ branch_target_overflow,
+ address_offset_overflow,
+ signed_immed16_overflow,
+ unsigned_immed16_overflow,
+ unsigned_immed5_overflow,
+ signed_immed12_overflow,
+ custom_opcode_overflow,
+ enumeration_overflow,
+ no_overflow
+};
+
+/* This structure holds information for a particular instruction.
+
+ The args field is a string describing the operands. The following
+ letters can appear in the args:
+ c - a 5-bit control register index
+ d - a 5-bit destination register index
+ s - a 5-bit left source register index
+ t - a 5-bit right source register index
+ D - a 3-bit encoded destination register
+ S - a 3-bit encoded left source register
+ T - a 3-bit encoded right source register
+ i - a 16-bit signed immediate
+ j - a 5-bit unsigned immediate
+ k - a (second) 5-bit unsigned immediate
+ l - an 8-bit custom instruction constant
+ m - a 26-bit unsigned immediate
+ o - a 16-bit signed pc-relative offset
+ u - a 16-bit unsigned immediate
+ I - a 12-bit signed immediate
+ M - a 6-bit unsigned immediate
+ N - a 6-bit unsigned immediate with 2-bit shift
+ O - a 10-bit signed pc-relative offset with 1-bit shift
+ P - a 7-bit signed pc-relative offset with 1-bit shift
+ U - a 7-bit unsigned immediate with 2-bit shift
+ V - a 5-bit unsigned immediate with 2-bit shift
+ W - a 4-bit unsigned immediate with 2-bit shift
+ X - a 4-bit unsigned immediate with 1-bit shift
+ Y - a 4-bit unsigned immediate
+ e - an immediate coded as an enumeration for addi.n/subi.n
+ f - an immediate coded as an enumeration for slli.n/srli.n
+ g - an immediate coded as an enumeration for andi.n
+ h - an immediate coded as an enumeration for movi.n
+ R - a reglist for ldwm/stwm or push.n/pop.n
+ B - a base register specifier and option list for ldwm/stwm
+ Literal ',', '(', and ')' characters may also appear in the args as
+ delimiters.
+
+ Note that the args describe the semantics and assembly-language syntax
+ of the operands, not their encoding into the instruction word.
+
+ The pinfo field is INSN_MACRO for a macro. Otherwise, it is a collection
+ of bits describing the instruction, notably any relevant hazard
+ information.
+
+ When assembling, the match field contains the opcode template, which
+ is modified by the arguments to produce the actual opcode
+ that is emitted. If pinfo is INSN_MACRO, then this is 0.
+
+ If pinfo is INSN_MACRO, the mask field stores the macro identifier.
+ Otherwise this is a bit mask for the relevant portions of the opcode
+ when disassembling. If the actual instruction word ANDed with the mask
+ field equals the match field, then we have found the correct instruction. */
+
+struct nios2_opcode
+{
+ const char *name; /* The name of the instruction. */
+ const char *args; /* A string describing the arguments for this
+ instruction. */
+ const char *args_test; /* Like args, but with an extra argument for
+ the expected opcode. */
+ unsigned long num_args; /* The number of arguments the instruction
+ takes. */
+ unsigned size; /* Size in bytes of the instruction. */
+ enum iw_format_type format; /* Instruction format. */
+ unsigned long match; /* The basic opcode for the instruction. */
+ unsigned long mask; /* Mask for the opcode field of the
+ instruction. */
+ unsigned long pinfo; /* Is this a real instruction or instruction
+ macro? */
+ enum overflow_type overflow_msg; /* Used to generate informative
+ message when fixup overflows. */
+};
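To make the match/mask scheme described above concrete, here is a minimal, self-contained sketch (illustrative only; the toy_* names are invented, and the numeric values are what MATCH_R1_ADD/MASK_R1_ADD and MATCH_R1_ADDI/MASK_R1_ADDI appear to expand to from the definitions further down in this header):

    #include <stddef.h>
    #include <stdio.h>

    struct toy_opcode {
        const char   *name;
        unsigned long match;   /* fixed bit pattern the word must contain */
        unsigned long mask;    /* which bits of the word are fixed */
    };

    static const struct toy_opcode toy_table[] = {
        { "add",  0x0001883au, 0x0001ffffu },  /* OPX-group R-type add */
        { "addi", 0x00000004u, 0x0000003fu },  /* I-type addi: op field only */
    };

    /* A table entry matches when the bits selected by its mask equal its
       match pattern; the first hit wins. */
    static const struct toy_opcode *toy_lookup(unsigned long word)
    {
        size_t i;

        for (i = 0; i < sizeof(toy_table) / sizeof(toy_table[0]); i++) {
            if ((word & toy_table[i].mask) == toy_table[i].match) {
                return &toy_table[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        /* 0x0887883a should be "add r3, r1, r2" if the R1 field layout below
           (A=1 in bits 27..31, B=2 in 22..26, C=3 in 17..21, OPX=0x31, OP=0x3a)
           is read correctly. */
        const struct toy_opcode *op = toy_lookup(0x0887883aul);

        printf("%s\n", op ? op->name : "unknown");
        return 0;
    }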
+
+/* This value is used in the nios2_opcode.pinfo field to indicate that the
+ instruction is a macro or pseudo-op. This requires special treatment by
+ the assembler, and is used by the disassembler to determine whether to
+ check for a nop. */
+#define NIOS2_INSN_MACRO 0x80000000
+#define NIOS2_INSN_MACRO_MOV 0x80000001
+#define NIOS2_INSN_MACRO_MOVI 0x80000002
+#define NIOS2_INSN_MACRO_MOVIA 0x80000004
+
+#define NIOS2_INSN_RELAXABLE 0x40000000
+#define NIOS2_INSN_UBRANCH 0x00000010
+#define NIOS2_INSN_CBRANCH 0x00000020
+#define NIOS2_INSN_CALL 0x00000040
+
+#define NIOS2_INSN_OPTARG 0x00000080
+
+/* Register attributes. */
+#define REG_NORMAL (1<<0) /* Normal registers. */
+#define REG_CONTROL (1<<1) /* Control registers. */
+#define REG_COPROCESSOR (1<<2) /* For custom instructions. */
+#define REG_3BIT (1<<3) /* For R2 CDX instructions. */
+#define REG_LDWM (1<<4) /* For R2 ldwm/stwm. */
+#define REG_POP (1<<5) /* For R2 pop.n/push.n. */
+
+struct nios2_reg
+{
+ const char *name;
+ const int index;
+ unsigned long regtype;
+};
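A couple of hypothetical entries (not taken from the real register table later in this file) show how the attribute flags above combine:

    static const struct nios2_reg example_regs[] = {
        /* Invented examples: a plain general-purpose register, one that can
           also be encoded in R2 CDX 3-bit fields and register lists, and a
           control register. */
        { "r2",   2, REG_NORMAL },
        { "r16", 16, REG_NORMAL | REG_3BIT | REG_LDWM | REG_POP },
        { "ctl5", 5, REG_CONTROL },
    };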
+
+/* Pull in the instruction field accessors, opcodes, and masks. */
+/*#include "nios2r1.h"*/
+
+#ifndef _NIOS2R1_H_
+#define _NIOS2R1_H_
+
+/* R1 fields. */
+#define IW_R1_OP_LSB 0
+#define IW_R1_OP_SIZE 6
+#define IW_R1_OP_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R1_OP_SIZE))
+#define IW_R1_OP_SHIFTED_MASK (IW_R1_OP_UNSHIFTED_MASK << IW_R1_OP_LSB)
+#define GET_IW_R1_OP(W) (((W) >> IW_R1_OP_LSB) & IW_R1_OP_UNSHIFTED_MASK)
+#define SET_IW_R1_OP(V) (((V) & IW_R1_OP_UNSHIFTED_MASK) << IW_R1_OP_LSB)
+
+#define IW_I_A_LSB 27
+#define IW_I_A_SIZE 5
+#define IW_I_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_I_A_SIZE))
+#define IW_I_A_SHIFTED_MASK (IW_I_A_UNSHIFTED_MASK << IW_I_A_LSB)
+#define GET_IW_I_A(W) (((W) >> IW_I_A_LSB) & IW_I_A_UNSHIFTED_MASK)
+#define SET_IW_I_A(V) (((V) & IW_I_A_UNSHIFTED_MASK) << IW_I_A_LSB)
+
+#define IW_I_B_LSB 22
+#define IW_I_B_SIZE 5
+#define IW_I_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_I_B_SIZE))
+#define IW_I_B_SHIFTED_MASK (IW_I_B_UNSHIFTED_MASK << IW_I_B_LSB)
+#define GET_IW_I_B(W) (((W) >> IW_I_B_LSB) & IW_I_B_UNSHIFTED_MASK)
+#define SET_IW_I_B(V) (((V) & IW_I_B_UNSHIFTED_MASK) << IW_I_B_LSB)
+
+#define IW_I_IMM16_LSB 6
+#define IW_I_IMM16_SIZE 16
+#define IW_I_IMM16_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_I_IMM16_SIZE))
+#define IW_I_IMM16_SHIFTED_MASK (IW_I_IMM16_UNSHIFTED_MASK << IW_I_IMM16_LSB)
+#define GET_IW_I_IMM16(W) (((W) >> IW_I_IMM16_LSB) & IW_I_IMM16_UNSHIFTED_MASK)
+#define SET_IW_I_IMM16(V) (((V) & IW_I_IMM16_UNSHIFTED_MASK) << IW_I_IMM16_LSB)
+
+#define IW_R_A_LSB 27
+#define IW_R_A_SIZE 5
+#define IW_R_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_A_SIZE))
+#define IW_R_A_SHIFTED_MASK (IW_R_A_UNSHIFTED_MASK << IW_R_A_LSB)
+#define GET_IW_R_A(W) (((W) >> IW_R_A_LSB) & IW_R_A_UNSHIFTED_MASK)
+#define SET_IW_R_A(V) (((V) & IW_R_A_UNSHIFTED_MASK) << IW_R_A_LSB)
+
+#define IW_R_B_LSB 22
+#define IW_R_B_SIZE 5
+#define IW_R_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_B_SIZE))
+#define IW_R_B_SHIFTED_MASK (IW_R_B_UNSHIFTED_MASK << IW_R_B_LSB)
+#define GET_IW_R_B(W) (((W) >> IW_R_B_LSB) & IW_R_B_UNSHIFTED_MASK)
+#define SET_IW_R_B(V) (((V) & IW_R_B_UNSHIFTED_MASK) << IW_R_B_LSB)
+
+#define IW_R_C_LSB 17
+#define IW_R_C_SIZE 5
+#define IW_R_C_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_C_SIZE))
+#define IW_R_C_SHIFTED_MASK (IW_R_C_UNSHIFTED_MASK << IW_R_C_LSB)
+#define GET_IW_R_C(W) (((W) >> IW_R_C_LSB) & IW_R_C_UNSHIFTED_MASK)
+#define SET_IW_R_C(V) (((V) & IW_R_C_UNSHIFTED_MASK) << IW_R_C_LSB)
+
+#define IW_R_OPX_LSB 11
+#define IW_R_OPX_SIZE 6
+#define IW_R_OPX_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_OPX_SIZE))
+#define IW_R_OPX_SHIFTED_MASK (IW_R_OPX_UNSHIFTED_MASK << IW_R_OPX_LSB)
+#define GET_IW_R_OPX(W) (((W) >> IW_R_OPX_LSB) & IW_R_OPX_UNSHIFTED_MASK)
+#define SET_IW_R_OPX(V) (((V) & IW_R_OPX_UNSHIFTED_MASK) << IW_R_OPX_LSB)
+
+#define IW_R_IMM5_LSB 6
+#define IW_R_IMM5_SIZE 5
+#define IW_R_IMM5_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_IMM5_SIZE))
+#define IW_R_IMM5_SHIFTED_MASK (IW_R_IMM5_UNSHIFTED_MASK << IW_R_IMM5_LSB)
+#define GET_IW_R_IMM5(W) (((W) >> IW_R_IMM5_LSB) & IW_R_IMM5_UNSHIFTED_MASK)
+#define SET_IW_R_IMM5(V) (((V) & IW_R_IMM5_UNSHIFTED_MASK) << IW_R_IMM5_LSB)
+
+#define IW_J_IMM26_LSB 6
+#define IW_J_IMM26_SIZE 26
+#define IW_J_IMM26_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_J_IMM26_SIZE))
+#define IW_J_IMM26_SHIFTED_MASK (IW_J_IMM26_UNSHIFTED_MASK << IW_J_IMM26_LSB)
+#define GET_IW_J_IMM26(W) (((W) >> IW_J_IMM26_LSB) & IW_J_IMM26_UNSHIFTED_MASK)
+#define SET_IW_J_IMM26(V) (((V) & IW_J_IMM26_UNSHIFTED_MASK) << IW_J_IMM26_LSB)
+
+#define IW_CUSTOM_A_LSB 27
+#define IW_CUSTOM_A_SIZE 5
+#define IW_CUSTOM_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_A_SIZE))
+#define IW_CUSTOM_A_SHIFTED_MASK (IW_CUSTOM_A_UNSHIFTED_MASK << IW_CUSTOM_A_LSB)
+#define GET_IW_CUSTOM_A(W) (((W) >> IW_CUSTOM_A_LSB) & IW_CUSTOM_A_UNSHIFTED_MASK)
+#define SET_IW_CUSTOM_A(V) (((V) & IW_CUSTOM_A_UNSHIFTED_MASK) << IW_CUSTOM_A_LSB)
+
+#define IW_CUSTOM_B_LSB 22
+#define IW_CUSTOM_B_SIZE 5
+#define IW_CUSTOM_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_B_SIZE))
+#define IW_CUSTOM_B_SHIFTED_MASK (IW_CUSTOM_B_UNSHIFTED_MASK << IW_CUSTOM_B_LSB)
+#define GET_IW_CUSTOM_B(W) (((W) >> IW_CUSTOM_B_LSB) & IW_CUSTOM_B_UNSHIFTED_MASK)
+#define SET_IW_CUSTOM_B(V) (((V) & IW_CUSTOM_B_UNSHIFTED_MASK) << IW_CUSTOM_B_LSB)
+
+#define IW_CUSTOM_C_LSB 17
+#define IW_CUSTOM_C_SIZE 5
+#define IW_CUSTOM_C_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_C_SIZE))
+#define IW_CUSTOM_C_SHIFTED_MASK (IW_CUSTOM_C_UNSHIFTED_MASK << IW_CUSTOM_C_LSB)
+#define GET_IW_CUSTOM_C(W) (((W) >> IW_CUSTOM_C_LSB) & IW_CUSTOM_C_UNSHIFTED_MASK)
+#define SET_IW_CUSTOM_C(V) (((V) & IW_CUSTOM_C_UNSHIFTED_MASK) << IW_CUSTOM_C_LSB)
+
+#define IW_CUSTOM_READA_LSB 16
+#define IW_CUSTOM_READA_SIZE 1
+#define IW_CUSTOM_READA_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_READA_SIZE))
+#define IW_CUSTOM_READA_SHIFTED_MASK (IW_CUSTOM_READA_UNSHIFTED_MASK << IW_CUSTOM_READA_LSB)
+#define GET_IW_CUSTOM_READA(W) (((W) >> IW_CUSTOM_READA_LSB) & IW_CUSTOM_READA_UNSHIFTED_MASK)
+#define SET_IW_CUSTOM_READA(V) (((V) & IW_CUSTOM_READA_UNSHIFTED_MASK) << IW_CUSTOM_READA_LSB)
+
+#define IW_CUSTOM_READB_LSB 15
+#define IW_CUSTOM_READB_SIZE 1
+#define IW_CUSTOM_READB_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_READB_SIZE))
+#define IW_CUSTOM_READB_SHIFTED_MASK (IW_CUSTOM_READB_UNSHIFTED_MASK << IW_CUSTOM_READB_LSB)
+#define GET_IW_CUSTOM_READB(W) (((W) >> IW_CUSTOM_READB_LSB) & IW_CUSTOM_READB_UNSHIFTED_MASK)
+#define SET_IW_CUSTOM_READB(V) (((V) & IW_CUSTOM_READB_UNSHIFTED_MASK) << IW_CUSTOM_READB_LSB)
+
+#define IW_CUSTOM_READC_LSB 14
+#define IW_CUSTOM_READC_SIZE 1
+#define IW_CUSTOM_READC_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_READC_SIZE))
+#define IW_CUSTOM_READC_SHIFTED_MASK (IW_CUSTOM_READC_UNSHIFTED_MASK << IW_CUSTOM_READC_LSB)
+#define GET_IW_CUSTOM_READC(W) (((W) >> IW_CUSTOM_READC_LSB) & IW_CUSTOM_READC_UNSHIFTED_MASK)
+#define SET_IW_CUSTOM_READC(V) (((V) & IW_CUSTOM_READC_UNSHIFTED_MASK) << IW_CUSTOM_READC_LSB)
+
+#define IW_CUSTOM_N_LSB 6
+#define IW_CUSTOM_N_SIZE 8
+#define IW_CUSTOM_N_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_CUSTOM_N_SIZE))
+#define IW_CUSTOM_N_SHIFTED_MASK (IW_CUSTOM_N_UNSHIFTED_MASK << IW_CUSTOM_N_LSB)
+#define GET_IW_CUSTOM_N(W) (((W) >> IW_CUSTOM_N_LSB) & IW_CUSTOM_N_UNSHIFTED_MASK)
+#define SET_IW_CUSTOM_N(V) (((V) & IW_CUSTOM_N_UNSHIFTED_MASK) << IW_CUSTOM_N_LSB)
+
+/* R1 opcodes. */
+#define R1_OP_CALL 0
+#define R1_OP_JMPI 1
+#define R1_OP_LDBU 3
+#define R1_OP_ADDI 4
+#define R1_OP_STB 5
+#define R1_OP_BR 6
+#define R1_OP_LDB 7
+#define R1_OP_CMPGEI 8
+#define R1_OP_LDHU 11
+#define R1_OP_ANDI 12
+#define R1_OP_STH 13
+#define R1_OP_BGE 14
+#define R1_OP_LDH 15
+#define R1_OP_CMPLTI 16
+#define R1_OP_INITDA 19
+#define R1_OP_ORI 20
+#define R1_OP_STW 21
+#define R1_OP_BLT 22
+#define R1_OP_LDW 23
+#define R1_OP_CMPNEI 24
+#define R1_OP_FLUSHDA 27
+#define R1_OP_XORI 28
+#define R1_OP_BNE 30
+#define R1_OP_CMPEQI 32
+#define R1_OP_LDBUIO 35
+#define R1_OP_MULI 36
+#define R1_OP_STBIO 37
+#define R1_OP_BEQ 38
+#define R1_OP_LDBIO 39
+#define R1_OP_CMPGEUI 40
+#define R1_OP_LDHUIO 43
+#define R1_OP_ANDHI 44
+#define R1_OP_STHIO 45
+#define R1_OP_BGEU 46
+#define R1_OP_LDHIO 47
+#define R1_OP_CMPLTUI 48
+#define R1_OP_CUSTOM 50
+#define R1_OP_INITD 51
+#define R1_OP_ORHI 52
+#define R1_OP_STWIO 53
+#define R1_OP_BLTU 54
+#define R1_OP_LDWIO 55
+#define R1_OP_RDPRS 56
+#define R1_OP_OPX 58
+#define R1_OP_FLUSHD 59
+#define R1_OP_XORHI 60
+
+#define R1_OPX_ERET 1
+#define R1_OPX_ROLI 2
+#define R1_OPX_ROL 3
+#define R1_OPX_FLUSHP 4
+#define R1_OPX_RET 5
+#define R1_OPX_NOR 6
+#define R1_OPX_MULXUU 7
+#define R1_OPX_CMPGE 8
+#define R1_OPX_BRET 9
+#define R1_OPX_ROR 11
+#define R1_OPX_FLUSHI 12
+#define R1_OPX_JMP 13
+#define R1_OPX_AND 14
+#define R1_OPX_CMPLT 16
+#define R1_OPX_SLLI 18
+#define R1_OPX_SLL 19
+#define R1_OPX_WRPRS 20
+#define R1_OPX_OR 22
+#define R1_OPX_MULXSU 23
+#define R1_OPX_CMPNE 24
+#define R1_OPX_SRLI 26
+#define R1_OPX_SRL 27
+#define R1_OPX_NEXTPC 28
+#define R1_OPX_CALLR 29
+#define R1_OPX_XOR 30
+#define R1_OPX_MULXSS 31
+#define R1_OPX_CMPEQ 32
+#define R1_OPX_DIVU 36
+#define R1_OPX_DIV 37
+#define R1_OPX_RDCTL 38
+#define R1_OPX_MUL 39
+#define R1_OPX_CMPGEU 40
+#define R1_OPX_INITI 41
+#define R1_OPX_TRAP 45
+#define R1_OPX_WRCTL 46
+#define R1_OPX_CMPLTU 48
+#define R1_OPX_ADD 49
+#define R1_OPX_BREAK 52
+#define R1_OPX_SYNC 54
+#define R1_OPX_SUB 57
+#define R1_OPX_SRAI 58
+#define R1_OPX_SRA 59
+
+/* Some convenience macros for R1 encodings, for use in instruction tables.
+ MATCH_R1_OPX0(NAME) and MASK_R1_OPX0 are used for R-type instructions
+ with 3 register operands and constant 0 in the immediate field.
+ The general forms are MATCH_R1_OPX(NAME, A, B, C) where the arguments specify
+ constant values and MASK_R1_OPX(A, B, C, N) where the arguments are booleans
+ that are true if the field should be included in the mask.
+ */
+#define MATCH_R1_OP(NAME) \
+ (SET_IW_R1_OP (R1_OP_##NAME))
+#define MASK_R1_OP \
+ IW_R1_OP_SHIFTED_MASK
+
+#define MATCH_R1_OPX0(NAME) \
+ (SET_IW_R1_OP (R1_OP_OPX) | SET_IW_R_OPX (R1_OPX_##NAME))
+#define MASK_R1_OPX0 \
+ (IW_R1_OP_SHIFTED_MASK | IW_R_OPX_SHIFTED_MASK | IW_R_IMM5_SHIFTED_MASK)
+
+#define MATCH_R1_OPX(NAME, A, B, C) \
+ (MATCH_R1_OPX0 (NAME) | SET_IW_R_A (A) | SET_IW_R_B (B) | SET_IW_R_C (C))
+#define MASK_R1_OPX(A, B, C, N) \
+ (IW_R1_OP_SHIFTED_MASK | IW_R_OPX_SHIFTED_MASK \
+ | (A ? IW_R_A_SHIFTED_MASK : 0) \
+ | (B ? IW_R_B_SHIFTED_MASK : 0) \
+ | (C ? IW_R_C_SHIFTED_MASK : 0) \
+ | (N ? IW_R_IMM5_SHIFTED_MASK : 0))
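As a worked illustration (assuming the field macros above expand as written), the pair used for the plain R-type add a few lines below comes out as:

    /* Not part of the patch; a worked expansion for reference.

         MATCH_R1_OPX0(ADD)
           = SET_IW_R1_OP(R1_OP_OPX) | SET_IW_R_OPX(R1_OPX_ADD)
           = (58 << 0)               | (49 << 11)
           = 0x0001883a

         MASK_R1_OPX0
           = IW_R1_OP_SHIFTED_MASK | IW_R_OPX_SHIFTED_MASK | IW_R_IMM5_SHIFTED_MASK
           = 0x0000003f            | 0x0001f800            | 0x000007c0
           = 0x0001ffff

       So a word is accepted as an R-type "add" with a zero immediate field when
       (word & 0x0001ffff) == 0x0001883a, leaving the A, B and C register fields
       free to vary. */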
+
+/* And here are the match/mask macros for the R1 instruction set. */
+#define MATCH_R1_ADD MATCH_R1_OPX0 (ADD)
+#define MASK_R1_ADD MASK_R1_OPX0
+#define MATCH_R1_ADDI MATCH_R1_OP (ADDI)
+#define MASK_R1_ADDI MASK_R1_OP
+#define MATCH_R1_AND MATCH_R1_OPX0 (AND)
+#define MASK_R1_AND MASK_R1_OPX0
+#define MATCH_R1_ANDHI MATCH_R1_OP (ANDHI)
+#define MASK_R1_ANDHI MASK_R1_OP
+#define MATCH_R1_ANDI MATCH_R1_OP (ANDI)
+#define MASK_R1_ANDI MASK_R1_OP
+#define MATCH_R1_BEQ MATCH_R1_OP (BEQ)
+#define MASK_R1_BEQ MASK_R1_OP
+#define MATCH_R1_BGE MATCH_R1_OP (BGE)
+#define MASK_R1_BGE MASK_R1_OP
+#define MATCH_R1_BGEU MATCH_R1_OP (BGEU)
+#define MASK_R1_BGEU MASK_R1_OP
+#define MATCH_R1_BGT MATCH_R1_OP (BLT)
+#define MASK_R1_BGT MASK_R1_OP
+#define MATCH_R1_BGTU MATCH_R1_OP (BLTU)
+#define MASK_R1_BGTU MASK_R1_OP
+#define MATCH_R1_BLE MATCH_R1_OP (BGE)
+#define MASK_R1_BLE MASK_R1_OP
+#define MATCH_R1_BLEU MATCH_R1_OP (BGEU)
+#define MASK_R1_BLEU MASK_R1_OP
+#define MATCH_R1_BLT MATCH_R1_OP (BLT)
+#define MASK_R1_BLT MASK_R1_OP
+#define MATCH_R1_BLTU MATCH_R1_OP (BLTU)
+#define MASK_R1_BLTU MASK_R1_OP
+#define MATCH_R1_BNE MATCH_R1_OP (BNE)
+#define MASK_R1_BNE MASK_R1_OP
+#define MATCH_R1_BR MATCH_R1_OP (BR)
+#define MASK_R1_BR MASK_R1_OP | IW_I_A_SHIFTED_MASK | IW_I_B_SHIFTED_MASK
+#define MATCH_R1_BREAK MATCH_R1_OPX (BREAK, 0, 0, 0x1e)
+#define MASK_R1_BREAK MASK_R1_OPX (1, 1, 1, 0)
+#define MATCH_R1_BRET MATCH_R1_OPX (BRET, 0x1e, 0, 0)
+#define MASK_R1_BRET MASK_R1_OPX (1, 1, 1, 1)
+#define MATCH_R1_CALL MATCH_R1_OP (CALL)
+#define MASK_R1_CALL MASK_R1_OP
+#define MATCH_R1_CALLR MATCH_R1_OPX (CALLR, 0, 0, 0x1f)
+#define MASK_R1_CALLR MASK_R1_OPX (0, 1, 1, 1)
+#define MATCH_R1_CMPEQ MATCH_R1_OPX0 (CMPEQ)
+#define MASK_R1_CMPEQ MASK_R1_OPX0
+#define MATCH_R1_CMPEQI MATCH_R1_OP (CMPEQI)
+#define MASK_R1_CMPEQI MASK_R1_OP
+#define MATCH_R1_CMPGE MATCH_R1_OPX0 (CMPGE)
+#define MASK_R1_CMPGE MASK_R1_OPX0
+#define MATCH_R1_CMPGEI MATCH_R1_OP (CMPGEI)
+#define MASK_R1_CMPGEI MASK_R1_OP
+#define MATCH_R1_CMPGEU MATCH_R1_OPX0 (CMPGEU)
+#define MASK_R1_CMPGEU MASK_R1_OPX0
+#define MATCH_R1_CMPGEUI MATCH_R1_OP (CMPGEUI)
+#define MASK_R1_CMPGEUI MASK_R1_OP
+#define MATCH_R1_CMPGT MATCH_R1_OPX0 (CMPLT)
+#define MASK_R1_CMPGT MASK_R1_OPX0
+#define MATCH_R1_CMPGTI MATCH_R1_OP (CMPGEI)
+#define MASK_R1_CMPGTI MASK_R1_OP
+#define MATCH_R1_CMPGTU MATCH_R1_OPX0 (CMPLTU)
+#define MASK_R1_CMPGTU MASK_R1_OPX0
+#define MATCH_R1_CMPGTUI MATCH_R1_OP (CMPGEUI)
+#define MASK_R1_CMPGTUI MASK_R1_OP
+#define MATCH_R1_CMPLE MATCH_R1_OPX0 (CMPGE)
+#define MASK_R1_CMPLE MASK_R1_OPX0
+#define MATCH_R1_CMPLEI MATCH_R1_OP (CMPLTI)
+#define MASK_R1_CMPLEI MASK_R1_OP
+#define MATCH_R1_CMPLEU MATCH_R1_OPX0 (CMPGEU)
+#define MASK_R1_CMPLEU MASK_R1_OPX0
+#define MATCH_R1_CMPLEUI MATCH_R1_OP (CMPLTUI)
+#define MASK_R1_CMPLEUI MASK_R1_OP
+#define MATCH_R1_CMPLT MATCH_R1_OPX0 (CMPLT)
+#define MASK_R1_CMPLT MASK_R1_OPX0
+#define MATCH_R1_CMPLTI MATCH_R1_OP (CMPLTI)
+#define MASK_R1_CMPLTI MASK_R1_OP
+#define MATCH_R1_CMPLTU MATCH_R1_OPX0 (CMPLTU)
+#define MASK_R1_CMPLTU MASK_R1_OPX0
+#define MATCH_R1_CMPLTUI MATCH_R1_OP (CMPLTUI)
+#define MASK_R1_CMPLTUI MASK_R1_OP
+#define MATCH_R1_CMPNE MATCH_R1_OPX0 (CMPNE)
+#define MASK_R1_CMPNE MASK_R1_OPX0
+#define MATCH_R1_CMPNEI MATCH_R1_OP (CMPNEI)
+#define MASK_R1_CMPNEI MASK_R1_OP
+#define MATCH_R1_CUSTOM MATCH_R1_OP (CUSTOM)
+#define MASK_R1_CUSTOM MASK_R1_OP
+#define MATCH_R1_DIV MATCH_R1_OPX0 (DIV)
+#define MASK_R1_DIV MASK_R1_OPX0
+#define MATCH_R1_DIVU MATCH_R1_OPX0 (DIVU)
+#define MASK_R1_DIVU MASK_R1_OPX0
+#define MATCH_R1_ERET MATCH_R1_OPX (ERET, 0x1d, 0x1e, 0)
+#define MASK_R1_ERET MASK_R1_OPX (1, 1, 1, 1)
+#define MATCH_R1_FLUSHD MATCH_R1_OP (FLUSHD) | SET_IW_I_B (0)
+#define MASK_R1_FLUSHD MASK_R1_OP | IW_I_B_SHIFTED_MASK
+#define MATCH_R1_FLUSHDA MATCH_R1_OP (FLUSHDA) | SET_IW_I_B (0)
+#define MASK_R1_FLUSHDA MASK_R1_OP | IW_I_B_SHIFTED_MASK
+#define MATCH_R1_FLUSHI MATCH_R1_OPX (FLUSHI, 0, 0, 0)
+#define MASK_R1_FLUSHI MASK_R1_OPX (0, 1, 1, 1)
+#define MATCH_R1_FLUSHP MATCH_R1_OPX (FLUSHP, 0, 0, 0)
+#define MASK_R1_FLUSHP MASK_R1_OPX (1, 1, 1, 1)
+#define MATCH_R1_INITD MATCH_R1_OP (INITD) | SET_IW_I_B (0)
+#define MASK_R1_INITD MASK_R1_OP | IW_I_B_SHIFTED_MASK
+#define MATCH_R1_INITDA MATCH_R1_OP (INITDA) | SET_IW_I_B (0)
+#define MASK_R1_INITDA MASK_R1_OP | IW_I_B_SHIFTED_MASK
+#define MATCH_R1_INITI MATCH_R1_OPX (INITI, 0, 0, 0)
+#define MASK_R1_INITI MASK_R1_OPX (0, 1, 1, 1)
+#define MATCH_R1_JMP MATCH_R1_OPX (JMP, 0, 0, 0)
+#define MASK_R1_JMP MASK_R1_OPX (0, 1, 1, 1)
+#define MATCH_R1_JMPI MATCH_R1_OP (JMPI)
+#define MASK_R1_JMPI MASK_R1_OP
+#define MATCH_R1_LDB MATCH_R1_OP (LDB)
+#define MASK_R1_LDB MASK_R1_OP
+#define MATCH_R1_LDBIO MATCH_R1_OP (LDBIO)
+#define MASK_R1_LDBIO MASK_R1_OP
+#define MATCH_R1_LDBU MATCH_R1_OP (LDBU)
+#define MASK_R1_LDBU MASK_R1_OP
+#define MATCH_R1_LDBUIO MATCH_R1_OP (LDBUIO)
+#define MASK_R1_LDBUIO MASK_R1_OP
+#define MATCH_R1_LDH MATCH_R1_OP (LDH)
+#define MASK_R1_LDH MASK_R1_OP
+#define MATCH_R1_LDHIO MATCH_R1_OP (LDHIO)
+#define MASK_R1_LDHIO MASK_R1_OP
+#define MATCH_R1_LDHU MATCH_R1_OP (LDHU)
+#define MASK_R1_LDHU MASK_R1_OP
+#define MATCH_R1_LDHUIO MATCH_R1_OP (LDHUIO)
+#define MASK_R1_LDHUIO MASK_R1_OP
+#define MATCH_R1_LDW MATCH_R1_OP (LDW)
+#define MASK_R1_LDW MASK_R1_OP
+#define MATCH_R1_LDWIO MATCH_R1_OP (LDWIO)
+#define MASK_R1_LDWIO MASK_R1_OP
+#define MATCH_R1_MOV MATCH_R1_OPX (ADD, 0, 0, 0)
+#define MASK_R1_MOV MASK_R1_OPX (0, 1, 0, 1)
+#define MATCH_R1_MOVHI MATCH_R1_OP (ORHI) | SET_IW_I_A (0)
+#define MASK_R1_MOVHI MASK_R1_OP | IW_I_A_SHIFTED_MASK
+#define MATCH_R1_MOVI MATCH_R1_OP (ADDI) | SET_IW_I_A (0)
+#define MASK_R1_MOVI MASK_R1_OP | IW_I_A_SHIFTED_MASK
+#define MATCH_R1_MOVUI MATCH_R1_OP (ORI) | SET_IW_I_A (0)
+#define MASK_R1_MOVUI MASK_R1_OP | IW_I_A_SHIFTED_MASK
+#define MATCH_R1_MUL MATCH_R1_OPX0 (MUL)
+#define MASK_R1_MUL MASK_R1_OPX0
+#define MATCH_R1_MULI MATCH_R1_OP (MULI)
+#define MASK_R1_MULI MASK_R1_OP
+#define MATCH_R1_MULXSS MATCH_R1_OPX0 (MULXSS)
+#define MASK_R1_MULXSS MASK_R1_OPX0
+#define MATCH_R1_MULXSU MATCH_R1_OPX0 (MULXSU)
+#define MASK_R1_MULXSU MASK_R1_OPX0
+#define MATCH_R1_MULXUU MATCH_R1_OPX0 (MULXUU)
+#define MASK_R1_MULXUU MASK_R1_OPX0
+#define MATCH_R1_NEXTPC MATCH_R1_OPX (NEXTPC, 0, 0, 0)
+#define MASK_R1_NEXTPC MASK_R1_OPX (1, 1, 0, 1)
+#define MATCH_R1_NOP MATCH_R1_OPX (ADD, 0, 0, 0)
+#define MASK_R1_NOP MASK_R1_OPX (1, 1, 1, 1)
+#define MATCH_R1_NOR MATCH_R1_OPX0 (NOR)
+#define MASK_R1_NOR MASK_R1_OPX0
+#define MATCH_R1_OR MATCH_R1_OPX0 (OR)
+#define MASK_R1_OR MASK_R1_OPX0
+#define MATCH_R1_ORHI MATCH_R1_OP (ORHI)
+#define MASK_R1_ORHI MASK_R1_OP
+#define MATCH_R1_ORI MATCH_R1_OP (ORI)
+#define MASK_R1_ORI MASK_R1_OP
+#define MATCH_R1_RDCTL MATCH_R1_OPX (RDCTL, 0, 0, 0)
+#define MASK_R1_RDCTL MASK_R1_OPX (1, 1, 0, 0)
+#define MATCH_R1_RDPRS MATCH_R1_OP (RDPRS)
+#define MASK_R1_RDPRS MASK_R1_OP
+#define MATCH_R1_RET MATCH_R1_OPX (RET, 0x1f, 0, 0)
+#define MASK_R1_RET MASK_R1_OPX (1, 1, 1, 1)
+#define MATCH_R1_ROL MATCH_R1_OPX0 (ROL)
+#define MASK_R1_ROL MASK_R1_OPX0
+#define MATCH_R1_ROLI MATCH_R1_OPX (ROLI, 0, 0, 0)
+#define MASK_R1_ROLI MASK_R1_OPX (0, 1, 0, 0)
+#define MATCH_R1_ROR MATCH_R1_OPX0 (ROR)
+#define MASK_R1_ROR MASK_R1_OPX0
+#define MATCH_R1_SLL MATCH_R1_OPX0 (SLL)
+#define MASK_R1_SLL MASK_R1_OPX0
+#define MATCH_R1_SLLI MATCH_R1_OPX (SLLI, 0, 0, 0)
+#define MASK_R1_SLLI MASK_R1_OPX (0, 1, 0, 0)
+#define MATCH_R1_SRA MATCH_R1_OPX0 (SRA)
+#define MASK_R1_SRA MASK_R1_OPX0
+#define MATCH_R1_SRAI MATCH_R1_OPX (SRAI, 0, 0, 0)
+#define MASK_R1_SRAI MASK_R1_OPX (0, 1, 0, 0)
+#define MATCH_R1_SRL MATCH_R1_OPX0 (SRL)
+#define MASK_R1_SRL MASK_R1_OPX0
+#define MATCH_R1_SRLI MATCH_R1_OPX (SRLI, 0, 0, 0)
+#define MASK_R1_SRLI MASK_R1_OPX (0, 1, 0, 0)
+#define MATCH_R1_STB MATCH_R1_OP (STB)
+#define MASK_R1_STB MASK_R1_OP
+#define MATCH_R1_STBIO MATCH_R1_OP (STBIO)
+#define MASK_R1_STBIO MASK_R1_OP
+#define MATCH_R1_STH MATCH_R1_OP (STH)
+#define MASK_R1_STH MASK_R1_OP
+#define MATCH_R1_STHIO MATCH_R1_OP (STHIO)
+#define MASK_R1_STHIO MASK_R1_OP
+#define MATCH_R1_STW MATCH_R1_OP (STW)
+#define MASK_R1_STW MASK_R1_OP
+#define MATCH_R1_STWIO MATCH_R1_OP (STWIO)
+#define MASK_R1_STWIO MASK_R1_OP
+#define MATCH_R1_SUB MATCH_R1_OPX0 (SUB)
+#define MASK_R1_SUB MASK_R1_OPX0
+#define MATCH_R1_SUBI MATCH_R1_OP (ADDI)
+#define MASK_R1_SUBI MASK_R1_OP
+#define MATCH_R1_SYNC MATCH_R1_OPX (SYNC, 0, 0, 0)
+#define MASK_R1_SYNC MASK_R1_OPX (1, 1, 1, 1)
+#define MATCH_R1_TRAP MATCH_R1_OPX (TRAP, 0, 0, 0x1d)
+#define MASK_R1_TRAP MASK_R1_OPX (1, 1, 1, 0)
+#define MATCH_R1_WRCTL MATCH_R1_OPX (WRCTL, 0, 0, 0)
+#define MASK_R1_WRCTL MASK_R1_OPX (0, 1, 1, 0)
+#define MATCH_R1_WRPRS MATCH_R1_OPX (WRPRS, 0, 0, 0)
+#define MASK_R1_WRPRS MASK_R1_OPX (0, 1, 0, 1)
+#define MATCH_R1_XOR MATCH_R1_OPX0 (XOR)
+#define MASK_R1_XOR MASK_R1_OPX0
+#define MATCH_R1_XORHI MATCH_R1_OP (XORHI)
+#define MASK_R1_XORHI MASK_R1_OP
+#define MATCH_R1_XORI MATCH_R1_OP (XORI)
+#define MASK_R1_XORI MASK_R1_OP
+
+#endif /* _NIOS2R1_H_ */
+
+/*#include "nios2r2.h"*/
+
+#ifndef _NIOS2R2_H_
+#define _NIOS2R2_H_
+
+/* Fields for 32-bit R2 instructions. */
+
+#define IW_R2_OP_LSB 0
+#define IW_R2_OP_SIZE 6
+#define IW_R2_OP_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R2_OP_SIZE))
+#define IW_R2_OP_SHIFTED_MASK (IW_R2_OP_UNSHIFTED_MASK << IW_R2_OP_LSB)
+#define GET_IW_R2_OP(W) (((W) >> IW_R2_OP_LSB) & IW_R2_OP_UNSHIFTED_MASK)
+#define SET_IW_R2_OP(V) (((V) & IW_R2_OP_UNSHIFTED_MASK) << IW_R2_OP_LSB)
+
+#define IW_L26_IMM26_LSB 6
+#define IW_L26_IMM26_SIZE 26
+#define IW_L26_IMM26_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L26_IMM26_SIZE))
+#define IW_L26_IMM26_SHIFTED_MASK (IW_L26_IMM26_UNSHIFTED_MASK << IW_L26_IMM26_LSB)
+#define GET_IW_L26_IMM26(W) (((W) >> IW_L26_IMM26_LSB) & IW_L26_IMM26_UNSHIFTED_MASK)
+#define SET_IW_L26_IMM26(V) (((V) & IW_L26_IMM26_UNSHIFTED_MASK) << IW_L26_IMM26_LSB)
+
+#define IW_F2I16_A_LSB 6
+#define IW_F2I16_A_SIZE 5
+#define IW_F2I16_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2I16_A_SIZE))
+#define IW_F2I16_A_SHIFTED_MASK (IW_F2I16_A_UNSHIFTED_MASK << IW_F2I16_A_LSB)
+#define GET_IW_F2I16_A(W) (((W) >> IW_F2I16_A_LSB) & IW_F2I16_A_UNSHIFTED_MASK)
+#define SET_IW_F2I16_A(V) (((V) & IW_F2I16_A_UNSHIFTED_MASK) << IW_F2I16_A_LSB)
+
+#define IW_F2I16_B_LSB 11
+#define IW_F2I16_B_SIZE 5
+#define IW_F2I16_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2I16_B_SIZE))
+#define IW_F2I16_B_SHIFTED_MASK (IW_F2I16_B_UNSHIFTED_MASK << IW_F2I16_B_LSB)
+#define GET_IW_F2I16_B(W) (((W) >> IW_F2I16_B_LSB) & IW_F2I16_B_UNSHIFTED_MASK)
+#define SET_IW_F2I16_B(V) (((V) & IW_F2I16_B_UNSHIFTED_MASK) << IW_F2I16_B_LSB)
+
+#define IW_F2I16_IMM16_LSB 16
+#define IW_F2I16_IMM16_SIZE 16
+#define IW_F2I16_IMM16_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2I16_IMM16_SIZE))
+#define IW_F2I16_IMM16_SHIFTED_MASK (IW_F2I16_IMM16_UNSHIFTED_MASK << IW_F2I16_IMM16_LSB)
+#define GET_IW_F2I16_IMM16(W) (((W) >> IW_F2I16_IMM16_LSB) & IW_F2I16_IMM16_UNSHIFTED_MASK)
+#define SET_IW_F2I16_IMM16(V) (((V) & IW_F2I16_IMM16_UNSHIFTED_MASK) << IW_F2I16_IMM16_LSB)
+
+/* Common to all three I12-group formats F2X4I12, F1X4I12, F1X4L17. */
+#define IW_I12_X_LSB 28
+#define IW_I12_X_SIZE 4
+#define IW_I12_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_I12_X_SIZE))
+#define IW_I12_X_SHIFTED_MASK (IW_I12_X_UNSHIFTED_MASK << IW_I12_X_LSB)
+#define GET_IW_I12_X(W) (((W) >> IW_I12_X_LSB) & IW_I12_X_UNSHIFTED_MASK)
+#define SET_IW_I12_X(V) (((V) & IW_I12_X_UNSHIFTED_MASK) << IW_I12_X_LSB)
+
+#define IW_F2X4I12_A_LSB 6
+#define IW_F2X4I12_A_SIZE 5
+#define IW_F2X4I12_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X4I12_A_SIZE))
+#define IW_F2X4I12_A_SHIFTED_MASK (IW_F2X4I12_A_UNSHIFTED_MASK << IW_F2X4I12_A_LSB)
+#define GET_IW_F2X4I12_A(W) (((W) >> IW_F2X4I12_A_LSB) & IW_F2X4I12_A_UNSHIFTED_MASK)
+#define SET_IW_F2X4I12_A(V) (((V) & IW_F2X4I12_A_UNSHIFTED_MASK) << IW_F2X4I12_A_LSB)
+
+#define IW_F2X4I12_B_LSB 11
+#define IW_F2X4I12_B_SIZE 5
+#define IW_F2X4I12_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X4I12_B_SIZE))
+#define IW_F2X4I12_B_SHIFTED_MASK (IW_F2X4I12_B_UNSHIFTED_MASK << IW_F2X4I12_B_LSB)
+#define GET_IW_F2X4I12_B(W) (((W) >> IW_F2X4I12_B_LSB) & IW_F2X4I12_B_UNSHIFTED_MASK)
+#define SET_IW_F2X4I12_B(V) (((V) & IW_F2X4I12_B_UNSHIFTED_MASK) << IW_F2X4I12_B_LSB)
+
+#define IW_F2X4I12_IMM12_LSB 16
+#define IW_F2X4I12_IMM12_SIZE 12
+#define IW_F2X4I12_IMM12_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X4I12_IMM12_SIZE))
+#define IW_F2X4I12_IMM12_SHIFTED_MASK (IW_F2X4I12_IMM12_UNSHIFTED_MASK << IW_F2X4I12_IMM12_LSB)
+#define GET_IW_F2X4I12_IMM12(W) (((W) >> IW_F2X4I12_IMM12_LSB) & IW_F2X4I12_IMM12_UNSHIFTED_MASK)
+#define SET_IW_F2X4I12_IMM12(V) (((V) & IW_F2X4I12_IMM12_UNSHIFTED_MASK) << IW_F2X4I12_IMM12_LSB)
+
+#define IW_F1X4I12_A_LSB 6
+#define IW_F1X4I12_A_SIZE 5
+#define IW_F1X4I12_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4I12_A_SIZE))
+#define IW_F1X4I12_A_SHIFTED_MASK (IW_F1X4I12_A_UNSHIFTED_MASK << IW_F1X4I12_A_LSB)
+#define GET_IW_F1X4I12_A(W) (((W) >> IW_F1X4I12_A_LSB) & IW_F1X4I12_A_UNSHIFTED_MASK)
+#define SET_IW_F1X4I12_A(V) (((V) & IW_F1X4I12_A_UNSHIFTED_MASK) << IW_F1X4I12_A_LSB)
+
+#define IW_F1X4I12_X_LSB 11
+#define IW_F1X4I12_X_SIZE 5
+#define IW_F1X4I12_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4I12_X_SIZE))
+#define IW_F1X4I12_X_SHIFTED_MASK (IW_F1X4I12_X_UNSHIFTED_MASK << IW_F1X4I12_X_LSB)
+#define GET_IW_F1X4I12_X(W) (((W) >> IW_F1X4I12_X_LSB) & IW_F1X4I12_X_UNSHIFTED_MASK)
+#define SET_IW_F1X4I12_X(V) (((V) & IW_F1X4I12_X_UNSHIFTED_MASK) << IW_F1X4I12_X_LSB)
+
+#define IW_F1X4I12_IMM12_LSB 16
+#define IW_F1X4I12_IMM12_SIZE 12
+#define IW_F1X4I12_IMM12_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4I12_IMM12_SIZE))
+#define IW_F1X4I12_IMM12_SHIFTED_MASK (IW_F1X4I12_IMM12_UNSHIFTED_MASK << IW_F1X4I12_IMM12_LSB)
+#define GET_IW_F1X4I12_IMM12(W) (((W) >> IW_F1X4I12_IMM12_LSB) & IW_F1X4I12_IMM12_UNSHIFTED_MASK)
+#define SET_IW_F1X4I12_IMM12(V) (((V) & IW_F1X4I12_IMM12_UNSHIFTED_MASK) << IW_F1X4I12_IMM12_LSB)
+
+#define IW_F1X4L17_A_LSB 6
+#define IW_F1X4L17_A_SIZE 5
+#define IW_F1X4L17_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_A_SIZE))
+#define IW_F1X4L17_A_SHIFTED_MASK (IW_F1X4L17_A_UNSHIFTED_MASK << IW_F1X4L17_A_LSB)
+#define GET_IW_F1X4L17_A(W) (((W) >> IW_F1X4L17_A_LSB) & IW_F1X4L17_A_UNSHIFTED_MASK)
+#define SET_IW_F1X4L17_A(V) (((V) & IW_F1X4L17_A_UNSHIFTED_MASK) << IW_F1X4L17_A_LSB)
+
+#define IW_F1X4L17_ID_LSB 11
+#define IW_F1X4L17_ID_SIZE 1
+#define IW_F1X4L17_ID_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_ID_SIZE))
+#define IW_F1X4L17_ID_SHIFTED_MASK (IW_F1X4L17_ID_UNSHIFTED_MASK << IW_F1X4L17_ID_LSB)
+#define GET_IW_F1X4L17_ID(W) (((W) >> IW_F1X4L17_ID_LSB) & IW_F1X4L17_ID_UNSHIFTED_MASK)
+#define SET_IW_F1X4L17_ID(V) (((V) & IW_F1X4L17_ID_UNSHIFTED_MASK) << IW_F1X4L17_ID_LSB)
+
+#define IW_F1X4L17_WB_LSB 12
+#define IW_F1X4L17_WB_SIZE 1
+#define IW_F1X4L17_WB_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_WB_SIZE))
+#define IW_F1X4L17_WB_SHIFTED_MASK (IW_F1X4L17_WB_UNSHIFTED_MASK << IW_F1X4L17_WB_LSB)
+#define GET_IW_F1X4L17_WB(W) (((W) >> IW_F1X4L17_WB_LSB) & IW_F1X4L17_WB_UNSHIFTED_MASK)
+#define SET_IW_F1X4L17_WB(V) (((V) & IW_F1X4L17_WB_UNSHIFTED_MASK) << IW_F1X4L17_WB_LSB)
+
+#define IW_F1X4L17_RS_LSB 13
+#define IW_F1X4L17_RS_SIZE 1
+#define IW_F1X4L17_RS_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_RS_SIZE))
+#define IW_F1X4L17_RS_SHIFTED_MASK (IW_F1X4L17_RS_UNSHIFTED_MASK << IW_F1X4L17_RS_LSB)
+#define GET_IW_F1X4L17_RS(W) (((W) >> IW_F1X4L17_RS_LSB) & IW_F1X4L17_RS_UNSHIFTED_MASK)
+#define SET_IW_F1X4L17_RS(V) (((V) & IW_F1X4L17_RS_UNSHIFTED_MASK) << IW_F1X4L17_RS_LSB)
+
+#define IW_F1X4L17_PC_LSB 14
+#define IW_F1X4L17_PC_SIZE 1
+#define IW_F1X4L17_PC_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_PC_SIZE))
+#define IW_F1X4L17_PC_SHIFTED_MASK (IW_F1X4L17_PC_UNSHIFTED_MASK << IW_F1X4L17_PC_LSB)
+#define GET_IW_F1X4L17_PC(W) (((W) >> IW_F1X4L17_PC_LSB) & IW_F1X4L17_PC_UNSHIFTED_MASK)
+#define SET_IW_F1X4L17_PC(V) (((V) & IW_F1X4L17_PC_UNSHIFTED_MASK) << IW_F1X4L17_PC_LSB)
+
+#define IW_F1X4L17_RSV_LSB 15
+#define IW_F1X4L17_RSV_SIZE 1
+#define IW_F1X4L17_RSV_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_RSV_SIZE))
+#define IW_F1X4L17_RSV_SHIFTED_MASK (IW_F1X4L17_RSV_UNSHIFTED_MASK << IW_F1X4L17_RSV_LSB)
+#define GET_IW_F1X4L17_RSV(W) (((W) >> IW_F1X4L17_RSV_LSB) & IW_F1X4L17_RSV_UNSHIFTED_MASK)
+#define SET_IW_F1X4L17_RSV(V) (((V) & IW_F1X4L17_RSV_UNSHIFTED_MASK) << IW_F1X4L17_RSV_LSB)
+
+#define IW_F1X4L17_REGMASK_LSB 16
+#define IW_F1X4L17_REGMASK_SIZE 12
+#define IW_F1X4L17_REGMASK_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X4L17_REGMASK_SIZE))
+#define IW_F1X4L17_REGMASK_SHIFTED_MASK (IW_F1X4L17_REGMASK_UNSHIFTED_MASK << IW_F1X4L17_REGMASK_LSB)
+#define GET_IW_F1X4L17_REGMASK(W) (((W) >> IW_F1X4L17_REGMASK_LSB) & IW_F1X4L17_REGMASK_UNSHIFTED_MASK)
+#define SET_IW_F1X4L17_REGMASK(V) (((V) & IW_F1X4L17_REGMASK_UNSHIFTED_MASK) << IW_F1X4L17_REGMASK_LSB)
+
+/* Shared by OPX-group formats F3X6L5, F2X6L10, F3X6. */
+#define IW_OPX_X_LSB 26
+#define IW_OPX_X_SIZE 6
+#define IW_OPX_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_OPX_X_SIZE))
+#define IW_OPX_X_SHIFTED_MASK (IW_OPX_X_UNSHIFTED_MASK << IW_OPX_X_LSB)
+#define GET_IW_OPX_X(W) (((W) >> IW_OPX_X_LSB) & IW_OPX_X_UNSHIFTED_MASK)
+#define SET_IW_OPX_X(V) (((V) & IW_OPX_X_UNSHIFTED_MASK) << IW_OPX_X_LSB)
+
+/* F3X6L5 accessors are also used for F3X6 formats. */
+#define IW_F3X6L5_A_LSB 6
+#define IW_F3X6L5_A_SIZE 5
+#define IW_F3X6L5_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X6L5_A_SIZE))
+#define IW_F3X6L5_A_SHIFTED_MASK (IW_F3X6L5_A_UNSHIFTED_MASK << IW_F3X6L5_A_LSB)
+#define GET_IW_F3X6L5_A(W) (((W) >> IW_F3X6L5_A_LSB) & IW_F3X6L5_A_UNSHIFTED_MASK)
+#define SET_IW_F3X6L5_A(V) (((V) & IW_F3X6L5_A_UNSHIFTED_MASK) << IW_F3X6L5_A_LSB)
+
+#define IW_F3X6L5_B_LSB 11
+#define IW_F3X6L5_B_SIZE 5
+#define IW_F3X6L5_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X6L5_B_SIZE))
+#define IW_F3X6L5_B_SHIFTED_MASK (IW_F3X6L5_B_UNSHIFTED_MASK << IW_F3X6L5_B_LSB)
+#define GET_IW_F3X6L5_B(W) (((W) >> IW_F3X6L5_B_LSB) & IW_F3X6L5_B_UNSHIFTED_MASK)
+#define SET_IW_F3X6L5_B(V) (((V) & IW_F3X6L5_B_UNSHIFTED_MASK) << IW_F3X6L5_B_LSB)
+
+#define IW_F3X6L5_C_LSB 16
+#define IW_F3X6L5_C_SIZE 5
+#define IW_F3X6L5_C_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X6L5_C_SIZE))
+#define IW_F3X6L5_C_SHIFTED_MASK (IW_F3X6L5_C_UNSHIFTED_MASK << IW_F3X6L5_C_LSB)
+#define GET_IW_F3X6L5_C(W) (((W) >> IW_F3X6L5_C_LSB) & IW_F3X6L5_C_UNSHIFTED_MASK)
+#define SET_IW_F3X6L5_C(V) (((V) & IW_F3X6L5_C_UNSHIFTED_MASK) << IW_F3X6L5_C_LSB)
+
+#define IW_F3X6L5_IMM5_LSB 21
+#define IW_F3X6L5_IMM5_SIZE 5
+#define IW_F3X6L5_IMM5_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X6L5_IMM5_SIZE))
+#define IW_F3X6L5_IMM5_SHIFTED_MASK (IW_F3X6L5_IMM5_UNSHIFTED_MASK << IW_F3X6L5_IMM5_LSB)
+#define GET_IW_F3X6L5_IMM5(W) (((W) >> IW_F3X6L5_IMM5_LSB) & IW_F3X6L5_IMM5_UNSHIFTED_MASK)
+#define SET_IW_F3X6L5_IMM5(V) (((V) & IW_F3X6L5_IMM5_UNSHIFTED_MASK) << IW_F3X6L5_IMM5_LSB)
+
+#define IW_F2X6L10_A_LSB 6
+#define IW_F2X6L10_A_SIZE 5
+#define IW_F2X6L10_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X6L10_A_SIZE))
+#define IW_F2X6L10_A_SHIFTED_MASK (IW_F2X6L10_A_UNSHIFTED_MASK << IW_F2X6L10_A_LSB)
+#define GET_IW_F2X6L10_A(W) (((W) >> IW_F2X6L10_A_LSB) & IW_F2X6L10_A_UNSHIFTED_MASK)
+#define SET_IW_F2X6L10_A(V) (((V) & IW_F2X6L10_A_UNSHIFTED_MASK) << IW_F2X6L10_A_LSB)
+
+#define IW_F2X6L10_B_LSB 11
+#define IW_F2X6L10_B_SIZE 5
+#define IW_F2X6L10_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X6L10_B_SIZE))
+#define IW_F2X6L10_B_SHIFTED_MASK (IW_F2X6L10_B_UNSHIFTED_MASK << IW_F2X6L10_B_LSB)
+#define GET_IW_F2X6L10_B(W) (((W) >> IW_F2X6L10_B_LSB) & IW_F2X6L10_B_UNSHIFTED_MASK)
+#define SET_IW_F2X6L10_B(V) (((V) & IW_F2X6L10_B_UNSHIFTED_MASK) << IW_F2X6L10_B_LSB)
+
+#define IW_F2X6L10_LSB_LSB 16
+#define IW_F2X6L10_LSB_SIZE 5
+#define IW_F2X6L10_LSB_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X6L10_LSB_SIZE))
+#define IW_F2X6L10_LSB_SHIFTED_MASK (IW_F2X6L10_LSB_UNSHIFTED_MASK << IW_F2X6L10_LSB_LSB)
+#define GET_IW_F2X6L10_LSB(W) (((W) >> IW_F2X6L10_LSB_LSB) & IW_F2X6L10_LSB_UNSHIFTED_MASK)
+#define SET_IW_F2X6L10_LSB(V) (((V) & IW_F2X6L10_LSB_UNSHIFTED_MASK) << IW_F2X6L10_LSB_LSB)
+
+#define IW_F2X6L10_MSB_LSB 21
+#define IW_F2X6L10_MSB_SIZE 5
+#define IW_F2X6L10_MSB_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2X6L10_MSB_SIZE))
+#define IW_F2X6L10_MSB_SHIFTED_MASK (IW_F2X6L10_MSB_UNSHIFTED_MASK << IW_F2X6L10_MSB_LSB)
+#define GET_IW_F2X6L10_MSB(W) (((W) >> IW_F2X6L10_MSB_LSB) & IW_F2X6L10_MSB_UNSHIFTED_MASK)
+#define SET_IW_F2X6L10_MSB(V) (((V) & IW_F2X6L10_MSB_UNSHIFTED_MASK) << IW_F2X6L10_MSB_LSB)
+
+#define IW_F3X8_A_LSB 6
+#define IW_F3X8_A_SIZE 5
+#define IW_F3X8_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_A_SIZE))
+#define IW_F3X8_A_SHIFTED_MASK (IW_F3X8_A_UNSHIFTED_MASK << IW_F3X8_A_LSB)
+#define GET_IW_F3X8_A(W) (((W) >> IW_F3X8_A_LSB) & IW_F3X8_A_UNSHIFTED_MASK)
+#define SET_IW_F3X8_A(V) (((V) & IW_F3X8_A_UNSHIFTED_MASK) << IW_F3X8_A_LSB)
+
+#define IW_F3X8_B_LSB 11
+#define IW_F3X8_B_SIZE 5
+#define IW_F3X8_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_B_SIZE))
+#define IW_F3X8_B_SHIFTED_MASK (IW_F3X8_B_UNSHIFTED_MASK << IW_F3X8_B_LSB)
+#define GET_IW_F3X8_B(W) (((W) >> IW_F3X8_B_LSB) & IW_F3X8_B_UNSHIFTED_MASK)
+#define SET_IW_F3X8_B(V) (((V) & IW_F3X8_B_UNSHIFTED_MASK) << IW_F3X8_B_LSB)
+
+#define IW_F3X8_C_LSB 16
+#define IW_F3X8_C_SIZE 5
+#define IW_F3X8_C_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_C_SIZE))
+#define IW_F3X8_C_SHIFTED_MASK (IW_F3X8_C_UNSHIFTED_MASK << IW_F3X8_C_LSB)
+#define GET_IW_F3X8_C(W) (((W) >> IW_F3X8_C_LSB) & IW_F3X8_C_UNSHIFTED_MASK)
+#define SET_IW_F3X8_C(V) (((V) & IW_F3X8_C_UNSHIFTED_MASK) << IW_F3X8_C_LSB)
+
+#define IW_F3X8_READA_LSB 21
+#define IW_F3X8_READA_SIZE 1
+#define IW_F3X8_READA_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_READA_SIZE))
+#define IW_F3X8_READA_SHIFTED_MASK (IW_F3X8_READA_UNSHIFTED_MASK << IW_F3X8_READA_LSB)
+#define GET_IW_F3X8_READA(W) (((W) >> IW_F3X8_READA_LSB) & IW_F3X8_READA_UNSHIFTED_MASK)
+#define SET_IW_F3X8_READA(V) (((V) & IW_F3X8_READA_UNSHIFTED_MASK) << IW_F3X8_READA_LSB)
+
+#define IW_F3X8_READB_LSB 22
+#define IW_F3X8_READB_SIZE 1
+#define IW_F3X8_READB_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_READB_SIZE))
+#define IW_F3X8_READB_SHIFTED_MASK (IW_F3X8_READB_UNSHIFTED_MASK << IW_F3X8_READB_LSB)
+#define GET_IW_F3X8_READB(W) (((W) >> IW_F3X8_READB_LSB) & IW_F3X8_READB_UNSHIFTED_MASK)
+#define SET_IW_F3X8_READB(V) (((V) & IW_F3X8_READB_UNSHIFTED_MASK) << IW_F3X8_READB_LSB)
+
+#define IW_F3X8_READC_LSB 23
+#define IW_F3X8_READC_SIZE 1
+#define IW_F3X8_READC_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_READC_SIZE))
+#define IW_F3X8_READC_SHIFTED_MASK (IW_F3X8_READC_UNSHIFTED_MASK << IW_F3X8_READC_LSB)
+#define GET_IW_F3X8_READC(W) (((W) >> IW_F3X8_READC_LSB) & IW_F3X8_READC_UNSHIFTED_MASK)
+#define SET_IW_F3X8_READC(V) (((V) & IW_F3X8_READC_UNSHIFTED_MASK) << IW_F3X8_READC_LSB)
+
+#define IW_F3X8_N_LSB 24
+#define IW_F3X8_N_SIZE 8
+#define IW_F3X8_N_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F3X8_N_SIZE))
+#define IW_F3X8_N_SHIFTED_MASK (IW_F3X8_N_UNSHIFTED_MASK << IW_F3X8_N_LSB)
+#define GET_IW_F3X8_N(W) (((W) >> IW_F3X8_N_LSB) & IW_F3X8_N_UNSHIFTED_MASK)
+#define SET_IW_F3X8_N(V) (((V) & IW_F3X8_N_UNSHIFTED_MASK) << IW_F3X8_N_LSB)
+
+/* 16-bit R2 fields. */
+
+#define IW_I10_IMM10_LSB 6
+#define IW_I10_IMM10_SIZE 10
+#define IW_I10_IMM10_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_I10_IMM10_SIZE))
+#define IW_I10_IMM10_SHIFTED_MASK (IW_I10_IMM10_UNSHIFTED_MASK << IW_I10_IMM10_LSB)
+#define GET_IW_I10_IMM10(W) (((W) >> IW_I10_IMM10_LSB) & IW_I10_IMM10_UNSHIFTED_MASK)
+#define SET_IW_I10_IMM10(V) (((V) & IW_I10_IMM10_UNSHIFTED_MASK) << IW_I10_IMM10_LSB)
+
+#define IW_T1I7_A3_LSB 6
+#define IW_T1I7_A3_SIZE 3
+#define IW_T1I7_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T1I7_A3_SIZE))
+#define IW_T1I7_A3_SHIFTED_MASK (IW_T1I7_A3_UNSHIFTED_MASK << IW_T1I7_A3_LSB)
+#define GET_IW_T1I7_A3(W) (((W) >> IW_T1I7_A3_LSB) & IW_T1I7_A3_UNSHIFTED_MASK)
+#define SET_IW_T1I7_A3(V) (((V) & IW_T1I7_A3_UNSHIFTED_MASK) << IW_T1I7_A3_LSB)
+
+#define IW_T1I7_IMM7_LSB 9
+#define IW_T1I7_IMM7_SIZE 7
+#define IW_T1I7_IMM7_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T1I7_IMM7_SIZE))
+#define IW_T1I7_IMM7_SHIFTED_MASK (IW_T1I7_IMM7_UNSHIFTED_MASK << IW_T1I7_IMM7_LSB)
+#define GET_IW_T1I7_IMM7(W) (((W) >> IW_T1I7_IMM7_LSB) & IW_T1I7_IMM7_UNSHIFTED_MASK)
+#define SET_IW_T1I7_IMM7(V) (((V) & IW_T1I7_IMM7_UNSHIFTED_MASK) << IW_T1I7_IMM7_LSB)
+
+#define IW_T2I4_A3_LSB 6
+#define IW_T2I4_A3_SIZE 3
+#define IW_T2I4_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2I4_A3_SIZE))
+#define IW_T2I4_A3_SHIFTED_MASK (IW_T2I4_A3_UNSHIFTED_MASK << IW_T2I4_A3_LSB)
+#define GET_IW_T2I4_A3(W) (((W) >> IW_T2I4_A3_LSB) & IW_T2I4_A3_UNSHIFTED_MASK)
+#define SET_IW_T2I4_A3(V) (((V) & IW_T2I4_A3_UNSHIFTED_MASK) << IW_T2I4_A3_LSB)
+
+#define IW_T2I4_B3_LSB 9
+#define IW_T2I4_B3_SIZE 3
+#define IW_T2I4_B3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2I4_B3_SIZE))
+#define IW_T2I4_B3_SHIFTED_MASK (IW_T2I4_B3_UNSHIFTED_MASK << IW_T2I4_B3_LSB)
+#define GET_IW_T2I4_B3(W) (((W) >> IW_T2I4_B3_LSB) & IW_T2I4_B3_UNSHIFTED_MASK)
+#define SET_IW_T2I4_B3(V) (((V) & IW_T2I4_B3_UNSHIFTED_MASK) << IW_T2I4_B3_LSB)
+
+#define IW_T2I4_IMM4_LSB 12
+#define IW_T2I4_IMM4_SIZE 4
+#define IW_T2I4_IMM4_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2I4_IMM4_SIZE))
+#define IW_T2I4_IMM4_SHIFTED_MASK (IW_T2I4_IMM4_UNSHIFTED_MASK << IW_T2I4_IMM4_LSB)
+#define GET_IW_T2I4_IMM4(W) (((W) >> IW_T2I4_IMM4_LSB) & IW_T2I4_IMM4_UNSHIFTED_MASK)
+#define SET_IW_T2I4_IMM4(V) (((V) & IW_T2I4_IMM4_UNSHIFTED_MASK) << IW_T2I4_IMM4_LSB)
+
+#define IW_T1X1I6_A3_LSB 6
+#define IW_T1X1I6_A3_SIZE 3
+#define IW_T1X1I6_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T1X1I6_A3_SIZE))
+#define IW_T1X1I6_A3_SHIFTED_MASK (IW_T1X1I6_A3_UNSHIFTED_MASK << IW_T1X1I6_A3_LSB)
+#define GET_IW_T1X1I6_A3(W) (((W) >> IW_T1X1I6_A3_LSB) & IW_T1X1I6_A3_UNSHIFTED_MASK)
+#define SET_IW_T1X1I6_A3(V) (((V) & IW_T1X1I6_A3_UNSHIFTED_MASK) << IW_T1X1I6_A3_LSB)
+
+#define IW_T1X1I6_IMM6_LSB 9
+#define IW_T1X1I6_IMM6_SIZE 6
+#define IW_T1X1I6_IMM6_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T1X1I6_IMM6_SIZE))
+#define IW_T1X1I6_IMM6_SHIFTED_MASK (IW_T1X1I6_IMM6_UNSHIFTED_MASK << IW_T1X1I6_IMM6_LSB)
+#define GET_IW_T1X1I6_IMM6(W) (((W) >> IW_T1X1I6_IMM6_LSB) & IW_T1X1I6_IMM6_UNSHIFTED_MASK)
+#define SET_IW_T1X1I6_IMM6(V) (((V) & IW_T1X1I6_IMM6_UNSHIFTED_MASK) << IW_T1X1I6_IMM6_LSB)
+
+#define IW_T1X1I6_X_LSB 15
+#define IW_T1X1I6_X_SIZE 1
+#define IW_T1X1I6_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T1X1I6_X_SIZE))
+#define IW_T1X1I6_X_SHIFTED_MASK (IW_T1X1I6_X_UNSHIFTED_MASK << IW_T1X1I6_X_LSB)
+#define GET_IW_T1X1I6_X(W) (((W) >> IW_T1X1I6_X_LSB) & IW_T1X1I6_X_UNSHIFTED_MASK)
+#define SET_IW_T1X1I6_X(V) (((V) & IW_T1X1I6_X_UNSHIFTED_MASK) << IW_T1X1I6_X_LSB)
+
+#define IW_X1I7_IMM7_LSB 6
+#define IW_X1I7_IMM7_SIZE 7
+#define IW_X1I7_IMM7_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_X1I7_IMM7_SIZE))
+#define IW_X1I7_IMM7_SHIFTED_MASK (IW_X1I7_IMM7_UNSHIFTED_MASK << IW_X1I7_IMM7_LSB)
+#define GET_IW_X1I7_IMM7(W) (((W) >> IW_X1I7_IMM7_LSB) & IW_X1I7_IMM7_UNSHIFTED_MASK)
+#define SET_IW_X1I7_IMM7(V) (((V) & IW_X1I7_IMM7_UNSHIFTED_MASK) << IW_X1I7_IMM7_LSB)
+
+#define IW_X1I7_RSV_LSB 13
+#define IW_X1I7_RSV_SIZE 2
+#define IW_X1I7_RSV_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_X1I7_RSV_SIZE))
+#define IW_X1I7_RSV_SHIFTED_MASK (IW_X1I7_RSV_UNSHIFTED_MASK << IW_X1I7_RSV_LSB)
+#define GET_IW_X1I7_RSV(W) (((W) >> IW_X1I7_RSV_LSB) & IW_X1I7_RSV_UNSHIFTED_MASK)
+#define SET_IW_X1I7_RSV(V) (((V) & IW_X1I7_RSV_UNSHIFTED_MASK) << IW_X1I7_RSV_LSB)
+
+#define IW_X1I7_X_LSB 15
+#define IW_X1I7_X_SIZE 1
+#define IW_X1I7_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_X1I7_X_SIZE))
+#define IW_X1I7_X_SHIFTED_MASK (IW_X1I7_X_UNSHIFTED_MASK << IW_X1I7_X_LSB)
+#define GET_IW_X1I7_X(W) (((W) >> IW_X1I7_X_LSB) & IW_X1I7_X_UNSHIFTED_MASK)
+#define SET_IW_X1I7_X(V) (((V) & IW_X1I7_X_UNSHIFTED_MASK) << IW_X1I7_X_LSB)
+
+#define IW_L5I4X1_IMM4_LSB 6
+#define IW_L5I4X1_IMM4_SIZE 4
+#define IW_L5I4X1_IMM4_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L5I4X1_IMM4_SIZE))
+#define IW_L5I4X1_IMM4_SHIFTED_MASK (IW_L5I4X1_IMM4_UNSHIFTED_MASK << IW_L5I4X1_IMM4_LSB)
+#define GET_IW_L5I4X1_IMM4(W) (((W) >> IW_L5I4X1_IMM4_LSB) & IW_L5I4X1_IMM4_UNSHIFTED_MASK)
+#define SET_IW_L5I4X1_IMM4(V) (((V) & IW_L5I4X1_IMM4_UNSHIFTED_MASK) << IW_L5I4X1_IMM4_LSB)
+
+#define IW_L5I4X1_REGRANGE_LSB 10
+#define IW_L5I4X1_REGRANGE_SIZE 3
+#define IW_L5I4X1_REGRANGE_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L5I4X1_REGRANGE_SIZE))
+#define IW_L5I4X1_REGRANGE_SHIFTED_MASK (IW_L5I4X1_REGRANGE_UNSHIFTED_MASK << IW_L5I4X1_REGRANGE_LSB)
+#define GET_IW_L5I4X1_REGRANGE(W) (((W) >> IW_L5I4X1_REGRANGE_LSB) & IW_L5I4X1_REGRANGE_UNSHIFTED_MASK)
+#define SET_IW_L5I4X1_REGRANGE(V) (((V) & IW_L5I4X1_REGRANGE_UNSHIFTED_MASK) << IW_L5I4X1_REGRANGE_LSB)
+
+#define IW_L5I4X1_FP_LSB 13
+#define IW_L5I4X1_FP_SIZE 1
+#define IW_L5I4X1_FP_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L5I4X1_FP_SIZE))
+#define IW_L5I4X1_FP_SHIFTED_MASK (IW_L5I4X1_FP_UNSHIFTED_MASK << IW_L5I4X1_FP_LSB)
+#define GET_IW_L5I4X1_FP(W) (((W) >> IW_L5I4X1_FP_LSB) & IW_L5I4X1_FP_UNSHIFTED_MASK)
+#define SET_IW_L5I4X1_FP(V) (((V) & IW_L5I4X1_FP_UNSHIFTED_MASK) << IW_L5I4X1_FP_LSB)
+
+#define IW_L5I4X1_CS_LSB 14
+#define IW_L5I4X1_CS_SIZE 1
+#define IW_L5I4X1_CS_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L5I4X1_CS_SIZE))
+#define IW_L5I4X1_CS_SHIFTED_MASK (IW_L5I4X1_CS_UNSHIFTED_MASK << IW_L5I4X1_CS_LSB)
+#define GET_IW_L5I4X1_CS(W) (((W) >> IW_L5I4X1_CS_LSB) & IW_L5I4X1_CS_UNSHIFTED_MASK)
+#define SET_IW_L5I4X1_CS(V) (((V) & IW_L5I4X1_CS_UNSHIFTED_MASK) << IW_L5I4X1_CS_LSB)
+
+#define IW_L5I4X1_X_LSB 15
+#define IW_L5I4X1_X_SIZE 1
+#define IW_L5I4X1_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_L5I4X1_X_SIZE))
+#define IW_L5I4X1_X_SHIFTED_MASK (IW_L5I4X1_X_UNSHIFTED_MASK << IW_L5I4X1_X_LSB)
+#define GET_IW_L5I4X1_X(W) (((W) >> IW_L5I4X1_X_LSB) & IW_L5I4X1_X_UNSHIFTED_MASK)
+#define SET_IW_L5I4X1_X(V) (((V) & IW_L5I4X1_X_UNSHIFTED_MASK) << IW_L5I4X1_X_LSB)
+
+#define IW_T2X1L3_A3_LSB 6
+#define IW_T2X1L3_A3_SIZE 3
+#define IW_T2X1L3_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1L3_A3_SIZE))
+#define IW_T2X1L3_A3_SHIFTED_MASK (IW_T2X1L3_A3_UNSHIFTED_MASK << IW_T2X1L3_A3_LSB)
+#define GET_IW_T2X1L3_A3(W) (((W) >> IW_T2X1L3_A3_LSB) & IW_T2X1L3_A3_UNSHIFTED_MASK)
+#define SET_IW_T2X1L3_A3(V) (((V) & IW_T2X1L3_A3_UNSHIFTED_MASK) << IW_T2X1L3_A3_LSB)
+
+#define IW_T2X1L3_B3_LSB 9
+#define IW_T2X1L3_B3_SIZE 3
+#define IW_T2X1L3_B3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1L3_B3_SIZE))
+#define IW_T2X1L3_B3_SHIFTED_MASK (IW_T2X1L3_B3_UNSHIFTED_MASK << IW_T2X1L3_B3_LSB)
+#define GET_IW_T2X1L3_B3(W) (((W) >> IW_T2X1L3_B3_LSB) & IW_T2X1L3_B3_UNSHIFTED_MASK)
+#define SET_IW_T2X1L3_B3(V) (((V) & IW_T2X1L3_B3_UNSHIFTED_MASK) << IW_T2X1L3_B3_LSB)
+
+#define IW_T2X1L3_SHAMT_LSB 12
+#define IW_T2X1L3_SHAMT_SIZE 3
+#define IW_T2X1L3_SHAMT_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1L3_SHAMT_SIZE))
+#define IW_T2X1L3_SHAMT_SHIFTED_MASK (IW_T2X1L3_SHAMT_UNSHIFTED_MASK << IW_T2X1L3_SHAMT_LSB)
+#define GET_IW_T2X1L3_SHAMT(W) (((W) >> IW_T2X1L3_SHAMT_LSB) & IW_T2X1L3_SHAMT_UNSHIFTED_MASK)
+#define SET_IW_T2X1L3_SHAMT(V) (((V) & IW_T2X1L3_SHAMT_UNSHIFTED_MASK) << IW_T2X1L3_SHAMT_LSB)
+
+#define IW_T2X1L3_X_LSB 15
+#define IW_T2X1L3_X_SIZE 1
+#define IW_T2X1L3_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1L3_X_SIZE))
+#define IW_T2X1L3_X_SHIFTED_MASK (IW_T2X1L3_X_UNSHIFTED_MASK << IW_T2X1L3_X_LSB)
+#define GET_IW_T2X1L3_X(W) (((W) >> IW_T2X1L3_X_LSB) & IW_T2X1L3_X_UNSHIFTED_MASK)
+#define SET_IW_T2X1L3_X(V) (((V) & IW_T2X1L3_X_UNSHIFTED_MASK) << IW_T2X1L3_X_LSB)
+
+#define IW_T2X1I3_A3_LSB 6
+#define IW_T2X1I3_A3_SIZE 3
+#define IW_T2X1I3_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1I3_A3_SIZE))
+#define IW_T2X1I3_A3_SHIFTED_MASK (IW_T2X1I3_A3_UNSHIFTED_MASK << IW_T2X1I3_A3_LSB)
+#define GET_IW_T2X1I3_A3(W) (((W) >> IW_T2X1I3_A3_LSB) & IW_T2X1I3_A3_UNSHIFTED_MASK)
+#define SET_IW_T2X1I3_A3(V) (((V) & IW_T2X1I3_A3_UNSHIFTED_MASK) << IW_T2X1I3_A3_LSB)
+
+#define IW_T2X1I3_B3_LSB 9
+#define IW_T2X1I3_B3_SIZE 3
+#define IW_T2X1I3_B3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1I3_B3_SIZE))
+#define IW_T2X1I3_B3_SHIFTED_MASK (IW_T2X1I3_B3_UNSHIFTED_MASK << IW_T2X1I3_B3_LSB)
+#define GET_IW_T2X1I3_B3(W) (((W) >> IW_T2X1I3_B3_LSB) & IW_T2X1I3_B3_UNSHIFTED_MASK)
+#define SET_IW_T2X1I3_B3(V) (((V) & IW_T2X1I3_B3_UNSHIFTED_MASK) << IW_T2X1I3_B3_LSB)
+
+#define IW_T2X1I3_IMM3_LSB 12
+#define IW_T2X1I3_IMM3_SIZE 3
+#define IW_T2X1I3_IMM3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1I3_IMM3_SIZE))
+#define IW_T2X1I3_IMM3_SHIFTED_MASK (IW_T2X1I3_IMM3_UNSHIFTED_MASK << IW_T2X1I3_IMM3_LSB)
+#define GET_IW_T2X1I3_IMM3(W) (((W) >> IW_T2X1I3_IMM3_LSB) & IW_T2X1I3_IMM3_UNSHIFTED_MASK)
+#define SET_IW_T2X1I3_IMM3(V) (((V) & IW_T2X1I3_IMM3_UNSHIFTED_MASK) << IW_T2X1I3_IMM3_LSB)
+
+#define IW_T2X1I3_X_LSB 15
+#define IW_T2X1I3_X_SIZE 1
+#define IW_T2X1I3_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X1I3_X_SIZE))
+#define IW_T2X1I3_X_SHIFTED_MASK (IW_T2X1I3_X_UNSHIFTED_MASK << IW_T2X1I3_X_LSB)
+#define GET_IW_T2X1I3_X(W) (((W) >> IW_T2X1I3_X_LSB) & IW_T2X1I3_X_UNSHIFTED_MASK)
+#define SET_IW_T2X1I3_X(V) (((V) & IW_T2X1I3_X_UNSHIFTED_MASK) << IW_T2X1I3_X_LSB)
+
+#define IW_T3X1_A3_LSB 6
+#define IW_T3X1_A3_SIZE 3
+#define IW_T3X1_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T3X1_A3_SIZE))
+#define IW_T3X1_A3_SHIFTED_MASK (IW_T3X1_A3_UNSHIFTED_MASK << IW_T3X1_A3_LSB)
+#define GET_IW_T3X1_A3(W) (((W) >> IW_T3X1_A3_LSB) & IW_T3X1_A3_UNSHIFTED_MASK)
+#define SET_IW_T3X1_A3(V) (((V) & IW_T3X1_A3_UNSHIFTED_MASK) << IW_T3X1_A3_LSB)
+
+#define IW_T3X1_B3_LSB 9
+#define IW_T3X1_B3_SIZE 3
+#define IW_T3X1_B3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T3X1_B3_SIZE))
+#define IW_T3X1_B3_SHIFTED_MASK (IW_T3X1_B3_UNSHIFTED_MASK << IW_T3X1_B3_LSB)
+#define GET_IW_T3X1_B3(W) (((W) >> IW_T3X1_B3_LSB) & IW_T3X1_B3_UNSHIFTED_MASK)
+#define SET_IW_T3X1_B3(V) (((V) & IW_T3X1_B3_UNSHIFTED_MASK) << IW_T3X1_B3_LSB)
+
+#define IW_T3X1_C3_LSB 12
+#define IW_T3X1_C3_SIZE 3
+#define IW_T3X1_C3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T3X1_C3_SIZE))
+#define IW_T3X1_C3_SHIFTED_MASK (IW_T3X1_C3_UNSHIFTED_MASK << IW_T3X1_C3_LSB)
+#define GET_IW_T3X1_C3(W) (((W) >> IW_T3X1_C3_LSB) & IW_T3X1_C3_UNSHIFTED_MASK)
+#define SET_IW_T3X1_C3(V) (((V) & IW_T3X1_C3_UNSHIFTED_MASK) << IW_T3X1_C3_LSB)
+
+#define IW_T3X1_X_LSB 15
+#define IW_T3X1_X_SIZE 1
+#define IW_T3X1_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T3X1_X_SIZE))
+#define IW_T3X1_X_SHIFTED_MASK (IW_T3X1_X_UNSHIFTED_MASK << IW_T3X1_X_LSB)
+#define GET_IW_T3X1_X(W) (((W) >> IW_T3X1_X_LSB) & IW_T3X1_X_UNSHIFTED_MASK)
+#define SET_IW_T3X1_X(V) (((V) & IW_T3X1_X_UNSHIFTED_MASK) << IW_T3X1_X_LSB)
+
+/* The X field for all three R.N-class instruction formats is represented
+ here as 4 bits, including the bits defined as constant 0 or 1 that
+ determine which of the formats T2X3, F1X1, or X2L5 it is. */
+#define IW_R_N_X_LSB 12
+#define IW_R_N_X_SIZE 4
+#define IW_R_N_X_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_R_N_X_SIZE))
+#define IW_R_N_X_SHIFTED_MASK (IW_R_N_X_UNSHIFTED_MASK << IW_R_N_X_LSB)
+#define GET_IW_R_N_X(W) (((W) >> IW_R_N_X_LSB) & IW_R_N_X_UNSHIFTED_MASK)
+#define SET_IW_R_N_X(V) (((V) & IW_R_N_X_UNSHIFTED_MASK) << IW_R_N_X_LSB)
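A hedged sketch of how the 4-bit X field described in the comment above discriminates the three R.N sub-formats. The format assignments are taken from the R2 opcode table later in this file (and.n through neg.n use T2X3, callr.n and jmpr.n use F1X1, break.n and ret.n use X2L5); trap.n is assumed to behave like break.n. The R2_R_N_* codes and the field accessors referenced here are all defined further down in this header; the helper itself is illustrative only, not part of the patch.

  /* Sketch only: classify a 16-bit R.N-class halfword by its X field.  */
  static void decode_r_n_sketch (unsigned int insn)
  {
    switch (GET_IW_R_N_X (insn))
      {
      case R2_R_N_CALLR_N:
      case R2_R_N_JMPR_N:
        /* F1X1: a single 5-bit register operand (callr.n, jmpr.n).  */
        (void) GET_IW_F1X1_A (insn);
        break;
      case R2_R_N_BREAK_N:
      case R2_R_N_TRAP_N:   /* assumed X2L5, like break.n and ret.n */
      case R2_R_N_RET_N:
        /* X2L5: a single 5-bit immediate operand.  */
        (void) GET_IW_X2L5_IMM5 (insn);
        break;
      default:
        /* T2X3: two 3-bit register operands (and.n .. neg.n).  */
        (void) GET_IW_T2X3_A3 (insn);
        (void) GET_IW_T2X3_B3 (insn);
        break;
      }
  }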
+
+#define IW_T2X3_A3_LSB 6
+#define IW_T2X3_A3_SIZE 3
+#define IW_T2X3_A3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X3_A3_SIZE))
+#define IW_T2X3_A3_SHIFTED_MASK (IW_T2X3_A3_UNSHIFTED_MASK << IW_T2X3_A3_LSB)
+#define GET_IW_T2X3_A3(W) (((W) >> IW_T2X3_A3_LSB) & IW_T2X3_A3_UNSHIFTED_MASK)
+#define SET_IW_T2X3_A3(V) (((V) & IW_T2X3_A3_UNSHIFTED_MASK) << IW_T2X3_A3_LSB)
+
+#define IW_T2X3_B3_LSB 9
+#define IW_T2X3_B3_SIZE 3
+#define IW_T2X3_B3_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_T2X3_B3_SIZE))
+#define IW_T2X3_B3_SHIFTED_MASK (IW_T2X3_B3_UNSHIFTED_MASK << IW_T2X3_B3_LSB)
+#define GET_IW_T2X3_B3(W) (((W) >> IW_T2X3_B3_LSB) & IW_T2X3_B3_UNSHIFTED_MASK)
+#define SET_IW_T2X3_B3(V) (((V) & IW_T2X3_B3_UNSHIFTED_MASK) << IW_T2X3_B3_LSB)
+
+#define IW_F1X1_A_LSB 6
+#define IW_F1X1_A_SIZE 5
+#define IW_F1X1_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X1_A_SIZE))
+#define IW_F1X1_A_SHIFTED_MASK (IW_F1X1_A_UNSHIFTED_MASK << IW_F1X1_A_LSB)
+#define GET_IW_F1X1_A(W) (((W) >> IW_F1X1_A_LSB) & IW_F1X1_A_UNSHIFTED_MASK)
+#define SET_IW_F1X1_A(V) (((V) & IW_F1X1_A_UNSHIFTED_MASK) << IW_F1X1_A_LSB)
+
+#define IW_F1X1_RSV_LSB 11
+#define IW_F1X1_RSV_SIZE 1
+#define IW_F1X1_RSV_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1X1_RSV_SIZE))
+#define IW_F1X1_RSV_SHIFTED_MASK (IW_F1X1_RSV_UNSHIFTED_MASK << IW_F1X1_RSV_LSB)
+#define GET_IW_F1X1_RSV(W) (((W) >> IW_F1X1_RSV_LSB) & IW_F1X1_RSV_UNSHIFTED_MASK)
+#define SET_IW_F1X1_RSV(V) (((V) & IW_F1X1_RSV_UNSHIFTED_MASK) << IW_F1X1_RSV_LSB)
+
+#define IW_X2L5_IMM5_LSB 6
+#define IW_X2L5_IMM5_SIZE 5
+#define IW_X2L5_IMM5_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_X2L5_IMM5_SIZE))
+#define IW_X2L5_IMM5_SHIFTED_MASK (IW_X2L5_IMM5_UNSHIFTED_MASK << IW_X2L5_IMM5_LSB)
+#define GET_IW_X2L5_IMM5(W) (((W) >> IW_X2L5_IMM5_LSB) & IW_X2L5_IMM5_UNSHIFTED_MASK)
+#define SET_IW_X2L5_IMM5(V) (((V) & IW_X2L5_IMM5_UNSHIFTED_MASK) << IW_X2L5_IMM5_LSB)
+
+#define IW_X2L5_RSV_LSB 11
+#define IW_X2L5_RSV_SIZE 1
+#define IW_X2L5_RSV_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_X2L5_RSV_SIZE))
+#define IW_X2L5_RSV_SHIFTED_MASK (IW_X2L5_RSV_UNSHIFTED_MASK << IW_X2L5_RSV_LSB)
+#define GET_IW_X2L5_RSV(W) (((W) >> IW_X2L5_RSV_LSB) & IW_X2L5_RSV_UNSHIFTED_MASK)
+#define SET_IW_X2L5_RSV(V) (((V) & IW_X2L5_RSV_UNSHIFTED_MASK) << IW_X2L5_RSV_LSB)
+
+#define IW_F1I5_IMM5_LSB 6
+#define IW_F1I5_IMM5_SIZE 5
+#define IW_F1I5_IMM5_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1I5_IMM5_SIZE))
+#define IW_F1I5_IMM5_SHIFTED_MASK (IW_F1I5_IMM5_UNSHIFTED_MASK << IW_F1I5_IMM5_LSB)
+#define GET_IW_F1I5_IMM5(W) (((W) >> IW_F1I5_IMM5_LSB) & IW_F1I5_IMM5_UNSHIFTED_MASK)
+#define SET_IW_F1I5_IMM5(V) (((V) & IW_F1I5_IMM5_UNSHIFTED_MASK) << IW_F1I5_IMM5_LSB)
+
+#define IW_F1I5_B_LSB 11
+#define IW_F1I5_B_SIZE 5
+#define IW_F1I5_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F1I5_B_SIZE))
+#define IW_F1I5_B_SHIFTED_MASK (IW_F1I5_B_UNSHIFTED_MASK << IW_F1I5_B_LSB)
+#define GET_IW_F1I5_B(W) (((W) >> IW_F1I5_B_LSB) & IW_F1I5_B_UNSHIFTED_MASK)
+#define SET_IW_F1I5_B(V) (((V) & IW_F1I5_B_UNSHIFTED_MASK) << IW_F1I5_B_LSB)
+
+#define IW_F2_A_LSB 6
+#define IW_F2_A_SIZE 5
+#define IW_F2_A_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2_A_SIZE))
+#define IW_F2_A_SHIFTED_MASK (IW_F2_A_UNSHIFTED_MASK << IW_F2_A_LSB)
+#define GET_IW_F2_A(W) (((W) >> IW_F2_A_LSB) & IW_F2_A_UNSHIFTED_MASK)
+#define SET_IW_F2_A(V) (((V) & IW_F2_A_UNSHIFTED_MASK) << IW_F2_A_LSB)
+
+#define IW_F2_B_LSB 11
+#define IW_F2_B_SIZE 5
+#define IW_F2_B_UNSHIFTED_MASK (0xffffffffu >> (32 - IW_F2_B_SIZE))
+#define IW_F2_B_SHIFTED_MASK (IW_F2_B_UNSHIFTED_MASK << IW_F2_B_LSB)
+#define GET_IW_F2_B(W) (((W) >> IW_F2_B_LSB) & IW_F2_B_UNSHIFTED_MASK)
+#define SET_IW_F2_B(V) (((V) & IW_F2_B_UNSHIFTED_MASK) << IW_F2_B_LSB)
+
+/* R2 opcodes. */
+#define R2_OP_CALL 0
+#define R2_OP_AS_N 1
+#define R2_OP_BR 2
+#define R2_OP_BR_N 3
+#define R2_OP_ADDI 4
+#define R2_OP_LDBU_N 5
+#define R2_OP_LDBU 6
+#define R2_OP_LDB 7
+#define R2_OP_JMPI 8
+#define R2_OP_R_N 9
+#define R2_OP_ANDI_N 11
+#define R2_OP_ANDI 12
+#define R2_OP_LDHU_N 13
+#define R2_OP_LDHU 14
+#define R2_OP_LDH 15
+#define R2_OP_ASI_N 17
+#define R2_OP_BGE 18
+#define R2_OP_LDWSP_N 19
+#define R2_OP_ORI 20
+#define R2_OP_LDW_N 21
+#define R2_OP_CMPGEI 22
+#define R2_OP_LDW 23
+#define R2_OP_SHI_N 25
+#define R2_OP_BLT 26
+#define R2_OP_MOVI_N 27
+#define R2_OP_XORI 28
+#define R2_OP_STZ_N 29
+#define R2_OP_CMPLTI 30
+#define R2_OP_ANDCI 31
+#define R2_OP_OPX 32
+#define R2_OP_PP_N 33
+#define R2_OP_BNE 34
+#define R2_OP_BNEZ_N 35
+#define R2_OP_MULI 36
+#define R2_OP_STB_N 37
+#define R2_OP_CMPNEI 38
+#define R2_OP_STB 39
+#define R2_OP_I12 40
+#define R2_OP_SPI_N 41
+#define R2_OP_BEQ 42
+#define R2_OP_BEQZ_N 43
+#define R2_OP_ANDHI 44
+#define R2_OP_STH_N 45
+#define R2_OP_CMPEQI 46
+#define R2_OP_STH 47
+#define R2_OP_CUSTOM 48
+#define R2_OP_BGEU 50
+#define R2_OP_STWSP_N 51
+#define R2_OP_ORHI 52
+#define R2_OP_STW_N 53
+#define R2_OP_CMPGEUI 54
+#define R2_OP_STW 55
+#define R2_OP_BLTU 58
+#define R2_OP_MOV_N 59
+#define R2_OP_XORHI 60
+#define R2_OP_SPADDI_N 61
+#define R2_OP_CMPLTUI 62
+#define R2_OP_ANDCHI 63
+
+#define R2_OPX_WRPIE 0
+#define R2_OPX_ERET 1
+#define R2_OPX_ROLI 2
+#define R2_OPX_ROL 3
+#define R2_OPX_FLUSHP 4
+#define R2_OPX_RET 5
+#define R2_OPX_NOR 6
+#define R2_OPX_MULXUU 7
+#define R2_OPX_ENI 8
+#define R2_OPX_BRET 9
+#define R2_OPX_ROR 11
+#define R2_OPX_FLUSHI 12
+#define R2_OPX_JMP 13
+#define R2_OPX_AND 14
+#define R2_OPX_CMPGE 16
+#define R2_OPX_SLLI 18
+#define R2_OPX_SLL 19
+#define R2_OPX_WRPRS 20
+#define R2_OPX_OR 22
+#define R2_OPX_MULXSU 23
+#define R2_OPX_CMPLT 24
+#define R2_OPX_SRLI 26
+#define R2_OPX_SRL 27
+#define R2_OPX_NEXTPC 28
+#define R2_OPX_CALLR 29
+#define R2_OPX_XOR 30
+#define R2_OPX_MULXSS 31
+#define R2_OPX_CMPNE 32
+#define R2_OPX_INSERT 35
+#define R2_OPX_DIVU 36
+#define R2_OPX_DIV 37
+#define R2_OPX_RDCTL 38
+#define R2_OPX_MUL 39
+#define R2_OPX_CMPEQ 40
+#define R2_OPX_INITI 41
+#define R2_OPX_MERGE 43
+#define R2_OPX_HBREAK 44
+#define R2_OPX_TRAP 45
+#define R2_OPX_WRCTL 46
+#define R2_OPX_CMPGEU 48
+#define R2_OPX_ADD 49
+#define R2_OPX_EXTRACT 51
+#define R2_OPX_BREAK 52
+#define R2_OPX_LDEX 53
+#define R2_OPX_SYNC 54
+#define R2_OPX_LDSEX 55
+#define R2_OPX_CMPLTU 56
+#define R2_OPX_SUB 57
+#define R2_OPX_SRAI 58
+#define R2_OPX_SRA 59
+#define R2_OPX_STEX 61
+#define R2_OPX_STSEX 63
+
+#define R2_I12_LDBIO 0
+#define R2_I12_STBIO 1
+#define R2_I12_LDBUIO 2
+#define R2_I12_DCACHE 3
+#define R2_I12_LDHIO 4
+#define R2_I12_STHIO 5
+#define R2_I12_LDHUIO 6
+#define R2_I12_RDPRS 7
+#define R2_I12_LDWIO 8
+#define R2_I12_STWIO 9
+#define R2_I12_LDWM 12
+#define R2_I12_STWM 13
+
+#define R2_DCACHE_INITD 0
+#define R2_DCACHE_INITDA 1
+#define R2_DCACHE_FLUSHD 2
+#define R2_DCACHE_FLUSHDA 3
+
+#define R2_AS_N_ADD_N 0
+#define R2_AS_N_SUB_N 1
+
+#define R2_R_N_AND_N 0
+#define R2_R_N_OR_N 2
+#define R2_R_N_XOR_N 3
+#define R2_R_N_SLL_N 4
+#define R2_R_N_SRL_N 5
+#define R2_R_N_NOT_N 6
+#define R2_R_N_NEG_N 7
+#define R2_R_N_CALLR_N 8
+#define R2_R_N_JMPR_N 10
+#define R2_R_N_BREAK_N 12
+#define R2_R_N_TRAP_N 13
+#define R2_R_N_RET_N 14
+
+#define R2_SPI_N_SPINCI_N 0
+#define R2_SPI_N_SPDECI_N 1
+
+#define R2_ASI_N_ADDI_N 0
+#define R2_ASI_N_SUBI_N 1
+
+#define R2_SHI_N_SLLI_N 0
+#define R2_SHI_N_SRLI_N 1
+
+#define R2_PP_N_POP_N 0
+#define R2_PP_N_PUSH_N 1
+
+#define R2_STZ_N_STWZ_N 0
+#define R2_STZ_N_STBZ_N 1
+
+/* Convenience macros for R2 encodings. */
+
+#define MATCH_R2_OP(NAME) \
+ (SET_IW_R2_OP (R2_OP_##NAME))
+#define MASK_R2_OP \
+ IW_R2_OP_SHIFTED_MASK
+
+#define MATCH_R2_OPX0(NAME) \
+ (SET_IW_R2_OP (R2_OP_OPX) | SET_IW_OPX_X (R2_OPX_##NAME))
+#define MASK_R2_OPX0 \
+ (IW_R2_OP_SHIFTED_MASK | IW_OPX_X_SHIFTED_MASK \
+ | IW_F3X6L5_IMM5_SHIFTED_MASK)
+
+#define MATCH_R2_OPX(NAME, A, B, C) \
+ (MATCH_R2_OPX0 (NAME) | SET_IW_F3X6L5_A (A) | SET_IW_F3X6L5_B (B) \
+ | SET_IW_F3X6L5_C (C))
+#define MASK_R2_OPX(A, B, C, N) \
+ (IW_R2_OP_SHIFTED_MASK | IW_OPX_X_SHIFTED_MASK \
+ | (A ? IW_F3X6L5_A_SHIFTED_MASK : 0) \
+ | (B ? IW_F3X6L5_B_SHIFTED_MASK : 0) \
+ | (C ? IW_F3X6L5_C_SHIFTED_MASK : 0) \
+ | (N ? IW_F3X6L5_IMM5_SHIFTED_MASK : 0))
+
+#define MATCH_R2_I12(NAME) \
+ (SET_IW_R2_OP (R2_OP_I12) | SET_IW_I12_X (R2_I12_##NAME))
+#define MASK_R2_I12 \
+  (IW_R2_OP_SHIFTED_MASK | IW_I12_X_SHIFTED_MASK)
+
+#define MATCH_R2_DCACHE(NAME) \
+ (MATCH_R2_I12(DCACHE) | SET_IW_F1X4I12_X (R2_DCACHE_##NAME))
+#define MASK_R2_DCACHE \
+ (MASK_R2_I12 | IW_F1X4I12_X_SHIFTED_MASK)
+
+#define MATCH_R2_R_N(NAME) \
+ (SET_IW_R2_OP (R2_OP_R_N) | SET_IW_R_N_X (R2_R_N_##NAME))
+#define MASK_R2_R_N \
+  (IW_R2_OP_SHIFTED_MASK | IW_R_N_X_SHIFTED_MASK)
+
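These convenience macros compose two values per encoding: a MATCH word holding the fixed opcode bits and a MASK word marking which bits are significant for the comparison. A minimal sketch of the conventional test a decoder applies to such a pair, using the MATCH_R2_ADD/MASK_R2_ADD definitions just below (illustrative only, not part of the patch):

  /* Sketch only: an R2 word encodes "add" when its significant bits
     equal the fixed bits of the add encoding.  */
  static int is_r2_add_sketch (unsigned long insn)
  {
    return (insn & MASK_R2_ADD) == MATCH_R2_ADD;
  }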
+/* Match/mask macros for R2 instructions. */
+
+#define MATCH_R2_ADD MATCH_R2_OPX0 (ADD)
+#define MASK_R2_ADD MASK_R2_OPX0
+#define MATCH_R2_ADDI MATCH_R2_OP (ADDI)
+#define MASK_R2_ADDI MASK_R2_OP
+#define MATCH_R2_ADD_N (MATCH_R2_OP (AS_N) | SET_IW_T3X1_X (R2_AS_N_ADD_N))
+#define MASK_R2_ADD_N (MASK_R2_OP | IW_T3X1_X_SHIFTED_MASK)
+#define MATCH_R2_ADDI_N (MATCH_R2_OP (ASI_N) | SET_IW_T2X1I3_X (R2_ASI_N_ADDI_N))
+#define MASK_R2_ADDI_N (MASK_R2_OP | IW_T2X1I3_X_SHIFTED_MASK)
+#define MATCH_R2_AND MATCH_R2_OPX0 (AND)
+#define MASK_R2_AND MASK_R2_OPX0
+#define MATCH_R2_ANDCHI MATCH_R2_OP (ANDCHI)
+#define MASK_R2_ANDCHI MASK_R2_OP
+#define MATCH_R2_ANDCI MATCH_R2_OP (ANDCI)
+#define MASK_R2_ANDCI MASK_R2_OP
+#define MATCH_R2_ANDHI MATCH_R2_OP (ANDHI)
+#define MASK_R2_ANDHI MASK_R2_OP
+#define MATCH_R2_ANDI MATCH_R2_OP (ANDI)
+#define MASK_R2_ANDI MASK_R2_OP
+#define MATCH_R2_ANDI_N MATCH_R2_OP (ANDI_N)
+#define MASK_R2_ANDI_N MASK_R2_OP
+#define MATCH_R2_AND_N MATCH_R2_R_N (AND_N)
+#define MASK_R2_AND_N MASK_R2_R_N
+#define MATCH_R2_BEQ MATCH_R2_OP (BEQ)
+#define MASK_R2_BEQ MASK_R2_OP
+#define MATCH_R2_BEQZ_N MATCH_R2_OP (BEQZ_N)
+#define MASK_R2_BEQZ_N MASK_R2_OP
+#define MATCH_R2_BGE MATCH_R2_OP (BGE)
+#define MASK_R2_BGE MASK_R2_OP
+#define MATCH_R2_BGEU MATCH_R2_OP (BGEU)
+#define MASK_R2_BGEU MASK_R2_OP
+#define MATCH_R2_BGT MATCH_R2_OP (BLT)
+#define MASK_R2_BGT MASK_R2_OP
+#define MATCH_R2_BGTU MATCH_R2_OP (BLTU)
+#define MASK_R2_BGTU MASK_R2_OP
+#define MATCH_R2_BLE MATCH_R2_OP (BGE)
+#define MASK_R2_BLE MASK_R2_OP
+#define MATCH_R2_BLEU MATCH_R2_OP (BGEU)
+#define MASK_R2_BLEU MASK_R2_OP
+#define MATCH_R2_BLT MATCH_R2_OP (BLT)
+#define MASK_R2_BLT MASK_R2_OP
+#define MATCH_R2_BLTU MATCH_R2_OP (BLTU)
+#define MASK_R2_BLTU MASK_R2_OP
+#define MATCH_R2_BNE MATCH_R2_OP (BNE)
+#define MASK_R2_BNE MASK_R2_OP
+#define MATCH_R2_BNEZ_N MATCH_R2_OP (BNEZ_N)
+#define MASK_R2_BNEZ_N MASK_R2_OP
+#define MATCH_R2_BR MATCH_R2_OP (BR)
+#define MASK_R2_BR (MASK_R2_OP | IW_F2I16_A_SHIFTED_MASK | IW_F2I16_B_SHIFTED_MASK)
+#define MATCH_R2_BREAK MATCH_R2_OPX (BREAK, 0, 0, 0x1e)
+#define MASK_R2_BREAK MASK_R2_OPX (1, 1, 1, 0)
+#define MATCH_R2_BREAK_N MATCH_R2_R_N (BREAK_N)
+#define MASK_R2_BREAK_N MASK_R2_R_N
+#define MATCH_R2_BRET MATCH_R2_OPX (BRET, 0x1e, 0, 0)
+#define MASK_R2_BRET MASK_R2_OPX (1, 1, 1, 1)
+#define MATCH_R2_BR_N MATCH_R2_OP (BR_N)
+#define MASK_R2_BR_N MASK_R2_OP
+#define MATCH_R2_CALL MATCH_R2_OP (CALL)
+#define MASK_R2_CALL MASK_R2_OP
+#define MATCH_R2_CALLR MATCH_R2_OPX (CALLR, 0, 0, 0x1f)
+#define MASK_R2_CALLR MASK_R2_OPX (0, 1, 1, 1)
+#define MATCH_R2_CALLR_N MATCH_R2_R_N (CALLR_N)
+#define MASK_R2_CALLR_N MASK_R2_R_N
+#define MATCH_R2_CMPEQ MATCH_R2_OPX0 (CMPEQ)
+#define MASK_R2_CMPEQ MASK_R2_OPX0
+#define MATCH_R2_CMPEQI MATCH_R2_OP (CMPEQI)
+#define MASK_R2_CMPEQI MASK_R2_OP
+#define MATCH_R2_CMPGE MATCH_R2_OPX0 (CMPGE)
+#define MASK_R2_CMPGE MASK_R2_OPX0
+#define MATCH_R2_CMPGEI MATCH_R2_OP (CMPGEI)
+#define MASK_R2_CMPGEI MASK_R2_OP
+#define MATCH_R2_CMPGEU MATCH_R2_OPX0 (CMPGEU)
+#define MASK_R2_CMPGEU MASK_R2_OPX0
+#define MATCH_R2_CMPGEUI MATCH_R2_OP (CMPGEUI)
+#define MASK_R2_CMPGEUI MASK_R2_OP
+#define MATCH_R2_CMPGT MATCH_R2_OPX0 (CMPLT)
+#define MASK_R2_CMPGT MASK_R2_OPX0
+#define MATCH_R2_CMPGTI MATCH_R2_OP (CMPGEI)
+#define MASK_R2_CMPGTI MASK_R2_OP
+#define MATCH_R2_CMPGTU MATCH_R2_OPX0 (CMPLTU)
+#define MASK_R2_CMPGTU MASK_R2_OPX0
+#define MATCH_R2_CMPGTUI MATCH_R2_OP (CMPGEUI)
+#define MASK_R2_CMPGTUI MASK_R2_OP
+#define MATCH_R2_CMPLE MATCH_R2_OPX0 (CMPGE)
+#define MASK_R2_CMPLE MASK_R2_OPX0
+#define MATCH_R2_CMPLEI MATCH_R2_OP (CMPLTI)
+#define MASK_R2_CMPLEI MASK_R2_OP
+#define MATCH_R2_CMPLEU MATCH_R2_OPX0 (CMPGEU)
+#define MASK_R2_CMPLEU MASK_R2_OPX0
+#define MATCH_R2_CMPLEUI MATCH_R2_OP (CMPLTUI)
+#define MASK_R2_CMPLEUI MASK_R2_OP
+#define MATCH_R2_CMPLT MATCH_R2_OPX0 (CMPLT)
+#define MASK_R2_CMPLT MASK_R2_OPX0
+#define MATCH_R2_CMPLTI MATCH_R2_OP (CMPLTI)
+#define MASK_R2_CMPLTI MASK_R2_OP
+#define MATCH_R2_CMPLTU MATCH_R2_OPX0 (CMPLTU)
+#define MASK_R2_CMPLTU MASK_R2_OPX0
+#define MATCH_R2_CMPLTUI MATCH_R2_OP (CMPLTUI)
+#define MASK_R2_CMPLTUI MASK_R2_OP
+#define MATCH_R2_CMPNE MATCH_R2_OPX0 (CMPNE)
+#define MASK_R2_CMPNE MASK_R2_OPX0
+#define MATCH_R2_CMPNEI MATCH_R2_OP (CMPNEI)
+#define MASK_R2_CMPNEI MASK_R2_OP
+#define MATCH_R2_CUSTOM MATCH_R2_OP (CUSTOM)
+#define MASK_R2_CUSTOM MASK_R2_OP
+#define MATCH_R2_DIV MATCH_R2_OPX0 (DIV)
+#define MASK_R2_DIV MASK_R2_OPX0
+#define MATCH_R2_DIVU MATCH_R2_OPX0 (DIVU)
+#define MASK_R2_DIVU MASK_R2_OPX0
+#define MATCH_R2_ENI MATCH_R2_OPX (ENI, 0, 0, 0)
+#define MASK_R2_ENI MASK_R2_OPX (1, 1, 1, 0)
+#define MATCH_R2_ERET MATCH_R2_OPX (ERET, 0x1d, 0x1e, 0)
+#define MASK_R2_ERET MASK_R2_OPX (1, 1, 1, 1)
+#define MATCH_R2_EXTRACT MATCH_R2_OPX (EXTRACT, 0, 0, 0)
+#define MASK_R2_EXTRACT MASK_R2_OPX (0, 0, 0, 0)
+#define MATCH_R2_FLUSHD MATCH_R2_DCACHE (FLUSHD)
+#define MASK_R2_FLUSHD MASK_R2_DCACHE
+#define MATCH_R2_FLUSHDA MATCH_R2_DCACHE (FLUSHDA)
+#define MASK_R2_FLUSHDA MASK_R2_DCACHE
+#define MATCH_R2_FLUSHI MATCH_R2_OPX (FLUSHI, 0, 0, 0)
+#define MASK_R2_FLUSHI MASK_R2_OPX (0, 1, 1, 1)
+#define MATCH_R2_FLUSHP MATCH_R2_OPX (FLUSHP, 0, 0, 0)
+#define MASK_R2_FLUSHP MASK_R2_OPX (1, 1, 1, 1)
+#define MATCH_R2_INITD MATCH_R2_DCACHE (INITD)
+#define MASK_R2_INITD MASK_R2_DCACHE
+#define MATCH_R2_INITDA MATCH_R2_DCACHE (INITDA)
+#define MASK_R2_INITDA MASK_R2_DCACHE
+#define MATCH_R2_INITI MATCH_R2_OPX (INITI, 0, 0, 0)
+#define MASK_R2_INITI MASK_R2_OPX (0, 1, 1, 1)
+#define MATCH_R2_INSERT MATCH_R2_OPX (INSERT, 0, 0, 0)
+#define MASK_R2_INSERT MASK_R2_OPX (0, 0, 0, 0)
+#define MATCH_R2_JMP MATCH_R2_OPX (JMP, 0, 0, 0)
+#define MASK_R2_JMP MASK_R2_OPX (0, 1, 1, 1)
+#define MATCH_R2_JMPI MATCH_R2_OP (JMPI)
+#define MASK_R2_JMPI MASK_R2_OP
+#define MATCH_R2_JMPR_N MATCH_R2_R_N (JMPR_N)
+#define MASK_R2_JMPR_N MASK_R2_R_N
+#define MATCH_R2_LDB MATCH_R2_OP (LDB)
+#define MASK_R2_LDB MASK_R2_OP
+#define MATCH_R2_LDBIO MATCH_R2_I12 (LDBIO)
+#define MASK_R2_LDBIO MASK_R2_I12
+#define MATCH_R2_LDBU MATCH_R2_OP (LDBU)
+#define MASK_R2_LDBU MASK_R2_OP
+#define MATCH_R2_LDBUIO MATCH_R2_I12 (LDBUIO)
+#define MASK_R2_LDBUIO MASK_R2_I12
+#define MATCH_R2_LDBU_N MATCH_R2_OP (LDBU_N)
+#define MASK_R2_LDBU_N MASK_R2_OP
+#define MATCH_R2_LDEX MATCH_R2_OPX (LDEX, 0, 0, 0)
+#define MASK_R2_LDEX MASK_R2_OPX (0, 1, 0, 1)
+#define MATCH_R2_LDH MATCH_R2_OP (LDH)
+#define MASK_R2_LDH MASK_R2_OP
+#define MATCH_R2_LDHIO MATCH_R2_I12 (LDHIO)
+#define MASK_R2_LDHIO MASK_R2_I12
+#define MATCH_R2_LDHU MATCH_R2_OP (LDHU)
+#define MASK_R2_LDHU MASK_R2_OP
+#define MATCH_R2_LDHUIO MATCH_R2_I12 (LDHUIO)
+#define MASK_R2_LDHUIO MASK_R2_I12
+#define MATCH_R2_LDHU_N MATCH_R2_OP (LDHU_N)
+#define MASK_R2_LDHU_N MASK_R2_OP
+#define MATCH_R2_LDSEX MATCH_R2_OPX (LDSEX, 0, 0, 0)
+#define MASK_R2_LDSEX MASK_R2_OPX (0, 1, 0, 1)
+#define MATCH_R2_LDW MATCH_R2_OP (LDW)
+#define MASK_R2_LDW MASK_R2_OP
+#define MATCH_R2_LDWIO MATCH_R2_I12 (LDWIO)
+#define MASK_R2_LDWIO MASK_R2_I12
+#define MATCH_R2_LDWM MATCH_R2_I12 (LDWM)
+#define MASK_R2_LDWM MASK_R2_I12
+#define MATCH_R2_LDWSP_N MATCH_R2_OP (LDWSP_N)
+#define MASK_R2_LDWSP_N MASK_R2_OP
+#define MATCH_R2_LDW_N MATCH_R2_OP (LDW_N)
+#define MASK_R2_LDW_N MASK_R2_OP
+#define MATCH_R2_MERGE MATCH_R2_OPX (MERGE, 0, 0, 0)
+#define MASK_R2_MERGE MASK_R2_OPX (0, 0, 0, 0)
+#define MATCH_R2_MOV MATCH_R2_OPX (ADD, 0, 0, 0)
+#define MASK_R2_MOV MASK_R2_OPX (0, 1, 0, 1)
+#define MATCH_R2_MOVHI (MATCH_R2_OP (ORHI) | SET_IW_F2I16_A (0))
+#define MASK_R2_MOVHI (MASK_R2_OP | IW_F2I16_A_SHIFTED_MASK)
+#define MATCH_R2_MOVI (MATCH_R2_OP (ADDI) | SET_IW_F2I16_A (0))
+#define MASK_R2_MOVI (MASK_R2_OP | IW_F2I16_A_SHIFTED_MASK)
+#define MATCH_R2_MOVUI (MATCH_R2_OP (ORI) | SET_IW_F2I16_A (0))
+#define MASK_R2_MOVUI (MASK_R2_OP | IW_F2I16_A_SHIFTED_MASK)
+#define MATCH_R2_MOV_N MATCH_R2_OP (MOV_N)
+#define MASK_R2_MOV_N MASK_R2_OP
+#define MATCH_R2_MOVI_N MATCH_R2_OP (MOVI_N)
+#define MASK_R2_MOVI_N MASK_R2_OP
+#define MATCH_R2_MUL MATCH_R2_OPX0 (MUL)
+#define MASK_R2_MUL MASK_R2_OPX0
+#define MATCH_R2_MULI MATCH_R2_OP (MULI)
+#define MASK_R2_MULI MASK_R2_OP
+#define MATCH_R2_MULXSS MATCH_R2_OPX0 (MULXSS)
+#define MASK_R2_MULXSS MASK_R2_OPX0
+#define MATCH_R2_MULXSU MATCH_R2_OPX0 (MULXSU)
+#define MASK_R2_MULXSU MASK_R2_OPX0
+#define MATCH_R2_MULXUU MATCH_R2_OPX0 (MULXUU)
+#define MASK_R2_MULXUU MASK_R2_OPX0
+#define MATCH_R2_NEG_N MATCH_R2_R_N (NEG_N)
+#define MASK_R2_NEG_N MASK_R2_R_N
+#define MATCH_R2_NEXTPC MATCH_R2_OPX (NEXTPC, 0, 0, 0)
+#define MASK_R2_NEXTPC MASK_R2_OPX (1, 1, 0, 1)
+#define MATCH_R2_NOP MATCH_R2_OPX (ADD, 0, 0, 0)
+#define MASK_R2_NOP MASK_R2_OPX (1, 1, 1, 1)
+#define MATCH_R2_NOP_N (MATCH_R2_OP (MOV_N) | SET_IW_F2_A (0) | SET_IW_F2_B (0))
+#define MASK_R2_NOP_N (MASK_R2_OP | IW_F2_A_SHIFTED_MASK | IW_F2_B_SHIFTED_MASK)
+#define MATCH_R2_NOR MATCH_R2_OPX0 (NOR)
+#define MASK_R2_NOR MASK_R2_OPX0
+#define MATCH_R2_NOT_N MATCH_R2_R_N (NOT_N)
+#define MASK_R2_NOT_N MASK_R2_R_N
+#define MATCH_R2_OR MATCH_R2_OPX0 (OR)
+#define MASK_R2_OR MASK_R2_OPX0
+#define MATCH_R2_OR_N MATCH_R2_R_N (OR_N)
+#define MASK_R2_OR_N MASK_R2_R_N
+#define MATCH_R2_ORHI MATCH_R2_OP (ORHI)
+#define MASK_R2_ORHI MASK_R2_OP
+#define MATCH_R2_ORI MATCH_R2_OP (ORI)
+#define MASK_R2_ORI MASK_R2_OP
+#define MATCH_R2_POP_N (MATCH_R2_OP (PP_N) | SET_IW_L5I4X1_X (R2_PP_N_POP_N))
+#define MASK_R2_POP_N (MASK_R2_OP | IW_L5I4X1_X_SHIFTED_MASK)
+#define MATCH_R2_PUSH_N (MATCH_R2_OP (PP_N) | SET_IW_L5I4X1_X (R2_PP_N_PUSH_N))
+#define MASK_R2_PUSH_N (MASK_R2_OP | IW_L5I4X1_X_SHIFTED_MASK)
+#define MATCH_R2_RDCTL MATCH_R2_OPX (RDCTL, 0, 0, 0)
+#define MASK_R2_RDCTL MASK_R2_OPX (1, 1, 0, 0)
+#define MATCH_R2_RDPRS MATCH_R2_I12 (RDPRS)
+#define MASK_R2_RDPRS MASK_R2_I12
+#define MATCH_R2_RET MATCH_R2_OPX (RET, 0x1f, 0, 0)
+#define MASK_R2_RET MASK_R2_OPX (1, 1, 1, 1)
+#define MATCH_R2_RET_N (MATCH_R2_R_N (RET_N) | SET_IW_X2L5_IMM5 (0))
+#define MASK_R2_RET_N (MASK_R2_R_N | IW_X2L5_IMM5_SHIFTED_MASK)
+#define MATCH_R2_ROL MATCH_R2_OPX0 (ROL)
+#define MASK_R2_ROL MASK_R2_OPX0
+#define MATCH_R2_ROLI MATCH_R2_OPX (ROLI, 0, 0, 0)
+#define MASK_R2_ROLI MASK_R2_OPX (0, 1, 0, 0)
+#define MATCH_R2_ROR MATCH_R2_OPX0 (ROR)
+#define MASK_R2_ROR MASK_R2_OPX0
+#define MATCH_R2_SLL MATCH_R2_OPX0 (SLL)
+#define MASK_R2_SLL MASK_R2_OPX0
+#define MATCH_R2_SLLI MATCH_R2_OPX (SLLI, 0, 0, 0)
+#define MASK_R2_SLLI MASK_R2_OPX (0, 1, 0, 0)
+#define MATCH_R2_SLL_N MATCH_R2_R_N (SLL_N)
+#define MASK_R2_SLL_N MASK_R2_R_N
+#define MATCH_R2_SLLI_N (MATCH_R2_OP (SHI_N) | SET_IW_T2X1L3_X (R2_SHI_N_SLLI_N))
+#define MASK_R2_SLLI_N (MASK_R2_OP | IW_T2X1L3_X_SHIFTED_MASK)
+#define MATCH_R2_SPADDI_N MATCH_R2_OP (SPADDI_N)
+#define MASK_R2_SPADDI_N MASK_R2_OP
+#define MATCH_R2_SPDECI_N (MATCH_R2_OP (SPI_N) | SET_IW_X1I7_X (R2_SPI_N_SPDECI_N))
+#define MASK_R2_SPDECI_N (MASK_R2_OP | IW_X1I7_X_SHIFTED_MASK)
+#define MATCH_R2_SPINCI_N (MATCH_R2_OP (SPI_N) | SET_IW_X1I7_X (R2_SPI_N_SPINCI_N))
+#define MASK_R2_SPINCI_N (MASK_R2_OP | IW_X1I7_X_SHIFTED_MASK)
+#define MATCH_R2_SRA MATCH_R2_OPX0 (SRA)
+#define MASK_R2_SRA MASK_R2_OPX0
+#define MATCH_R2_SRAI MATCH_R2_OPX (SRAI, 0, 0, 0)
+#define MASK_R2_SRAI MASK_R2_OPX (0, 1, 0, 0)
+#define MATCH_R2_SRL MATCH_R2_OPX0 (SRL)
+#define MASK_R2_SRL MASK_R2_OPX0
+#define MATCH_R2_SRLI MATCH_R2_OPX (SRLI, 0, 0, 0)
+#define MASK_R2_SRLI MASK_R2_OPX (0, 1, 0, 0)
+#define MATCH_R2_SRL_N MATCH_R2_R_N (SRL_N)
+#define MASK_R2_SRL_N MASK_R2_R_N
+#define MATCH_R2_SRLI_N (MATCH_R2_OP (SHI_N) | SET_IW_T2X1L3_X (R2_SHI_N_SRLI_N))
+#define MASK_R2_SRLI_N (MASK_R2_OP | IW_T2X1L3_X_SHIFTED_MASK)
+#define MATCH_R2_STB MATCH_R2_OP (STB)
+#define MASK_R2_STB MASK_R2_OP
+#define MATCH_R2_STBIO MATCH_R2_I12 (STBIO)
+#define MASK_R2_STBIO MASK_R2_I12
+#define MATCH_R2_STB_N MATCH_R2_OP (STB_N)
+#define MASK_R2_STB_N MASK_R2_OP
+#define MATCH_R2_STBZ_N (MATCH_R2_OP (STZ_N) | SET_IW_T1X1I6_X (R2_STZ_N_STBZ_N))
+#define MASK_R2_STBZ_N (MASK_R2_OP | IW_T1X1I6_X_SHIFTED_MASK)
+#define MATCH_R2_STEX MATCH_R2_OPX0 (STEX)
+#define MASK_R2_STEX MASK_R2_OPX0
+#define MATCH_R2_STH MATCH_R2_OP (STH)
+#define MASK_R2_STH MASK_R2_OP
+#define MATCH_R2_STHIO MATCH_R2_I12 (STHIO)
+#define MASK_R2_STHIO MASK_R2_I12
+#define MATCH_R2_STH_N MATCH_R2_OP (STH_N)
+#define MASK_R2_STH_N MASK_R2_OP
+#define MATCH_R2_STSEX MATCH_R2_OPX0 (STSEX)
+#define MASK_R2_STSEX MASK_R2_OPX0
+#define MATCH_R2_STW MATCH_R2_OP (STW)
+#define MASK_R2_STW MASK_R2_OP
+#define MATCH_R2_STWIO MATCH_R2_I12 (STWIO)
+#define MASK_R2_STWIO MASK_R2_I12
+#define MATCH_R2_STWM MATCH_R2_I12 (STWM)
+#define MASK_R2_STWM MASK_R2_I12
+#define MATCH_R2_STWSP_N MATCH_R2_OP (STWSP_N)
+#define MASK_R2_STWSP_N MASK_R2_OP
+#define MATCH_R2_STW_N MATCH_R2_OP (STW_N)
+#define MASK_R2_STW_N MASK_R2_OP
+#define MATCH_R2_STWZ_N MATCH_R2_OP (STZ_N)
+#define MASK_R2_STWZ_N MASK_R2_OP
+#define MATCH_R2_SUB MATCH_R2_OPX0 (SUB)
+#define MASK_R2_SUB MASK_R2_OPX0
+#define MATCH_R2_SUBI MATCH_R2_OP (ADDI)
+#define MASK_R2_SUBI MASK_R2_OP
+#define MATCH_R2_SUB_N (MATCH_R2_OP (AS_N) | SET_IW_T3X1_X (R2_AS_N_SUB_N))
+#define MASK_R2_SUB_N (MASK_R2_OP | IW_T3X1_X_SHIFTED_MASK)
+#define MATCH_R2_SUBI_N (MATCH_R2_OP (ASI_N) | SET_IW_T2X1I3_X (R2_ASI_N_SUBI_N))
+#define MASK_R2_SUBI_N (MASK_R2_OP | IW_T2X1I3_X_SHIFTED_MASK)
+#define MATCH_R2_SYNC MATCH_R2_OPX (SYNC, 0, 0, 0)
+#define MASK_R2_SYNC MASK_R2_OPX (1, 1, 1, 1)
+#define MATCH_R2_TRAP MATCH_R2_OPX (TRAP, 0, 0, 0x1d)
+#define MASK_R2_TRAP MASK_R2_OPX (1, 1, 1, 0)
+#define MATCH_R2_TRAP_N MATCH_R2_R_N (TRAP_N)
+#define MASK_R2_TRAP_N MASK_R2_R_N
+#define MATCH_R2_WRCTL MATCH_R2_OPX (WRCTL, 0, 0, 0)
+#define MASK_R2_WRCTL MASK_R2_OPX (0, 1, 1, 0)
+#define MATCH_R2_WRPIE MATCH_R2_OPX (WRPIE, 0, 0, 0)
+#define MASK_R2_WRPIE MASK_R2_OPX (0, 1, 0, 1)
+#define MATCH_R2_WRPRS MATCH_R2_OPX (WRPRS, 0, 0, 0)
+#define MASK_R2_WRPRS MASK_R2_OPX (0, 1, 0, 1)
+#define MATCH_R2_XOR MATCH_R2_OPX0 (XOR)
+#define MASK_R2_XOR MASK_R2_OPX0
+#define MATCH_R2_XORHI MATCH_R2_OP (XORHI)
+#define MASK_R2_XORHI MASK_R2_OP
+#define MATCH_R2_XORI MATCH_R2_OP (XORI)
+#define MASK_R2_XORI MASK_R2_OP
+#define MATCH_R2_XOR_N MATCH_R2_R_N (XOR_N)
+#define MASK_R2_XOR_N MASK_R2_R_N
+
+#endif /* _NIOS2R2_H */
+
+
+/* These are the data structures used to hold the instruction information. */
+extern const struct nios2_opcode nios2_r1_opcodes[];
+extern const int nios2_num_r1_opcodes;
+extern const struct nios2_opcode nios2_r2_opcodes[];
+extern const int nios2_num_r2_opcodes;
+extern struct nios2_opcode *nios2_opcodes;
+extern int nios2_num_opcodes;
+
+/* These are the data structures used to hold the register information. */
+extern const struct nios2_reg nios2_builtin_regs[];
+extern struct nios2_reg *nios2_regs;
+extern const int nios2_num_builtin_regs;
+extern int nios2_num_regs;
+
+/* Return the opcode descriptor for a single instruction. */
+extern const struct nios2_opcode *
+nios2_find_opcode_hash (unsigned long, unsigned long);
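A hedged usage sketch for the hash lookup declared above. The declaration gives no parameter names; the sketch assumes the arguments are the raw instruction word and the machine variant, and that the descriptor's first member is its mnemonic string, as the opcode-table layout comments below suggest. Illustrative only, not part of the patch.

  /* Sketch only, assumed argument order: (instruction word, machine variant).  */
  static const char *mnemonic_sketch (unsigned long insn_word, unsigned long mach)
  {
    const struct nios2_opcode *op = nios2_find_opcode_hash (insn_word, mach);
    return op != NULL ? op->name : NULL;   /* NULL if the word decodes to nothing */
  }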
+
+/* Lookup tables for R2 immediate decodings. */
+extern unsigned int nios2_r2_asi_n_mappings[];
+extern const int nios2_num_r2_asi_n_mappings;
+extern unsigned int nios2_r2_shi_n_mappings[];
+extern const int nios2_num_r2_shi_n_mappings;
+extern unsigned int nios2_r2_andi_n_mappings[];
+extern const int nios2_num_r2_andi_n_mappings;
+
+/* Lookup table for 3-bit register decodings. */
+extern int nios2_r2_reg3_mappings[];
+extern const int nios2_num_r2_reg3_mappings;
+
+/* Lookup table for REG_RANGE value list decodings. */
+extern unsigned long nios2_r2_reg_range_mappings[];
+extern const int nios2_num_r2_reg_range_mappings;
+
+#endif /* _NIOS2_H */
+
+/*#include "sysdep.h"
+#include <stdio.h>
+#include "opcode/nios2.h"
+*/
+/* Register string table */
+
+const struct nios2_reg nios2_builtin_regs[] = {
+ /* Standard register names. */
+ {"zero", 0, REG_NORMAL},
+ {"at", 1, REG_NORMAL}, /* assembler temporary */
+ {"r2", 2, REG_NORMAL | REG_3BIT | REG_LDWM},
+ {"r3", 3, REG_NORMAL | REG_3BIT | REG_LDWM},
+ {"r4", 4, REG_NORMAL | REG_3BIT | REG_LDWM},
+ {"r5", 5, REG_NORMAL | REG_3BIT | REG_LDWM},
+ {"r6", 6, REG_NORMAL | REG_3BIT | REG_LDWM},
+ {"r7", 7, REG_NORMAL | REG_3BIT | REG_LDWM},
+ {"r8", 8, REG_NORMAL | REG_LDWM},
+ {"r9", 9, REG_NORMAL | REG_LDWM},
+ {"r10", 10, REG_NORMAL | REG_LDWM},
+ {"r11", 11, REG_NORMAL | REG_LDWM},
+ {"r12", 12, REG_NORMAL | REG_LDWM},
+ {"r13", 13, REG_NORMAL | REG_LDWM},
+ {"r14", 14, REG_NORMAL | REG_LDWM},
+ {"r15", 15, REG_NORMAL | REG_LDWM},
+ {"r16", 16, REG_NORMAL | REG_3BIT | REG_LDWM | REG_POP},
+ {"r17", 17, REG_NORMAL | REG_3BIT | REG_LDWM | REG_POP},
+ {"r18", 18, REG_NORMAL | REG_LDWM | REG_POP},
+ {"r19", 19, REG_NORMAL | REG_LDWM | REG_POP},
+ {"r20", 20, REG_NORMAL | REG_LDWM | REG_POP},
+ {"r21", 21, REG_NORMAL | REG_LDWM | REG_POP},
+ {"r22", 22, REG_NORMAL | REG_LDWM | REG_POP},
+ {"r23", 23, REG_NORMAL | REG_LDWM | REG_POP},
+ {"et", 24, REG_NORMAL},
+ {"bt", 25, REG_NORMAL},
+ {"gp", 26, REG_NORMAL}, /* global pointer */
+ {"sp", 27, REG_NORMAL}, /* stack pointer */
+ {"fp", 28, REG_NORMAL | REG_LDWM | REG_POP}, /* frame pointer */
+ {"ea", 29, REG_NORMAL}, /* exception return address */
+ {"sstatus", 30, REG_NORMAL}, /* saved processor status */
+ {"ra", 31, REG_NORMAL | REG_LDWM | REG_POP}, /* return address */
+
+ /* Alternative names for special registers. */
+ {"r0", 0, REG_NORMAL},
+ {"r1", 1, REG_NORMAL},
+ {"r24", 24, REG_NORMAL},
+ {"r25", 25, REG_NORMAL},
+ {"r26", 26, REG_NORMAL},
+ {"r27", 27, REG_NORMAL},
+ {"r28", 28, REG_NORMAL | REG_LDWM | REG_POP},
+ {"r29", 29, REG_NORMAL},
+ {"r30", 30, REG_NORMAL},
+ {"ba", 30, REG_NORMAL}, /* breakpoint return address */
+ {"r31", 31, REG_NORMAL | REG_LDWM | REG_POP},
+
+ /* Control register names. */
+ {"status", 0, REG_CONTROL},
+ {"estatus", 1, REG_CONTROL},
+ {"bstatus", 2, REG_CONTROL},
+ {"ienable", 3, REG_CONTROL},
+ {"ipending", 4, REG_CONTROL},
+ {"cpuid", 5, REG_CONTROL},
+ {"ctl6", 6, REG_CONTROL},
+ {"exception", 7, REG_CONTROL},
+ {"pteaddr", 8, REG_CONTROL},
+ {"tlbacc", 9, REG_CONTROL},
+ {"tlbmisc", 10, REG_CONTROL},
+ {"eccinj", 11, REG_CONTROL},
+ {"badaddr", 12, REG_CONTROL},
+ {"config", 13, REG_CONTROL},
+ {"mpubase", 14, REG_CONTROL},
+ {"mpuacc", 15, REG_CONTROL},
+ {"ctl16", 16, REG_CONTROL},
+ {"ctl17", 17, REG_CONTROL},
+ {"ctl18", 18, REG_CONTROL},
+ {"ctl19", 19, REG_CONTROL},
+ {"ctl20", 20, REG_CONTROL},
+ {"ctl21", 21, REG_CONTROL},
+ {"ctl22", 22, REG_CONTROL},
+ {"ctl23", 23, REG_CONTROL},
+ {"ctl24", 24, REG_CONTROL},
+ {"ctl25", 25, REG_CONTROL},
+ {"ctl26", 26, REG_CONTROL},
+ {"ctl27", 27, REG_CONTROL},
+ {"ctl28", 28, REG_CONTROL},
+ {"ctl29", 29, REG_CONTROL},
+ {"ctl30", 30, REG_CONTROL},
+ {"ctl31", 31, REG_CONTROL},
+
+ /* Alternative names for special control registers. */
+ {"ctl0", 0, REG_CONTROL},
+ {"ctl1", 1, REG_CONTROL},
+ {"ctl2", 2, REG_CONTROL},
+ {"ctl3", 3, REG_CONTROL},
+ {"ctl4", 4, REG_CONTROL},
+ {"ctl5", 5, REG_CONTROL},
+ {"ctl7", 7, REG_CONTROL},
+ {"ctl8", 8, REG_CONTROL},
+ {"ctl9", 9, REG_CONTROL},
+ {"ctl10", 10, REG_CONTROL},
+ {"ctl11", 11, REG_CONTROL},
+ {"ctl12", 12, REG_CONTROL},
+ {"ctl13", 13, REG_CONTROL},
+ {"ctl14", 14, REG_CONTROL},
+ {"ctl15", 15, REG_CONTROL},
+
+ /* Coprocessor register names. */
+ {"c0", 0, REG_COPROCESSOR},
+ {"c1", 1, REG_COPROCESSOR},
+ {"c2", 2, REG_COPROCESSOR},
+ {"c3", 3, REG_COPROCESSOR},
+ {"c4", 4, REG_COPROCESSOR},
+ {"c5", 5, REG_COPROCESSOR},
+ {"c6", 6, REG_COPROCESSOR},
+ {"c7", 7, REG_COPROCESSOR},
+ {"c8", 8, REG_COPROCESSOR},
+ {"c9", 9, REG_COPROCESSOR},
+ {"c10", 10, REG_COPROCESSOR},
+ {"c11", 11, REG_COPROCESSOR},
+ {"c12", 12, REG_COPROCESSOR},
+ {"c13", 13, REG_COPROCESSOR},
+ {"c14", 14, REG_COPROCESSOR},
+ {"c15", 15, REG_COPROCESSOR},
+ {"c16", 16, REG_COPROCESSOR},
+ {"c17", 17, REG_COPROCESSOR},
+ {"c18", 18, REG_COPROCESSOR},
+ {"c19", 19, REG_COPROCESSOR},
+ {"c20", 20, REG_COPROCESSOR},
+ {"c21", 21, REG_COPROCESSOR},
+ {"c22", 22, REG_COPROCESSOR},
+ {"c23", 23, REG_COPROCESSOR},
+ {"c24", 24, REG_COPROCESSOR},
+ {"c25", 25, REG_COPROCESSOR},
+ {"c26", 26, REG_COPROCESSOR},
+ {"c27", 27, REG_COPROCESSOR},
+ {"c28", 28, REG_COPROCESSOR},
+ {"c29", 29, REG_COPROCESSOR},
+ {"c30", 30, REG_COPROCESSOR},
+ {"c31", 31, REG_COPROCESSOR},
+};
+
+#define NIOS2_NUM_REGS \
+ ((sizeof nios2_builtin_regs) / (sizeof (nios2_builtin_regs[0])))
+const int nios2_num_builtin_regs = NIOS2_NUM_REGS;
+
+/* This is not const in order to allow for dynamic extensions to the
+ built-in instruction set. */
+struct nios2_reg *nios2_regs = (struct nios2_reg *) nios2_builtin_regs;
+int nios2_num_regs = NIOS2_NUM_REGS;
+#undef NIOS2_NUM_REGS
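A hedged sketch of resolving a register name against the table above. It assumes the first member of struct nios2_reg is called name, as the initializers suggest, and that strcmp from <string.h> is available; neither assumption is spelled out in this fragment, and the sketch is not part of the patch.

  /* Sketch only: linear lookup of a register by name.  */
  static const struct nios2_reg *find_reg_sketch (const char *name)
  {
    int i;
    for (i = 0; i < nios2_num_regs; i++)
      if (strcmp (nios2_regs[i].name, name) == 0)
        return &nios2_regs[i];
    return NULL;   /* unknown register name */
  }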
+
+/* This is the opcode table used by the Nios II GNU assembler,
+   disassembler, and GDB.  */
+const struct nios2_opcode nios2_r1_opcodes[] =
+{
+ /* { name, args, args_test, num_args, size, format,
+ match, mask, pinfo, overflow } */
+ {"add", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_ADD, MASK_R1_ADD, 0, no_overflow},
+ {"addi", "t,s,i", "t,s,i,E", 3, 4, iw_i_type,
+ MATCH_R1_ADDI, MASK_R1_ADDI, 0, signed_immed16_overflow},
+ {"and", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_AND, MASK_R1_AND, 0, no_overflow},
+ {"andhi", "t,s,u", "t,s,u,E", 3, 4, iw_i_type,
+ MATCH_R1_ANDHI, MASK_R1_ANDHI, 0, unsigned_immed16_overflow},
+ {"andi", "t,s,u", "t,s,u,E", 3, 4, iw_i_type,
+ MATCH_R1_ANDI, MASK_R1_ANDI, 0, unsigned_immed16_overflow},
+ {"beq", "s,t,o", "s,t,o,E", 3, 4, iw_i_type,
+ MATCH_R1_BEQ, MASK_R1_BEQ, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bge", "s,t,o", "s,t,o,E", 3, 4, iw_i_type,
+ MATCH_R1_BGE, MASK_R1_BGE, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bgeu", "s,t,o", "s,t,o,E", 3, 4, iw_i_type,
+ MATCH_R1_BGEU, MASK_R1_BGEU, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bgt", "s,t,o", "s,t,o,E", 3, 4, iw_i_type,
+ MATCH_R1_BGT, MASK_R1_BGT,
+ NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bgtu", "s,t,o", "s,t,o,E", 3, 4, iw_i_type,
+ MATCH_R1_BGTU, MASK_R1_BGTU,
+ NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"ble", "s,t,o", "s,t,o,E", 3, 4, iw_i_type,
+ MATCH_R1_BLE, MASK_R1_BLE,
+ NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bleu", "s,t,o", "s,t,o,E", 3, 4, iw_i_type,
+ MATCH_R1_BLEU, MASK_R1_BLEU,
+ NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"blt", "s,t,o", "s,t,o,E", 3, 4, iw_i_type,
+ MATCH_R1_BLT, MASK_R1_BLT, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bltu", "s,t,o", "s,t,o,E", 3, 4, iw_i_type,
+ MATCH_R1_BLTU, MASK_R1_BLTU, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bne", "s,t,o", "s,t,o,E", 3, 4, iw_i_type,
+ MATCH_R1_BNE, MASK_R1_BNE, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"br", "o", "o,E", 1, 4, iw_i_type,
+ MATCH_R1_BR, MASK_R1_BR, NIOS2_INSN_UBRANCH, branch_target_overflow},
+ {"break", "j", "j,E", 1, 4, iw_r_type,
+ MATCH_R1_BREAK, MASK_R1_BREAK, NIOS2_INSN_OPTARG, no_overflow},
+ {"bret", "", "E", 0, 4, iw_r_type,
+ MATCH_R1_BRET, MASK_R1_BRET, 0, no_overflow},
+ {"call", "m", "m,E", 1, 4, iw_j_type,
+ MATCH_R1_CALL, MASK_R1_CALL, NIOS2_INSN_CALL, call_target_overflow},
+ {"callr", "s", "s,E", 1, 4, iw_r_type,
+ MATCH_R1_CALLR, MASK_R1_CALLR, 0, no_overflow},
+ {"cmpeq", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_CMPEQ, MASK_R1_CMPEQ, 0, no_overflow},
+ {"cmpeqi", "t,s,i", "t,s,i,E", 3, 4, iw_i_type,
+ MATCH_R1_CMPEQI, MASK_R1_CMPEQI, 0, signed_immed16_overflow},
+ {"cmpge", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_CMPGE, MASK_R1_CMPGE, 0, no_overflow},
+ {"cmpgei", "t,s,i", "t,s,i,E", 3, 4, iw_i_type,
+ MATCH_R1_CMPGEI, MASK_R1_CMPGEI, 0, signed_immed16_overflow},
+ {"cmpgeu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_CMPGEU, MASK_R1_CMPGEU, 0, no_overflow},
+ {"cmpgeui", "t,s,u", "t,s,u,E", 3, 4, iw_i_type,
+ MATCH_R1_CMPGEUI, MASK_R1_CMPGEUI, 0, unsigned_immed16_overflow},
+ {"cmpgt", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_CMPGT, MASK_R1_CMPGT, NIOS2_INSN_MACRO, no_overflow},
+ {"cmpgti", "t,s,i", "t,s,i,E", 3, 4, iw_i_type,
+ MATCH_R1_CMPGTI, MASK_R1_CMPGTI, NIOS2_INSN_MACRO, signed_immed16_overflow},
+ {"cmpgtu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_CMPGTU, MASK_R1_CMPGTU, NIOS2_INSN_MACRO, no_overflow},
+ {"cmpgtui", "t,s,u", "t,s,u,E", 3, 4, iw_i_type,
+ MATCH_R1_CMPGTUI, MASK_R1_CMPGTUI,
+ NIOS2_INSN_MACRO, unsigned_immed16_overflow},
+ {"cmple", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_CMPLE, MASK_R1_CMPLE, NIOS2_INSN_MACRO, no_overflow},
+ {"cmplei", "t,s,i", "t,s,i,E", 3, 4, iw_i_type,
+ MATCH_R1_CMPLEI, MASK_R1_CMPLEI, NIOS2_INSN_MACRO, signed_immed16_overflow},
+ {"cmpleu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_CMPLEU, MASK_R1_CMPLEU, NIOS2_INSN_MACRO, no_overflow},
+ {"cmpleui", "t,s,u", "t,s,u,E", 3, 4, iw_i_type,
+ MATCH_R1_CMPLEUI, MASK_R1_CMPLEUI,
+ NIOS2_INSN_MACRO, unsigned_immed16_overflow},
+ {"cmplt", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_CMPLT, MASK_R1_CMPLT, 0, no_overflow},
+ {"cmplti", "t,s,i", "t,s,i,E", 3, 4, iw_i_type,
+ MATCH_R1_CMPLTI, MASK_R1_CMPLTI, 0, signed_immed16_overflow},
+ {"cmpltu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_CMPLTU, MASK_R1_CMPLTU, 0, no_overflow},
+ {"cmpltui", "t,s,u", "t,s,u,E", 3, 4, iw_i_type,
+ MATCH_R1_CMPLTUI, MASK_R1_CMPLTUI, 0, unsigned_immed16_overflow},
+ {"cmpne", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_CMPNE, MASK_R1_CMPNE, 0, no_overflow},
+ {"cmpnei", "t,s,i", "t,s,i,E", 3, 4, iw_i_type,
+ MATCH_R1_CMPNEI, MASK_R1_CMPNEI, 0, signed_immed16_overflow},
+ {"custom", "l,d,s,t", "l,d,s,t,E", 4, 4, iw_custom_type,
+ MATCH_R1_CUSTOM, MASK_R1_CUSTOM, 0, custom_opcode_overflow},
+ {"div", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_DIV, MASK_R1_DIV, 0, no_overflow},
+ {"divu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_DIVU, MASK_R1_DIVU, 0, no_overflow},
+ {"eret", "", "E", 0, 4, iw_r_type,
+ MATCH_R1_ERET, MASK_R1_ERET, 0, no_overflow},
+ {"flushd", "i(s)", "i(s),E", 2, 4, iw_i_type,
+ MATCH_R1_FLUSHD, MASK_R1_FLUSHD, 0, address_offset_overflow},
+ {"flushda", "i(s)", "i(s),E", 2, 4, iw_i_type,
+ MATCH_R1_FLUSHDA, MASK_R1_FLUSHDA, 0, address_offset_overflow},
+ {"flushi", "s", "s,E", 1, 4, iw_r_type,
+ MATCH_R1_FLUSHI, MASK_R1_FLUSHI, 0, no_overflow},
+ {"flushp", "", "E", 0, 4, iw_r_type,
+ MATCH_R1_FLUSHP, MASK_R1_FLUSHP, 0, no_overflow},
+ {"initd", "i(s)", "i(s),E", 2, 4, iw_i_type,
+ MATCH_R1_INITD, MASK_R1_INITD, 0, address_offset_overflow},
+ {"initda", "i(s)", "i(s),E", 2, 4, iw_i_type,
+ MATCH_R1_INITDA, MASK_R1_INITDA, 0, address_offset_overflow},
+ {"initi", "s", "s,E", 1, 4, iw_r_type,
+ MATCH_R1_INITI, MASK_R1_INITI, 0, no_overflow},
+ {"jmp", "s", "s,E", 1, 4, iw_r_type,
+ MATCH_R1_JMP, MASK_R1_JMP, 0, no_overflow},
+ {"jmpi", "m", "m,E", 1, 4, iw_j_type,
+ MATCH_R1_JMPI, MASK_R1_JMPI, 0, call_target_overflow},
+ {"ldb", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_LDB, MASK_R1_LDB, 0, address_offset_overflow},
+ {"ldbio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_LDBIO, MASK_R1_LDBIO, 0, address_offset_overflow},
+ {"ldbu", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_LDBU, MASK_R1_LDBU, 0, address_offset_overflow},
+ {"ldbuio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_LDBUIO, MASK_R1_LDBUIO, 0, address_offset_overflow},
+ {"ldh", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_LDH, MASK_R1_LDH, 0, address_offset_overflow},
+ {"ldhio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_LDHIO, MASK_R1_LDHIO, 0, address_offset_overflow},
+ {"ldhu", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_LDHU, MASK_R1_LDHU, 0, address_offset_overflow},
+ {"ldhuio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_LDHUIO, MASK_R1_LDHUIO, 0, address_offset_overflow},
+ {"ldw", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_LDW, MASK_R1_LDW, 0, address_offset_overflow},
+ {"ldwio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_LDWIO, MASK_R1_LDWIO, 0, address_offset_overflow},
+ {"mov", "d,s", "d,s,E", 2, 4, iw_r_type,
+ MATCH_R1_MOV, MASK_R1_MOV, NIOS2_INSN_MACRO_MOV, no_overflow},
+ {"movhi", "t,u", "t,u,E", 2, 4, iw_i_type,
+ MATCH_R1_MOVHI, MASK_R1_MOVHI,
+ NIOS2_INSN_MACRO_MOVI, unsigned_immed16_overflow},
+ {"movi", "t,i", "t,i,E", 2, 4, iw_i_type,
+ MATCH_R1_MOVI, MASK_R1_MOVI, NIOS2_INSN_MACRO_MOVI, signed_immed16_overflow},
+ {"movia", "t,o", "t,o,E", 2, 4, iw_i_type,
+ MATCH_R1_ORHI, MASK_R1_ORHI, NIOS2_INSN_MACRO_MOVIA, no_overflow},
+ {"movui", "t,u", "t,u,E", 2, 4, iw_i_type,
+ MATCH_R1_MOVUI, MASK_R1_MOVUI,
+ NIOS2_INSN_MACRO_MOVI, unsigned_immed16_overflow},
+ {"mul", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_MUL, MASK_R1_MUL, 0, no_overflow},
+ {"muli", "t,s,i", "t,s,i,E", 3, 4, iw_i_type,
+ MATCH_R1_MULI, MASK_R1_MULI, 0, signed_immed16_overflow},
+ {"mulxss", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_MULXSS, MASK_R1_MULXSS, 0, no_overflow},
+ {"mulxsu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_MULXSU, MASK_R1_MULXSU, 0, no_overflow},
+ {"mulxuu", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_MULXUU, MASK_R1_MULXUU, 0, no_overflow},
+ {"nextpc", "d", "d,E", 1, 4, iw_r_type,
+ MATCH_R1_NEXTPC, MASK_R1_NEXTPC, 0, no_overflow},
+ {"nop", "", "E", 0, 4, iw_r_type,
+ MATCH_R1_NOP, MASK_R1_NOP, NIOS2_INSN_MACRO_MOV, no_overflow},
+ {"nor", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_NOR, MASK_R1_NOR, 0, no_overflow},
+ {"or", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_OR, MASK_R1_OR, 0, no_overflow},
+ {"orhi", "t,s,u", "t,s,u,E", 3, 4, iw_i_type,
+ MATCH_R1_ORHI, MASK_R1_ORHI, 0, unsigned_immed16_overflow},
+ {"ori", "t,s,u", "t,s,u,E", 3, 4, iw_i_type,
+ MATCH_R1_ORI, MASK_R1_ORI, 0, unsigned_immed16_overflow},
+ {"rdctl", "d,c", "d,c,E", 2, 4, iw_r_type,
+ MATCH_R1_RDCTL, MASK_R1_RDCTL, 0, no_overflow},
+ {"rdprs", "t,s,i", "t,s,i,E", 3, 4, iw_i_type,
+ MATCH_R1_RDPRS, MASK_R1_RDPRS, 0, signed_immed16_overflow},
+ {"ret", "", "E", 0, 4, iw_r_type,
+ MATCH_R1_RET, MASK_R1_RET, 0, no_overflow},
+ {"rol", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_ROL, MASK_R1_ROL, 0, no_overflow},
+ {"roli", "d,s,j", "d,s,j,E", 3, 4, iw_r_type,
+ MATCH_R1_ROLI, MASK_R1_ROLI, 0, unsigned_immed5_overflow},
+ {"ror", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_ROR, MASK_R1_ROR, 0, no_overflow},
+ {"sll", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_SLL, MASK_R1_SLL, 0, no_overflow},
+ {"slli", "d,s,j", "d,s,j,E", 3, 4, iw_r_type,
+ MATCH_R1_SLLI, MASK_R1_SLLI, 0, unsigned_immed5_overflow},
+ {"sra", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_SRA, MASK_R1_SRA, 0, no_overflow},
+ {"srai", "d,s,j", "d,s,j,E", 3, 4, iw_r_type,
+ MATCH_R1_SRAI, MASK_R1_SRAI, 0, unsigned_immed5_overflow},
+ {"srl", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_SRL, MASK_R1_SRL, 0, no_overflow},
+ {"srli", "d,s,j", "d,s,j,E", 3, 4, iw_r_type,
+ MATCH_R1_SRLI, MASK_R1_SRLI, 0, unsigned_immed5_overflow},
+ {"stb", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_STB, MASK_R1_STB, 0, address_offset_overflow},
+ {"stbio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_STBIO, MASK_R1_STBIO, 0, address_offset_overflow},
+ {"sth", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_STH, MASK_R1_STH, 0, address_offset_overflow},
+ {"sthio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_STHIO, MASK_R1_STHIO, 0, address_offset_overflow},
+ {"stw", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_STW, MASK_R1_STW, 0, address_offset_overflow},
+ {"stwio", "t,i(s)", "t,i(s),E", 3, 4, iw_i_type,
+ MATCH_R1_STWIO, MASK_R1_STWIO, 0, address_offset_overflow},
+ {"sub", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_SUB, MASK_R1_SUB, 0, no_overflow},
+ {"subi", "t,s,i", "t,s,i,E", 3, 4, iw_i_type,
+ MATCH_R1_SUBI, MASK_R1_SUBI, NIOS2_INSN_MACRO, signed_immed16_overflow},
+ {"sync", "", "E", 0, 4, iw_r_type,
+ MATCH_R1_SYNC, MASK_R1_SYNC, 0, no_overflow},
+ {"trap", "j", "j,E", 1, 4, iw_r_type,
+ MATCH_R1_TRAP, MASK_R1_TRAP, NIOS2_INSN_OPTARG, no_overflow},
+ {"wrctl", "c,s", "c,s,E", 2, 4, iw_r_type,
+ MATCH_R1_WRCTL, MASK_R1_WRCTL, 0, no_overflow},
+ {"wrprs", "d,s", "d,s,E", 2, 4, iw_r_type,
+ MATCH_R1_WRPRS, MASK_R1_WRPRS, 0, no_overflow},
+ {"xor", "d,s,t", "d,s,t,E", 3, 4, iw_r_type,
+ MATCH_R1_XOR, MASK_R1_XOR, 0, no_overflow},
+ {"xorhi", "t,s,u", "t,s,u,E", 3, 4, iw_i_type,
+ MATCH_R1_XORHI, MASK_R1_XORHI, 0, unsigned_immed16_overflow},
+ {"xori", "t,s,u", "t,s,u,E", 3, 4, iw_i_type,
+ MATCH_R1_XORI, MASK_R1_XORI, 0, unsigned_immed16_overflow}
+};
+
+#define NIOS2_NUM_R1_OPCODES \
+ ((sizeof nios2_r1_opcodes) / (sizeof (nios2_r1_opcodes[0])))
+const int nios2_num_r1_opcodes = NIOS2_NUM_R1_OPCODES;
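A minimal sketch of how a table like nios2_r1_opcodes is conventionally consumed: an entry matches an instruction word when the masked word equals the entry's match value (the match and mask members named in the layout comment above). The real lookup goes through nios2_find_opcode_hash; the linear scan below is illustrative only and not part of the patch.

  /* Sketch only: find the first R1 table entry matching a raw word.  */
  static const struct nios2_opcode *scan_r1_sketch (unsigned long word)
  {
    int i;
    for (i = 0; i < nios2_num_r1_opcodes; i++)
      if ((word & nios2_r1_opcodes[i].mask) == nios2_r1_opcodes[i].match)
        return &nios2_r1_opcodes[i];
    return NULL;   /* no R1 encoding matched */
  }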
+
+
+const struct nios2_opcode nios2_r2_opcodes[] =
+{
+ /* { name, args, args_test, num_args, size, format,
+ match, mask, pinfo, overflow } */
+ {"add", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_ADD, MASK_R2_ADD, 0, no_overflow},
+ {"addi", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_ADDI, MASK_R2_ADDI, 0, signed_immed16_overflow},
+ {"add.n", "D,S,T", "D,S,T,E", 3, 2, iw_T3X1_type,
+ MATCH_R2_ADD_N, MASK_R2_ADD_N, 0, no_overflow},
+ {"addi.n", "D,S,e", "D,S,e,E", 3, 2, iw_T2X1I3_type,
+ MATCH_R2_ADDI_N, MASK_R2_ADDI_N, 0, enumeration_overflow},
+ {"and", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_AND, MASK_R2_AND, 0, no_overflow},
+ {"andchi", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_ANDCHI, MASK_R2_ANDCHI, 0, unsigned_immed16_overflow},
+ {"andci", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_ANDCI, MASK_R2_ANDCI, 0, unsigned_immed16_overflow},
+ {"andhi", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_ANDHI, MASK_R2_ANDHI, 0, unsigned_immed16_overflow},
+ {"andi", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_ANDI, MASK_R2_ANDI, 0, unsigned_immed16_overflow},
+ {"andi.n", "T,S,g", "T,S,g,E", 3, 2, iw_T2I4_type,
+ MATCH_R2_ANDI_N, MASK_R2_ANDI_N, 0, enumeration_overflow},
+ {"and.n", "D,S,T", "D,S,T,E", 3, 2, iw_T2X3_type,
+ MATCH_R2_AND_N, MASK_R2_AND_N, 0, no_overflow},
+ {"beq", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_BEQ, MASK_R2_BEQ, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"beqz.n", "S,P", "S,P,E", 2, 2, iw_T1I7_type,
+ MATCH_R2_BEQZ_N, MASK_R2_BEQZ_N, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bge", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_BGE, MASK_R2_BGE, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bgeu", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_BGEU, MASK_R2_BGEU, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bgt", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_BGT, MASK_R2_BGT,
+ NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bgtu", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_BGTU, MASK_R2_BGTU,
+ NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"ble", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_BLE, MASK_R2_BLE,
+ NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bleu", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_BLEU, MASK_R2_BLEU,
+ NIOS2_INSN_MACRO|NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"blt", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_BLT, MASK_R2_BLT, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bltu", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_BLTU, MASK_R2_BLTU, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bne", "s,t,o", "s,t,o,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_BNE, MASK_R2_BNE, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"bnez.n", "S,P", "S,P,E", 2, 2, iw_T1I7_type,
+ MATCH_R2_BNEZ_N, MASK_R2_BNEZ_N, NIOS2_INSN_CBRANCH, branch_target_overflow},
+ {"br", "o", "o,E", 1, 4, iw_F2I16_type,
+ MATCH_R2_BR, MASK_R2_BR, NIOS2_INSN_UBRANCH, branch_target_overflow},
+ {"break", "j", "j,E", 1, 4, iw_F3X6L5_type,
+ MATCH_R2_BREAK, MASK_R2_BREAK, NIOS2_INSN_OPTARG, no_overflow},
+ {"break.n", "j", "j,E", 1, 2, iw_X2L5_type,
+ MATCH_R2_BREAK_N, MASK_R2_BREAK_N, NIOS2_INSN_OPTARG, no_overflow},
+ {"bret", "", "E", 0, 4, iw_F3X6_type,
+ MATCH_R2_BRET, MASK_R2_BRET, 0, no_overflow},
+ {"br.n", "O", "O,E", 1, 2, iw_I10_type,
+ MATCH_R2_BR_N, MASK_R2_BR_N, NIOS2_INSN_UBRANCH, branch_target_overflow},
+ {"call", "m", "m,E", 1, 4, iw_L26_type,
+ MATCH_R2_CALL, MASK_R2_CALL, NIOS2_INSN_CALL, call_target_overflow},
+ {"callr", "s", "s,E", 1, 4, iw_F3X6_type,
+ MATCH_R2_CALLR, MASK_R2_CALLR, 0, no_overflow},
+ {"callr.n", "s", "s,E", 1, 2, iw_F1X1_type,
+ MATCH_R2_CALLR_N, MASK_R2_CALLR_N, 0, no_overflow},
+ {"cmpeq", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_CMPEQ, MASK_R2_CMPEQ, 0, no_overflow},
+ {"cmpeqi", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_CMPEQI, MASK_R2_CMPEQI, 0, signed_immed16_overflow},
+ {"cmpge", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_CMPGE, MASK_R2_CMPGE, 0, no_overflow},
+ {"cmpgei", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_CMPGEI, MASK_R2_CMPGEI, 0, signed_immed16_overflow},
+ {"cmpgeu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_CMPGEU, MASK_R2_CMPGEU, 0, no_overflow},
+ {"cmpgeui", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_CMPGEUI, MASK_R2_CMPGEUI, 0, unsigned_immed16_overflow},
+ {"cmpgt", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_CMPGT, MASK_R2_CMPGT, NIOS2_INSN_MACRO, no_overflow},
+ {"cmpgti", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_CMPGTI, MASK_R2_CMPGTI, NIOS2_INSN_MACRO, signed_immed16_overflow},
+ {"cmpgtu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_CMPGTU, MASK_R2_CMPGTU, NIOS2_INSN_MACRO, no_overflow},
+ {"cmpgtui", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_CMPGTUI, MASK_R2_CMPGTUI,
+ NIOS2_INSN_MACRO, unsigned_immed16_overflow},
+ {"cmple", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_CMPLE, MASK_R2_CMPLE, NIOS2_INSN_MACRO, no_overflow},
+ {"cmplei", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_CMPLEI, MASK_R2_CMPLEI, NIOS2_INSN_MACRO, signed_immed16_overflow},
+ {"cmpleu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_CMPLEU, MASK_R2_CMPLEU, NIOS2_INSN_MACRO, no_overflow},
+ {"cmpleui", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_CMPLEUI, MASK_R2_CMPLEUI,
+ NIOS2_INSN_MACRO, unsigned_immed16_overflow},
+ {"cmplt", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_CMPLT, MASK_R2_CMPLT, 0, no_overflow},
+ {"cmplti", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_CMPLTI, MASK_R2_CMPLTI, 0, signed_immed16_overflow},
+ {"cmpltu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_CMPLTU, MASK_R2_CMPLTU, 0, no_overflow},
+ {"cmpltui", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_CMPLTUI, MASK_R2_CMPLTUI, 0, unsigned_immed16_overflow},
+ {"cmpne", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_CMPNE, MASK_R2_CMPNE, 0, no_overflow},
+ {"cmpnei", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_CMPNEI, MASK_R2_CMPNEI, 0, signed_immed16_overflow},
+ {"custom", "l,d,s,t", "l,d,s,t,E", 4, 4, iw_F3X8_type,
+ MATCH_R2_CUSTOM, MASK_R2_CUSTOM, 0, custom_opcode_overflow},
+ {"div", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_DIV, MASK_R2_DIV, 0, no_overflow},
+ {"divu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_DIVU, MASK_R2_DIVU, 0, no_overflow},
+ {"eni", "j", "j,E", 1, 4, iw_F3X6L5_type,
+ MATCH_R2_ENI, MASK_R2_ENI, NIOS2_INSN_OPTARG, no_overflow},
+ {"eret", "", "E", 0, 4, iw_F3X6_type,
+ MATCH_R2_ERET, MASK_R2_ERET, 0, no_overflow},
+ {"extract", "t,s,j,k", "t,s,j,k,E", 4, 4, iw_F2X6L10_type,
+ MATCH_R2_EXTRACT, MASK_R2_EXTRACT, 0, no_overflow},
+ {"flushd", "I(s)", "I(s),E", 2, 4, iw_F1X4I12_type,
+ MATCH_R2_FLUSHD, MASK_R2_FLUSHD, 0, address_offset_overflow},
+ {"flushda", "I(s)", "I(s),E", 2, 4, iw_F1X4I12_type,
+ MATCH_R2_FLUSHDA, MASK_R2_FLUSHDA, 0, address_offset_overflow},
+ {"flushi", "s", "s,E", 1, 4, iw_F3X6_type,
+ MATCH_R2_FLUSHI, MASK_R2_FLUSHI, 0, no_overflow},
+ {"flushp", "", "E", 0, 4, iw_F3X6_type,
+ MATCH_R2_FLUSHP, MASK_R2_FLUSHP, 0, no_overflow},
+ {"initd", "I(s)", "I(s),E", 2, 4, iw_F1X4I12_type,
+ MATCH_R2_INITD, MASK_R2_INITD, 0, address_offset_overflow},
+ {"initda", "I(s)", "I(s),E", 2, 4, iw_F1X4I12_type,
+ MATCH_R2_INITDA, MASK_R2_INITDA, 0, address_offset_overflow},
+ {"initi", "s", "s,E", 1, 4, iw_F3X6_type,
+ MATCH_R2_INITI, MASK_R2_INITI, 0, no_overflow},
+ {"insert", "t,s,j,k", "t,s,j,k,E", 4, 4, iw_F2X6L10_type,
+ MATCH_R2_INSERT, MASK_R2_INSERT, 0, no_overflow},
+ {"jmp", "s", "s,E", 1, 4, iw_F3X6_type,
+ MATCH_R2_JMP, MASK_R2_JMP, 0, no_overflow},
+ {"jmpi", "m", "m,E", 1, 4, iw_L26_type,
+ MATCH_R2_JMPI, MASK_R2_JMPI, 0, call_target_overflow},
+ {"jmpr.n", "s", "s,E", 1, 2, iw_F1X1_type,
+ MATCH_R2_JMPR_N, MASK_R2_JMPR_N, 0, no_overflow},
+ {"ldb", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type,
+ MATCH_R2_LDB, MASK_R2_LDB, 0, address_offset_overflow},
+ {"ldbio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type,
+ MATCH_R2_LDBIO, MASK_R2_LDBIO, 0, signed_immed12_overflow},
+ {"ldbu", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type,
+ MATCH_R2_LDBU, MASK_R2_LDBU, 0, address_offset_overflow},
+ {"ldbuio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type,
+ MATCH_R2_LDBUIO, MASK_R2_LDBUIO, 0, signed_immed12_overflow},
+ {"ldbu.n", "T,Y(S)", "T,Y(S),E", 3, 2, iw_T2I4_type,
+ MATCH_R2_LDBU_N, MASK_R2_LDBU_N, 0, address_offset_overflow},
+ {"ldex", "d,(s)", "d,(s),E", 2, 4, iw_F3X6_type,
+ MATCH_R2_LDEX, MASK_R2_LDEX, 0, no_overflow},
+ {"ldh", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type,
+ MATCH_R2_LDH, MASK_R2_LDH, 0, address_offset_overflow},
+ {"ldhio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type,
+ MATCH_R2_LDHIO, MASK_R2_LDHIO, 0, signed_immed12_overflow},
+ {"ldhu", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type,
+ MATCH_R2_LDHU, MASK_R2_LDHU, 0, address_offset_overflow},
+ {"ldhuio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type,
+ MATCH_R2_LDHUIO, MASK_R2_LDHUIO, 0, signed_immed12_overflow},
+ {"ldhu.n", "T,X(S)", "T,X(S),E", 3, 2, iw_T2I4_type,
+ MATCH_R2_LDHU_N, MASK_R2_LDHU_N, 0, address_offset_overflow},
+ {"ldsex", "d,(s)", "d,(s),E", 2, 4, iw_F3X6_type,
+ MATCH_R2_LDSEX, MASK_R2_LDSEX, 0, no_overflow},
+ {"ldw", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type,
+ MATCH_R2_LDW, MASK_R2_LDW, 0, address_offset_overflow},
+ {"ldwio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type,
+ MATCH_R2_LDWIO, MASK_R2_LDWIO, 0, signed_immed12_overflow},
+ {"ldwm", "R,B", "R,B,E", 2, 4, iw_F1X4L17_type,
+ MATCH_R2_LDWM, MASK_R2_LDWM, 0, no_overflow},
+ {"ldw.n", "T,W(S)", "T,W(S),E", 3, 2, iw_T2I4_type,
+ MATCH_R2_LDW_N, MASK_R2_LDW_N, 0, address_offset_overflow},
+ {"ldwsp.n", "t,V(s)", "t,V(s),E", 3, 2, iw_F1I5_type,
+ MATCH_R2_LDWSP_N, MASK_R2_LDWSP_N, 0, address_offset_overflow},
+ {"merge", "t,s,j,k", "t,s,j,k,E", 4, 4, iw_F2X6L10_type,
+ MATCH_R2_MERGE, MASK_R2_MERGE, 0, no_overflow},
+ {"mov", "d,s", "d,s,E", 2, 4, iw_F3X6_type,
+ MATCH_R2_MOV, MASK_R2_MOV, NIOS2_INSN_MACRO_MOV, no_overflow},
+ {"mov.n", "d,s", "d,s,E", 2, 2, iw_F2_type,
+ MATCH_R2_MOV_N, MASK_R2_MOV_N, 0, no_overflow},
+ {"movi.n", "D,h", "D,h,E", 2, 2, iw_T1I7_type,
+ MATCH_R2_MOVI_N, MASK_R2_MOVI_N, 0, enumeration_overflow},
+ {"movhi", "t,u", "t,u,E", 2, 4, iw_F2I16_type,
+ MATCH_R2_MOVHI, MASK_R2_MOVHI,
+ NIOS2_INSN_MACRO_MOVI, unsigned_immed16_overflow},
+ {"movi", "t,i", "t,i,E", 2, 4, iw_F2I16_type,
+ MATCH_R2_MOVI, MASK_R2_MOVI, NIOS2_INSN_MACRO_MOVI, signed_immed16_overflow},
+ {"movia", "t,o", "t,o,E", 2, 4, iw_F2I16_type,
+ MATCH_R2_ORHI, MASK_R2_ORHI, NIOS2_INSN_MACRO_MOVIA, no_overflow},
+ {"movui", "t,u", "t,u,E", 2, 4, iw_F2I16_type,
+ MATCH_R2_MOVUI, MASK_R2_MOVUI,
+ NIOS2_INSN_MACRO_MOVI, unsigned_immed16_overflow},
+ {"mul", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_MUL, MASK_R2_MUL, 0, no_overflow},
+ {"muli", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_MULI, MASK_R2_MULI, 0, signed_immed16_overflow},
+ {"mulxss", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_MULXSS, MASK_R2_MULXSS, 0, no_overflow},
+ {"mulxsu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_MULXSU, MASK_R2_MULXSU, 0, no_overflow},
+ {"mulxuu", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_MULXUU, MASK_R2_MULXUU, 0, no_overflow},
+ /* The encoding of the neg.n operands is backwards, not
+ the interpretation -- the first operand is still the
+ destination and the second the source. */
+ {"neg.n", "S,D", "S,D,E", 2, 2, iw_T2X3_type,
+ MATCH_R2_NEG_N, MASK_R2_NEG_N, 0, no_overflow},
+ {"nextpc", "d", "d,E", 1, 4, iw_F3X6_type,
+ MATCH_R2_NEXTPC, MASK_R2_NEXTPC, 0, no_overflow},
+ {"nop", "", "E", 0, 4, iw_F3X6_type,
+ MATCH_R2_NOP, MASK_R2_NOP, NIOS2_INSN_MACRO_MOV, no_overflow},
+ {"nop.n", "", "E", 0, 2, iw_F2_type,
+ MATCH_R2_NOP_N, MASK_R2_NOP_N, NIOS2_INSN_MACRO_MOV, no_overflow},
+ {"nor", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_NOR, MASK_R2_NOR, 0, no_overflow},
+ {"not.n", "D,S", "D,S,E", 2, 2, iw_T2X3_type,
+ MATCH_R2_NOT_N, MASK_R2_NOT_N, 0, no_overflow},
+ {"or", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_OR, MASK_R2_OR, 0, no_overflow},
+ {"orhi", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_ORHI, MASK_R2_ORHI, 0, unsigned_immed16_overflow},
+ {"ori", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_ORI, MASK_R2_ORI, 0, unsigned_immed16_overflow},
+ {"or.n", "D,S,T", "D,S,T,E", 3, 2, iw_T2X3_type,
+ MATCH_R2_OR_N, MASK_R2_OR_N, 0, no_overflow},
+ {"pop.n", "R,W", "R,W,E", 2, 2, iw_L5I4X1_type,
+ MATCH_R2_POP_N, MASK_R2_POP_N, NIOS2_INSN_OPTARG, no_overflow},
+ {"push.n", "R,W", "R,W,E", 2, 2, iw_L5I4X1_type,
+ MATCH_R2_PUSH_N, MASK_R2_PUSH_N, NIOS2_INSN_OPTARG, no_overflow},
+ {"rdctl", "d,c", "d,c,E", 2, 4, iw_F3X6L5_type,
+ MATCH_R2_RDCTL, MASK_R2_RDCTL, 0, no_overflow},
+ {"rdprs", "t,s,I", "t,s,I,E", 3, 4, iw_F2X4I12_type,
+ MATCH_R2_RDPRS, MASK_R2_RDPRS, 0, signed_immed12_overflow},
+ {"ret", "", "E", 0, 4, iw_F3X6_type,
+ MATCH_R2_RET, MASK_R2_RET, 0, no_overflow},
+ {"ret.n", "", "E", 0, 2, iw_X2L5_type,
+ MATCH_R2_RET_N, MASK_R2_RET_N, 0, no_overflow},
+ {"rol", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_ROL, MASK_R2_ROL, 0, no_overflow},
+ {"roli", "d,s,j", "d,s,j,E", 3, 4, iw_F3X6L5_type,
+ MATCH_R2_ROLI, MASK_R2_ROLI, 0, unsigned_immed5_overflow},
+ {"ror", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_ROR, MASK_R2_ROR, 0, no_overflow},
+ {"sll", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_SLL, MASK_R2_SLL, 0, no_overflow},
+ {"slli", "d,s,j", "d,s,j,E", 3, 4, iw_F3X6L5_type,
+ MATCH_R2_SLLI, MASK_R2_SLLI, 0, unsigned_immed5_overflow},
+ {"sll.n", "D,S,T", "D,S,T,E", 3, 2, iw_T2X3_type,
+ MATCH_R2_SLL_N, MASK_R2_SLL_N, 0, no_overflow},
+ {"slli.n", "D,S,f", "D,S,f,E", 3, 2, iw_T2X1L3_type,
+ MATCH_R2_SLLI_N, MASK_R2_SLLI_N, 0, enumeration_overflow},
+ {"spaddi.n", "D,U", "D,U,E", 2, 2, iw_T1I7_type,
+ MATCH_R2_SPADDI_N, MASK_R2_SPADDI_N, 0, address_offset_overflow},
+ {"spdeci.n", "U", "U,E", 1, 2, iw_X1I7_type,
+ MATCH_R2_SPDECI_N, MASK_R2_SPDECI_N, 0, address_offset_overflow},
+ {"spinci.n", "U", "U,E", 1, 2, iw_X1I7_type,
+ MATCH_R2_SPINCI_N, MASK_R2_SPINCI_N, 0, address_offset_overflow},
+ {"sra", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_SRA, MASK_R2_SRA, 0, no_overflow},
+ {"srai", "d,s,j", "d,s,j,E", 3, 4, iw_F3X6L5_type,
+ MATCH_R2_SRAI, MASK_R2_SRAI, 0, unsigned_immed5_overflow},
+ {"srl", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_SRL, MASK_R2_SRL, 0, no_overflow},
+ {"srli", "d,s,j", "d,s,j,E", 3, 4, iw_F3X6L5_type,
+ MATCH_R2_SRLI, MASK_R2_SRLI, 0, unsigned_immed5_overflow},
+ {"srl.n", "D,S,T", "D,S,T,E", 3, 2, iw_T2X3_type,
+ MATCH_R2_SRL_N, MASK_R2_SRL_N, 0, no_overflow},
+ {"srli.n", "D,S,f", "D,S,f,E", 3, 2, iw_T2X1L3_type,
+ MATCH_R2_SRLI_N, MASK_R2_SRLI_N, 0, enumeration_overflow},
+ {"stb", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type,
+ MATCH_R2_STB, MASK_R2_STB, 0, address_offset_overflow},
+ {"stbio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type,
+ MATCH_R2_STBIO, MASK_R2_STBIO, 0, signed_immed12_overflow},
+ {"stb.n", "T,Y(S)", "T,Y(S),E", 3, 2, iw_T2I4_type,
+ MATCH_R2_STB_N, MASK_R2_STB_N, 0, address_offset_overflow},
+ {"stbz.n", "t,M(S)", "t,M(S),E", 3, 2, iw_T1X1I6_type,
+ MATCH_R2_STBZ_N, MASK_R2_STBZ_N, 0, address_offset_overflow},
+ {"stex", "d,t,(s)", "d,t,(s),E", 3, 4, iw_F3X6_type,
+ MATCH_R2_STEX, MASK_R2_STEX, 0, no_overflow},
+ {"sth", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type,
+ MATCH_R2_STH, MASK_R2_STH, 0, address_offset_overflow},
+ {"sthio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type,
+ MATCH_R2_STHIO, MASK_R2_STHIO, 0, signed_immed12_overflow},
+ {"sth.n", "T,X(S)", "T,X(S),E", 3, 2, iw_T2I4_type,
+ MATCH_R2_STH_N, MASK_R2_STH_N, 0, address_offset_overflow},
+ {"stsex", "d,t,(s)", "d,t,(s),E", 3, 4, iw_F3X6_type,
+ MATCH_R2_STSEX, MASK_R2_STSEX, 0, no_overflow},
+ {"stw", "t,i(s)", "t,i(s),E", 3, 4, iw_F2I16_type,
+ MATCH_R2_STW, MASK_R2_STW, 0, address_offset_overflow},
+ {"stwio", "t,I(s)", "t,I(s),E", 3, 4, iw_F2X4I12_type,
+ MATCH_R2_STWIO, MASK_R2_STWIO, 0, signed_immed12_overflow},
+ {"stwm", "R,B", "R,B,E", 2, 4, iw_F1X4L17_type,
+ MATCH_R2_STWM, MASK_R2_STWM, 0, no_overflow},
+ {"stwsp.n", "t,V(s)", "t,V(s),E", 3, 2, iw_F1I5_type,
+ MATCH_R2_STWSP_N, MASK_R2_STWSP_N, 0, address_offset_overflow},
+ {"stw.n", "T,W(S)", "T,W(S),E", 3, 2, iw_T2I4_type,
+ MATCH_R2_STW_N, MASK_R2_STW_N, 0, address_offset_overflow},
+ {"stwz.n", "t,N(S)", "t,N(S),E", 3, 2, iw_T1X1I6_type,
+ MATCH_R2_STWZ_N, MASK_R2_STWZ_N, 0, address_offset_overflow},
+ {"sub", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_SUB, MASK_R2_SUB, 0, no_overflow},
+ {"subi", "t,s,i", "t,s,i,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_SUBI, MASK_R2_SUBI, NIOS2_INSN_MACRO, signed_immed16_overflow},
+ {"sub.n", "D,S,T", "D,S,T,E", 3, 2, iw_T3X1_type,
+ MATCH_R2_SUB_N, MASK_R2_SUB_N, 0, no_overflow},
+ {"subi.n", "D,S,e", "D,S,e,E", 3, 2, iw_T2X1I3_type,
+ MATCH_R2_SUBI_N, MASK_R2_SUBI_N, 0, enumeration_overflow},
+ {"sync", "", "E", 0, 4, iw_F3X6_type,
+ MATCH_R2_SYNC, MASK_R2_SYNC, 0, no_overflow},
+ {"trap", "j", "j,E", 1, 4, iw_F3X6L5_type,
+ MATCH_R2_TRAP, MASK_R2_TRAP, NIOS2_INSN_OPTARG, no_overflow},
+ {"trap.n", "j", "j,E", 1, 2, iw_X2L5_type,
+ MATCH_R2_TRAP_N, MASK_R2_TRAP_N, NIOS2_INSN_OPTARG, no_overflow},
+ {"wrctl", "c,s", "c,s,E", 2, 4, iw_F3X6L5_type,
+ MATCH_R2_WRCTL, MASK_R2_WRCTL, 0, no_overflow},
+ {"wrpie", "d,s", "d,s,E", 2, 4, iw_F3X6L5_type,
+ MATCH_R2_WRPIE, MASK_R2_WRPIE, 0, no_overflow},
+ {"wrprs", "d,s", "d,s,E", 2, 4, iw_F3X6_type,
+ MATCH_R2_WRPRS, MASK_R2_WRPRS, 0, no_overflow},
+ {"xor", "d,s,t", "d,s,t,E", 3, 4, iw_F3X6_type,
+ MATCH_R2_XOR, MASK_R2_XOR, 0, no_overflow},
+ {"xorhi", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_XORHI, MASK_R2_XORHI, 0, unsigned_immed16_overflow},
+ {"xori", "t,s,u", "t,s,u,E", 3, 4, iw_F2I16_type,
+ MATCH_R2_XORI, MASK_R2_XORI, 0, unsigned_immed16_overflow},
+ {"xor.n", "D,S,T", "D,S,T,E", 3, 2, iw_T2X3_type,
+ MATCH_R2_XOR_N, MASK_R2_XOR_N, 0, no_overflow},
+};
+
+#define NIOS2_NUM_R2_OPCODES \
+ ((sizeof nios2_r2_opcodes) / (sizeof (nios2_r2_opcodes[0])))
+const int nios2_num_r2_opcodes = NIOS2_NUM_R2_OPCODES;
+
+/* Default to using the R1 instruction tables. */
+struct nios2_opcode *nios2_opcodes = (struct nios2_opcode *) nios2_r1_opcodes;
+int nios2_num_opcodes = NIOS2_NUM_R1_OPCODES;
+#undef NIOS2_NUM_R1_OPCODES
+#undef NIOS2_NUM_R2_OPCODES
+
+/* Decodings for R2 asi.n (addi.n/subi.n) immediate values. */
+unsigned int nios2_r2_asi_n_mappings[] =
+ {1, 2, 4, 8, 16, 32, 64, 128};
+const int nios2_num_r2_asi_n_mappings = 8;
+
+/* Decodings for R2 shi.n (slli.n/srli.n) immediate values. */
+unsigned int nios2_r2_shi_n_mappings[] =
+ {1, 2, 3, 8, 12, 16, 24, 31};
+const int nios2_num_r2_shi_n_mappings = 8;
+
+/* Decodings for R2 andi.n immediate values. */
+unsigned int nios2_r2_andi_n_mappings[] =
+ {1, 2, 3, 4, 8, 0xf, 0x10, 0x1f,
+ 0x20, 0x3f, 0x7f, 0x80, 0xff, 0x7ff, 0xff00, 0xffff};
+const int nios2_num_r2_andi_n_mappings = 16;
+
+/* Decodings for R2 3-bit register fields. */
+int nios2_r2_reg3_mappings[] =
+ {16, 17, 2, 3, 4, 5, 6, 7};
+const int nios2_num_r2_reg3_mappings = 8;
+
+/* Decodings for R2 push.n/pop.n REG_RANGE value list. */
+unsigned long nios2_r2_reg_range_mappings[] = {
+ 0x00010000,
+ 0x00030000,
+ 0x00070000,
+ 0x000f0000,
+ 0x001f0000,
+ 0x003f0000,
+ 0x007f0000,
+ 0x00ff0000
+};
+const int nios2_num_r2_reg_range_mappings = 8;
+
+/*#include "sysdep.h"
+#include "dis-asm.h"
+#include "opcode/nios2.h"
+#include "libiberty.h"
+#include <string.h>
+#include <assert.h>
+*/
+/* No symbol table is available when this code runs in an embedded
+   system, as when it is used for disassembler support in a monitor. */
+#if !defined(EMBEDDED_ENV)
+#define SYMTAB_AVAILABLE 1
+/*
+#include "elf-bfd.h"
+#include "elf/nios2.h"
+*/
+#endif
+
+/* Default length of Nios II instruction in bytes. */
+#define INSNLEN 4
+
+/* Data structures used by the opcode hash table. */
+typedef struct _nios2_opcode_hash
+{
+ const struct nios2_opcode *opcode;
+ struct _nios2_opcode_hash *next;
+} nios2_opcode_hash;
+
+/* Hash table size. */
+#define OPCODE_HASH_SIZE (IW_R1_OP_UNSHIFTED_MASK + 1)
+
+/* Extract the opcode from an instruction word. */
+static unsigned int
+nios2_r1_extract_opcode (unsigned int x)
+{
+ return GET_IW_R1_OP (x);
+}
+
+static unsigned int
+nios2_r2_extract_opcode (unsigned int x)
+{
+ return GET_IW_R2_OP (x);
+}
+
+/* We maintain separate hash tables for R1 and R2 opcodes, and pseudo-ops
+ are stored in a different table than regular instructions. */
+
+typedef struct _nios2_disassembler_state
+{
+ const struct nios2_opcode *opcodes;
+ const int *num_opcodes;
+ unsigned int (*extract_opcode) (unsigned int);
+ nios2_opcode_hash *hash[OPCODE_HASH_SIZE];
+ nios2_opcode_hash *ps_hash[OPCODE_HASH_SIZE];
+ const struct nios2_opcode *nop;
+ bfd_boolean init;
+} nios2_disassembler_state;
+
+static nios2_disassembler_state
+nios2_r1_disassembler_state = {
+ nios2_r1_opcodes,
+ &nios2_num_r1_opcodes,
+ nios2_r1_extract_opcode,
+ {},
+ {},
+ NULL,
+ 0
+};
+
+static nios2_disassembler_state
+nios2_r2_disassembler_state = {
+ nios2_r2_opcodes,
+ &nios2_num_r2_opcodes,
+ nios2_r2_extract_opcode,
+ {},
+ {},
+ NULL,
+ 0
+};
+
+/* Function to initialize the opcode hash table. */
+static void
+nios2_init_opcode_hash (nios2_disassembler_state *state)
+{
+ unsigned int i;
+ register const struct nios2_opcode *op;
+
+ for (i = 0; i < OPCODE_HASH_SIZE; i++)
+ for (op = state->opcodes; op < &state->opcodes[*(state->num_opcodes)]; op++)
+ {
+ nios2_opcode_hash *new_hash;
+ nios2_opcode_hash **bucket = NULL;
+
+ if ((op->pinfo & NIOS2_INSN_MACRO) == NIOS2_INSN_MACRO)
+ {
+ if (i == state->extract_opcode (op->match)
+ && (op->pinfo & (NIOS2_INSN_MACRO_MOV | NIOS2_INSN_MACRO_MOVI)
+ & 0x7fffffff))
+ {
+ bucket = &(state->ps_hash[i]);
+ if (strcmp (op->name, "nop") == 0)
+ state->nop = op;
+ }
+ }
+ else if (i == state->extract_opcode (op->match))
+ bucket = &(state->hash[i]);
+
+ if (bucket)
+ {
+ new_hash =
+ (nios2_opcode_hash *) malloc (sizeof (nios2_opcode_hash));
+ if (new_hash == NULL)
+ {
+ fprintf (stderr,
+ "error allocating memory...broken disassembler\n");
+ abort ();
+ }
+ new_hash->opcode = op;
+ new_hash->next = NULL;
+ while (*bucket)
+ bucket = &((*bucket)->next);
+ *bucket = new_hash;
+ }
+ }
+ state->init = 1;
+
+#ifdef DEBUG_HASHTABLE
+ for (i = 0; i < OPCODE_HASH_SIZE; ++i)
+ {
+ nios2_opcode_hash *tmp_hash = state->hash[i];
+ printf ("index: 0x%02X ops: ", i);
+ while (tmp_hash != NULL)
+ {
+ printf ("%s ", tmp_hash->opcode->name);
+ tmp_hash = tmp_hash->next;
+ }
+ printf ("\n");
+ }
+
+ for (i = 0; i < OPCODE_HASH_SIZE; ++i)
+ {
+ nios2_opcode_hash *tmp_hash = state->ps_hash[i];
+ printf ("index: 0x%02X ops: ", i);
+ while (tmp_hash != NULL)
+ {
+ printf ("%s ", tmp_hash->opcode->name);
+ tmp_hash = tmp_hash->next;
+ }
+ printf ("\n");
+ }
+#endif /* DEBUG_HASHTABLE */
+}
+
+/* Return a pointer to an nios2_opcode struct for a given instruction
+ word OPCODE for bfd machine MACH, or NULL if there is an error. */
+const struct nios2_opcode *
+nios2_find_opcode_hash (unsigned long opcode, unsigned long mach)
+{
+ nios2_opcode_hash *entry;
+ nios2_disassembler_state *state;
+
+ /* Select the right instruction set, hash tables, and opcode accessor
+ for the mach variant. */
+ if (mach == bfd_mach_nios2r2)
+ state = &nios2_r2_disassembler_state;
+ else
+ state = &nios2_r1_disassembler_state;
+
+ /* Build a hash table to shorten the search time. */
+ if (!state->init)
+ nios2_init_opcode_hash (state);
+
+ /* Check for NOP first. Both NOP and MOV are macros that expand into
+ an ADD instruction, and we always want to give priority to NOP. */
+ if (state->nop->match == (opcode & state->nop->mask))
+ return state->nop;
+
+ /* First look in the pseudo-op hashtable. */
+ for (entry = state->ps_hash[state->extract_opcode (opcode)];
+ entry; entry = entry->next)
+ if (entry->opcode->match == (opcode & entry->opcode->mask))
+ return entry->opcode;
+
+ /* Otherwise look in the main hashtable. */
+ for (entry = state->hash[state->extract_opcode (opcode)];
+ entry; entry = entry->next)
+ if (entry->opcode->match == (opcode & entry->opcode->mask))
+ return entry->opcode;
+
+ return NULL;
+}
+
+/* There are 32 regular registers, 32 coprocessor registers,
+ and 32 control registers. */
+#define NUMREGNAMES 32
+
+/* Return a pointer to the base of the coprocessor register name array. */
+static struct nios2_reg *
+nios2_coprocessor_regs (void)
+{
+ static struct nios2_reg *cached = NULL;
+
+ if (!cached)
+ {
+ int i;
+ for (i = NUMREGNAMES; i < nios2_num_regs; i++)
+ if (!strcmp (nios2_regs[i].name, "c0"))
+ {
+ cached = nios2_regs + i;
+ break;
+ }
+ assert (cached);
+ }
+ return cached;
+}
+
+/* Return a pointer to the base of the control register name array. */
+static struct nios2_reg *
+nios2_control_regs (void)
+{
+ static struct nios2_reg *cached = NULL;
+
+ if (!cached)
+ {
+ int i;
+ for (i = NUMREGNAMES; i < nios2_num_regs; i++)
+ if (!strcmp (nios2_regs[i].name, "status"))
+ {
+ cached = nios2_regs + i;
+ break;
+ }
+ assert (cached);
+ }
+ return cached;
+}
+
+/* Helper routine to report internal errors. */
+static void
+bad_opcode (const struct nios2_opcode *op)
+{
+ fprintf (stderr, "Internal error: broken opcode descriptor for `%s %s'\n",
+ op->name, op->args);
+ abort ();
+}
+
+/* The function nios2_print_insn_arg uses the character pointed
+   to by ARGPTR to determine how it prints the next token or separator
+ character in the arguments to an instruction. */
+static int
+nios2_print_insn_arg (const char *argptr,
+ unsigned long opcode, bfd_vma address,
+ disassemble_info *info,
+ const struct nios2_opcode *op)
+{
+ unsigned long i = 0;
+ struct nios2_reg *reg_base;
+
+ switch (*argptr)
+ {
+ case ',':
+ case '(':
+ case ')':
+ (*info->fprintf_func) (info->stream, "%c", *argptr);
+ break;
+
+ case 'c':
+ /* Control register index. */
+ switch (op->format)
+ {
+ case iw_r_type:
+ i = GET_IW_R_IMM5 (opcode);
+ break;
+ case iw_F3X6L5_type:
+ i = GET_IW_F3X6L5_IMM5 (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ reg_base = nios2_control_regs ();
+ (*info->fprintf_func) (info->stream, "%s", reg_base[i].name);
+ break;
+
+ case 'd':
+ reg_base = nios2_regs;
+ switch (op->format)
+ {
+ case iw_r_type:
+ i = GET_IW_R_C (opcode);
+ break;
+ case iw_custom_type:
+ i = GET_IW_CUSTOM_C (opcode);
+ if (GET_IW_CUSTOM_READC (opcode) == 0)
+ reg_base = nios2_coprocessor_regs ();
+ break;
+ case iw_F3X6L5_type:
+ case iw_F3X6_type:
+ i = GET_IW_F3X6L5_C (opcode);
+ break;
+ case iw_F3X8_type:
+ i = GET_IW_F3X8_C (opcode);
+ if (GET_IW_F3X8_READC (opcode) == 0)
+ reg_base = nios2_coprocessor_regs ();
+ break;
+ case iw_F2_type:
+ i = GET_IW_F2_B (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ if (i < NUMREGNAMES)
+ (*info->fprintf_func) (info->stream, "%s", reg_base[i].name);
+ else
+ (*info->fprintf_func) (info->stream, "unknown");
+ break;
+
+ case 's':
+ reg_base = nios2_regs;
+ switch (op->format)
+ {
+ case iw_r_type:
+ i = GET_IW_R_A (opcode);
+ break;
+ case iw_i_type:
+ i = GET_IW_I_A (opcode);
+ break;
+ case iw_custom_type:
+ i = GET_IW_CUSTOM_A (opcode);
+ if (GET_IW_CUSTOM_READA (opcode) == 0)
+ reg_base = nios2_coprocessor_regs ();
+ break;
+ case iw_F2I16_type:
+ i = GET_IW_F2I16_A (opcode);
+ break;
+ case iw_F2X4I12_type:
+ i = GET_IW_F2X4I12_A (opcode);
+ break;
+ case iw_F1X4I12_type:
+ i = GET_IW_F1X4I12_A (opcode);
+ break;
+ case iw_F1X4L17_type:
+ i = GET_IW_F1X4L17_A (opcode);
+ break;
+ case iw_F3X6L5_type:
+ case iw_F3X6_type:
+ i = GET_IW_F3X6L5_A (opcode);
+ break;
+ case iw_F2X6L10_type:
+ i = GET_IW_F2X6L10_A (opcode);
+ break;
+ case iw_F3X8_type:
+ i = GET_IW_F3X8_A (opcode);
+ if (GET_IW_F3X8_READA (opcode) == 0)
+ reg_base = nios2_coprocessor_regs ();
+ break;
+ case iw_F1X1_type:
+ i = GET_IW_F1X1_A (opcode);
+ break;
+ case iw_F1I5_type:
+ i = 27; /* Implicit stack pointer reference. */
+ break;
+ case iw_F2_type:
+ i = GET_IW_F2_A (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ if (i < NUMREGNAMES)
+ (*info->fprintf_func) (info->stream, "%s", reg_base[i].name);
+ else
+ (*info->fprintf_func) (info->stream, "unknown");
+ break;
+
+ case 't':
+ reg_base = nios2_regs;
+ switch (op->format)
+ {
+ case iw_r_type:
+ i = GET_IW_R_B (opcode);
+ break;
+ case iw_i_type:
+ i = GET_IW_I_B (opcode);
+ break;
+ case iw_custom_type:
+ i = GET_IW_CUSTOM_B (opcode);
+ if (GET_IW_CUSTOM_READB (opcode) == 0)
+ reg_base = nios2_coprocessor_regs ();
+ break;
+ case iw_F2I16_type:
+ i = GET_IW_F2I16_B (opcode);
+ break;
+ case iw_F2X4I12_type:
+ i = GET_IW_F2X4I12_B (opcode);
+ break;
+ case iw_F3X6L5_type:
+ case iw_F3X6_type:
+ i = GET_IW_F3X6L5_B (opcode);
+ break;
+ case iw_F2X6L10_type:
+ i = GET_IW_F2X6L10_B (opcode);
+ break;
+ case iw_F3X8_type:
+ i = GET_IW_F3X8_B (opcode);
+ if (GET_IW_F3X8_READB (opcode) == 0)
+ reg_base = nios2_coprocessor_regs ();
+ break;
+ case iw_F1I5_type:
+ i = GET_IW_F1I5_B (opcode);
+ break;
+ case iw_F2_type:
+ i = GET_IW_F2_B (opcode);
+ break;
+ case iw_T1X1I6_type:
+ i = 0;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ if (i < NUMREGNAMES)
+ (*info->fprintf_func) (info->stream, "%s", reg_base[i].name);
+ else
+ (*info->fprintf_func) (info->stream, "unknown");
+ break;
+
+ case 'D':
+ switch (op->format)
+ {
+ case iw_T1I7_type:
+ i = GET_IW_T1I7_A3 (opcode);
+ break;
+ case iw_T2X1L3_type:
+ i = GET_IW_T2X1L3_B3 (opcode);
+ break;
+ case iw_T2X1I3_type:
+ i = GET_IW_T2X1I3_B3 (opcode);
+ break;
+ case iw_T3X1_type:
+ i = GET_IW_T3X1_C3 (opcode);
+ break;
+ case iw_T2X3_type:
+ if (op->num_args == 3)
+ i = GET_IW_T2X3_A3 (opcode);
+ else
+ i = GET_IW_T2X3_B3 (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ i = nios2_r2_reg3_mappings[i];
+ (*info->fprintf_func) (info->stream, "%s", nios2_regs[i].name);
+ break;
+
+ case 'M':
+ /* 6-bit unsigned immediate with no shift. */
+ switch (op->format)
+ {
+ case iw_T1X1I6_type:
+ i = GET_IW_T1X1I6_IMM6 (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'N':
+ /* 6-bit unsigned immediate with 2-bit shift. */
+ switch (op->format)
+ {
+ case iw_T1X1I6_type:
+ i = GET_IW_T1X1I6_IMM6 (opcode) << 2;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'S':
+ switch (op->format)
+ {
+ case iw_T1I7_type:
+ i = GET_IW_T1I7_A3 (opcode);
+ break;
+ case iw_T2I4_type:
+ i = GET_IW_T2I4_A3 (opcode);
+ break;
+ case iw_T2X1L3_type:
+ i = GET_IW_T2X1L3_A3 (opcode);
+ break;
+ case iw_T2X1I3_type:
+ i = GET_IW_T2X1I3_A3 (opcode);
+ break;
+ case iw_T3X1_type:
+ i = GET_IW_T3X1_A3 (opcode);
+ break;
+ case iw_T2X3_type:
+ i = GET_IW_T2X3_A3 (opcode);
+ break;
+ case iw_T1X1I6_type:
+ i = GET_IW_T1X1I6_A3 (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ i = nios2_r2_reg3_mappings[i];
+ (*info->fprintf_func) (info->stream, "%s", nios2_regs[i].name);
+ break;
+
+ case 'T':
+ switch (op->format)
+ {
+ case iw_T2I4_type:
+ i = GET_IW_T2I4_B3 (opcode);
+ break;
+ case iw_T3X1_type:
+ i = GET_IW_T3X1_B3 (opcode);
+ break;
+ case iw_T2X3_type:
+ i = GET_IW_T2X3_B3 (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ i = nios2_r2_reg3_mappings[i];
+ (*info->fprintf_func) (info->stream, "%s", nios2_regs[i].name);
+ break;
+
+ case 'i':
+ /* 16-bit signed immediate. */
+ switch (op->format)
+ {
+ case iw_i_type:
+ i = (signed) (GET_IW_I_IMM16 (opcode) << 16) >> 16;
+ break;
+ case iw_F2I16_type:
+ i = (signed) (GET_IW_F2I16_IMM16 (opcode) << 16) >> 16;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'I':
+ /* 12-bit signed immediate. */
+ switch (op->format)
+ {
+ case iw_F2X4I12_type:
+ i = (signed) (GET_IW_F2X4I12_IMM12 (opcode) << 20) >> 20;
+ break;
+ case iw_F1X4I12_type:
+ i = (signed) (GET_IW_F1X4I12_IMM12 (opcode) << 20) >> 20;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'u':
+ /* 16-bit unsigned immediate. */
+ switch (op->format)
+ {
+ case iw_i_type:
+ i = GET_IW_I_IMM16 (opcode);
+ break;
+ case iw_F2I16_type:
+ i = GET_IW_F2I16_IMM16 (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'U':
+ /* 7-bit unsigned immediate with 2-bit shift. */
+ switch (op->format)
+ {
+ case iw_T1I7_type:
+ i = GET_IW_T1I7_IMM7 (opcode) << 2;
+ break;
+ case iw_X1I7_type:
+ i = GET_IW_X1I7_IMM7 (opcode) << 2;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'V':
+ /* 5-bit unsigned immediate with 2-bit shift. */
+ switch (op->format)
+ {
+ case iw_F1I5_type:
+ i = GET_IW_F1I5_IMM5 (opcode) << 2;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'W':
+ /* 4-bit unsigned immediate with 2-bit shift. */
+ switch (op->format)
+ {
+ case iw_T2I4_type:
+ i = GET_IW_T2I4_IMM4 (opcode) << 2;
+ break;
+ case iw_L5I4X1_type:
+ i = GET_IW_L5I4X1_IMM4 (opcode) << 2;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'X':
+ /* 4-bit unsigned immediate with 1-bit shift. */
+ switch (op->format)
+ {
+ case iw_T2I4_type:
+ i = GET_IW_T2I4_IMM4 (opcode) << 1;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'Y':
+ /* 4-bit unsigned immediate without shift. */
+ switch (op->format)
+ {
+ case iw_T2I4_type:
+ i = GET_IW_T2I4_IMM4 (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'o':
+ /* 16-bit signed immediate address offset. */
+ switch (op->format)
+ {
+ case iw_i_type:
+ i = (signed) (GET_IW_I_IMM16 (opcode) << 16) >> 16;
+ break;
+ case iw_F2I16_type:
+ i = (signed) (GET_IW_F2I16_IMM16 (opcode) << 16) >> 16;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ address = address + 4 + i;
+ (*info->print_address_func) (address, info);
+ break;
+
+ case 'O':
+ /* 10-bit signed address offset with 1-bit shift. */
+ switch (op->format)
+ {
+ case iw_I10_type:
+ i = (signed) (GET_IW_I10_IMM10 (opcode) << 22) >> 21;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ address = address + 2 + i;
+ (*info->print_address_func) (address, info);
+ break;
+
+ case 'P':
+ /* 7-bit signed address offset with 1-bit shift. */
+ switch (op->format)
+ {
+ case iw_T1I7_type:
+ i = (signed) (GET_IW_T1I7_IMM7 (opcode) << 25) >> 24;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ address = address + 2 + i;
+ (*info->print_address_func) (address, info);
+ break;
+
+ case 'j':
+ /* 5-bit unsigned immediate. */
+ switch (op->format)
+ {
+ case iw_r_type:
+ i = GET_IW_R_IMM5 (opcode);
+ break;
+ case iw_F3X6L5_type:
+ i = GET_IW_F3X6L5_IMM5 (opcode);
+ break;
+ case iw_F2X6L10_type:
+ i = GET_IW_F2X6L10_MSB (opcode);
+ break;
+ case iw_X2L5_type:
+ i = GET_IW_X2L5_IMM5 (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'k':
+ /* Second 5-bit unsigned immediate field. */
+ switch (op->format)
+ {
+ case iw_F2X6L10_type:
+ i = GET_IW_F2X6L10_LSB (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'l':
+ /* 8-bit unsigned immediate. */
+ switch (op->format)
+ {
+ case iw_custom_type:
+ i = GET_IW_CUSTOM_N (opcode);
+ break;
+ case iw_F3X8_type:
+ i = GET_IW_F3X8_N (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%lu", i);
+ break;
+
+ case 'm':
+ /* 26-bit unsigned immediate. */
+ switch (op->format)
+ {
+ case iw_j_type:
+ i = GET_IW_J_IMM26 (opcode);
+ break;
+ case iw_L26_type:
+ i = GET_IW_L26_IMM26 (opcode);
+ break;
+ default:
+ bad_opcode (op);
+ }
+ /* This translates to an address because it's only used in call
+ instructions. */
+ address = (address & 0xf0000000) | (i << 2);
+ (*info->print_address_func) (address, info);
+ break;
+
+ case 'e':
+ /* Encoded enumeration for addi.n/subi.n. */
+ switch (op->format)
+ {
+ case iw_T2X1I3_type:
+ i = nios2_r2_asi_n_mappings[GET_IW_T2X1I3_IMM3 (opcode)];
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%lu", i);
+ break;
+
+ case 'f':
+ /* Encoded enumeration for slli.n/srli.n. */
+ switch (op->format)
+ {
+ case iw_T2X1L3_type:
+ i = nios2_r2_shi_n_mappings[GET_IW_T2X1I3_IMM3 (opcode)];
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%lu", i);
+ break;
+
+ case 'g':
+ /* Encoded enumeration for andi.n. */
+ switch (op->format)
+ {
+ case iw_T2I4_type:
+ i = nios2_r2_andi_n_mappings[GET_IW_T2I4_IMM4 (opcode)];
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%lu", i);
+ break;
+
+ case 'h':
+ /* Encoded enumeration for movi.n. */
+ switch (op->format)
+ {
+ case iw_T1I7_type:
+ i = GET_IW_T1I7_IMM7 (opcode);
+ if (i == 125)
+ i = 0xff;
+ else if (i == 126)
+ i = -2;
+ else if (i == 127)
+ i = -1;
+ break;
+ default:
+ bad_opcode (op);
+ }
+ (*info->fprintf_func) (info->stream, "%ld", i);
+ break;
+
+ case 'R':
+ {
+ unsigned long reglist = 0;
+ int dir = 1;
+ int k, t;
+
+ switch (op->format)
+ {
+ case iw_F1X4L17_type:
+ /* Encoding for ldwm/stwm. */
+ i = GET_IW_F1X4L17_REGMASK (opcode);
+ if (GET_IW_F1X4L17_RS (opcode))
+ {
+ reglist = ((i << 14) & 0x00ffc000);
+ if (i & (1 << 10))
+ reglist |= (1 << 28);
+ if (i & (1 << 11))
+ reglist |= (1 << 31);
+ }
+ else
+ reglist = i << 2;
+ dir = GET_IW_F1X4L17_REGMASK (opcode) ? 1 : -1;
+ break;
+
+ case iw_L5I4X1_type:
+ /* Encoding for push.n/pop.n. */
+ reglist |= (1 << 31);
+ if (GET_IW_L5I4X1_FP (opcode))
+ reglist |= (1 << 28);
+ if (GET_IW_L5I4X1_CS (opcode))
+ {
+ int val = GET_IW_L5I4X1_REGRANGE (opcode);
+ reglist |= nios2_r2_reg_range_mappings[val];
+ }
+ dir = (op->match == MATCH_R2_POP_N ? 1 : -1);
+ break;
+
+ default:
+ bad_opcode (op);
+ }
+
+ t = 0;
+ (*info->fprintf_func) (info->stream, "{");
+ for (k = (dir == 1 ? 0 : 31);
+ (dir == 1 && k < 32) || (dir == -1 && k >= 0);
+ k += dir)
+ if (reglist & (1 << k))
+ {
+ if (t)
+ (*info->fprintf_func) (info->stream, ",");
+ else
+ t++;
+ (*info->fprintf_func) (info->stream, "%s", nios2_regs[k].name);
+ }
+ (*info->fprintf_func) (info->stream, "}");
+ break;
+ }
+
+ case 'B':
+ /* Base register and options for ldwm/stwm. */
+ switch (op->format)
+ {
+ case iw_F1X4L17_type:
+ if (GET_IW_F1X4L17_ID (opcode) == 0)
+ (*info->fprintf_func) (info->stream, "--");
+
+ i = GET_IW_F1X4I12_A (opcode);
+ (*info->fprintf_func) (info->stream, "(%s)",
+ nios2_builtin_regs[i].name);
+
+ if (GET_IW_F1X4L17_ID (opcode))
+ (*info->fprintf_func) (info->stream, "++");
+ if (GET_IW_F1X4L17_WB (opcode))
+ (*info->fprintf_func) (info->stream, ",writeback");
+ if (GET_IW_F1X4L17_PC (opcode))
+ (*info->fprintf_func) (info->stream, ",ret");
+ break;
+ default:
+ bad_opcode (op);
+ }
+ break;
+
+ default:
+ (*info->fprintf_func) (info->stream, "unknown");
+ break;
+ }
+ return 0;
+}
+
+/* nios2_disassemble does all the work of disassembling a Nios II
+ instruction opcode. */
+static int
+nios2_disassemble (bfd_vma address, unsigned long opcode,
+ disassemble_info *info)
+{
+ const struct nios2_opcode *op;
+
+ info->bytes_per_line = INSNLEN;
+ info->bytes_per_chunk = INSNLEN;
+ info->display_endian = info->endian;
+ info->insn_info_valid = 1;
+ info->branch_delay_insns = 0;
+ info->data_size = 0;
+ info->insn_type = dis_nonbranch;
+ info->target = 0;
+ info->target2 = 0;
+
+ /* Find the major opcode and use this to disassemble
+ the instruction and its arguments. */
+ op = nios2_find_opcode_hash (opcode, info->mach);
+
+ if (op != NULL)
+ {
+ const char *argstr = op->args;
+ (*info->fprintf_func) (info->stream, "%s", op->name);
+ if (argstr != NULL && *argstr != '\0')
+ {
+ (*info->fprintf_func) (info->stream, "\t");
+ while (*argstr != '\0')
+ {
+ nios2_print_insn_arg (argstr, opcode, address, info, op);
+ ++argstr;
+ }
+ }
+ /* Tell the caller how far to advance the program counter. */
+ info->bytes_per_chunk = op->size;
+ return op->size;
+ }
+ else
+ {
+ /* Handle undefined instructions. */
+ info->insn_type = dis_noninsn;
+ (*info->fprintf_func) (info->stream, "0x%lx", opcode);
+ return INSNLEN;
+ }
+}
+
+
+/* print_insn_nios2 is the main disassemble function for Nios II.
+   The function disassembler(abfd) (source in disassemble.c) returns a
+   pointer to either print_insn_big_nios2 or
+   print_insn_little_nios2, which in turn call this function when the
+ bfd machine type is Nios II. print_insn_nios2 reads the
+ instruction word at the address given, and prints the disassembled
+ instruction on the stream info->stream using info->fprintf_func. */
+
+static int
+print_insn_nios2 (bfd_vma address, disassemble_info *info,
+ enum bfd_endian endianness)
+{
+ bfd_byte buffer[INSNLEN];
+ int status;
+
+ status = (*info->read_memory_func) (address, buffer, INSNLEN, info);
+ if (status == 0)
+ {
+ unsigned long insn;
+ if (endianness == BFD_ENDIAN_BIG)
+ insn = (unsigned long) bfd_getb32 (buffer);
+ else
+ insn = (unsigned long) bfd_getl32 (buffer);
+ return nios2_disassemble (address, insn, info);
+ }
+
+ /* We might have a 16-bit R2 instruction at the end of memory. Try that. */
+ if (info->mach == bfd_mach_nios2r2)
+ {
+ status = (*info->read_memory_func) (address, buffer, 2, info);
+ if (status == 0)
+ {
+ unsigned long insn;
+ if (endianness == BFD_ENDIAN_BIG)
+ insn = (unsigned long) bfd_getb16 (buffer);
+ else
+ insn = (unsigned long) bfd_getl16 (buffer);
+ return nios2_disassemble (address, insn, info);
+ }
+ }
+
+ /* If we got here, we couldn't read anything. */
+ (*info->memory_error_func) (status, address, info);
+ return -1;
+}
+
+/* These two functions are the main entry points, accessed from
+ disassemble.c. */
+int
+print_insn_big_nios2 (bfd_vma address, disassemble_info *info)
+{
+ return print_insn_nios2 (address, info, BFD_ENDIAN_BIG);
+}
+
+int
+print_insn_little_nios2 (bfd_vma address, disassemble_info *info)
+{
+ return print_insn_nios2 (address, info, BFD_ENDIAN_LITTLE);
+}
diff --git a/disas/ppc.c b/disas/ppc.c
index bd05623a79..ed7e0d0b9c 100644
--- a/disas/ppc.c
+++ b/disas/ppc.c
@@ -1955,6 +1955,9 @@ extract_tbr (unsigned long insn,
#define POWER4 PPC_OPCODE_POWER4
#define POWER5 PPC_OPCODE_POWER5
#define POWER6 PPC_OPCODE_POWER6
+/* Documentation purposes only; we don't actually check the isa for disas. */
+#define POWER7 PPC_OPCODE_POWER6
+#define POWER9 PPC_OPCODE_POWER6
#define CELL PPC_OPCODE_CELL
#define PPC32 PPC_OPCODE_32 | PPC_OPCODE_PPC
#define PPC64 PPC_OPCODE_64 | PPC_OPCODE_PPC
@@ -3589,6 +3592,13 @@ const struct powerpc_opcode powerpc_opcodes[] = {
{ "lbzux", X(31,119), X_MASK, COM, { RT, RAL, RB } },
{ "popcntb", X(31,122), XRB_MASK, POWER5, { RA, RS } },
+{ "popcntw", X(31,378), XRB_MASK, POWER7, { RA, RS } },
+{ "popcntd", X(31,506), XRB_MASK, POWER7, { RA, RS } },
+
+{ "cnttzw", XRC(31,538,0), XRB_MASK, POWER9, { RA, RS } },
+{ "cnttzw.", XRC(31,538,1), XRB_MASK, POWER9, { RA, RS } },
+{ "cnttzd", XRC(31,570,0), XRB_MASK, POWER9, { RA, RS } },
+{ "cnttzd.", XRC(31,570,1), XRB_MASK, POWER9, { RA, RS } },
{ "not", XRC(31,124,0), X_MASK, COM, { RA, RS, RBS } },
{ "nor", XRC(31,124,0), X_MASK, COM, { RA, RS, RB } },
diff --git a/docs/colo-proxy.txt b/docs/colo-proxy.txt
index 76767cb34f..c4941de198 100644
--- a/docs/colo-proxy.txt
+++ b/docs/colo-proxy.txt
@@ -158,7 +158,9 @@ secondary.
== Usage ==
-Here, we use demo ip and port discribe more clearly.
+Here is an example using demonstration IP and port addresses to more
+clearly describe the usage.
+
Primary(ip:3.3.3.3):
-netdev tap,id=hn0,vhost=off,script=/etc/qemu-ifup,downscript=/etc/qemu-ifdown
-device e1000,id=e0,netdev=hn0,mac=52:a4:00:12:78:66
diff --git a/docs/lockcnt.txt b/docs/lockcnt.txt
new file mode 100644
index 0000000000..2a79b3205b
--- /dev/null
+++ b/docs/lockcnt.txt
@@ -0,0 +1,277 @@
+DOCUMENTATION FOR LOCKED COUNTERS (aka QemuLockCnt)
+===================================================
+
+QEMU often uses reference counts to track data structures that are being
+accessed and should not be freed.  For example, a loop that invokes
+callbacks like this is not safe:
+
+ QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
+ if (ioh->revents & G_IO_OUT) {
+ ioh->fd_write(ioh->opaque);
+ }
+ }
+
+QLIST_FOREACH_SAFE protects against deletion of the current node (ioh)
+by stashing away its "next" pointer. However, ioh->fd_write could
+actually delete the next node from the list. The simplest way to
+avoid this is to mark the node as deleted, and remove it from the
+list in the above loop:
+
+ QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
+ if (ioh->deleted) {
+ QLIST_REMOVE(ioh, next);
+ g_free(ioh);
+ } else {
+ if (ioh->revents & G_IO_OUT) {
+ ioh->fd_write(ioh->opaque);
+ }
+ }
+ }
+
+If however this loop must also be reentrant, i.e. it is possible that
+ioh->fd_write invokes the loop again, some kind of counting is needed:
+
+ walking_handlers++;
+ QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
+ if (ioh->deleted) {
+ if (walking_handlers == 1) {
+ QLIST_REMOVE(ioh, next);
+ g_free(ioh);
+ }
+ } else {
+ if (ioh->revents & G_IO_OUT) {
+ ioh->fd_write(ioh->opaque);
+ }
+ }
+ }
+ walking_handlers--;
+
+One may think of using the RCU primitives, rcu_read_lock() and
+rcu_read_unlock(); effectively, the RCU nesting count would take
+the place of the walking_handlers global variable. Indeed,
+reference counting and RCU have similar purposes, but their usage in
+general is complementary:
+
+- reference counting is fine-grained and limited to a single data
+ structure; RCU delays reclamation of *all* RCU-protected data
+ structures;
+
+- reference counting works even in the presence of code that keeps
+ a reference for a long time; RCU critical sections in principle
+ should be kept short;
+
+- reference counting is often applied to code that is not thread-safe
+ but is reentrant; in fact, usage of reference counting in QEMU predates
+ the introduction of threads by many years. RCU is generally used to
+ protect readers from other threads freeing memory after concurrent
+ modifications to a data structure.
+
+- reclaiming data can be done by a separate thread in the case of RCU;
+ this can improve performance, but also delay reclamation undesirably.
+ With reference counting, reclamation is deterministic.
+
+This file documents QemuLockCnt, an abstraction for using reference
+counting in code that has to be both thread-safe and reentrant.
+
+
+QemuLockCnt concepts
+--------------------
+
+A QemuLockCnt comprises both a counter and a mutex; it has primitives
+to increment and decrement the counter, and to take and release the
+mutex. The counter notes how many visits to the data structures are
+taking place (the visits could be from different threads, or there could
+be multiple reentrant visits from the same thread). The basic rules
+governing the counter/mutex pair then are the following:
+
+- Data protected by the QemuLockCnt must not be freed unless the
+ counter is zero and the mutex is taken.
+
+- A new visit cannot be started while the counter is zero and the
+ mutex is taken.
+
+Most of the time, the mutex protects all writes to the data structure,
+not just frees, though there could be cases where this is not necessary.
+
+Reads, instead, can be done without taking the mutex, as long as the
+readers and writers use the same macros that are used for RCU, for
+example atomic_rcu_read, atomic_rcu_set, QLIST_FOREACH_RCU, etc. This is
+because the reads are done outside a lock and a set or QLIST_INSERT_HEAD
+can happen concurrently with the read. The RCU API ensures that the
+processor and the compiler see all required memory barriers.
+
+This could be implemented simply by protecting the counter with the
+mutex, for example:
+
+ // (1)
+ qemu_mutex_lock(&walking_handlers_mutex);
+ walking_handlers++;
+ qemu_mutex_unlock(&walking_handlers_mutex);
+
+ ...
+
+ // (2)
+ qemu_mutex_lock(&walking_handlers_mutex);
+ if (--walking_handlers == 0) {
+ QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
+ if (ioh->deleted) {
+ QLIST_REMOVE(ioh, next);
+ g_free(ioh);
+ }
+ }
+ }
+ qemu_mutex_unlock(&walking_handlers_mutex);
+
+Here, no frees can happen in the code represented by the ellipsis.
+If another thread is executing critical section (2), that part of
+the code cannot be entered, because the thread will not be able
+to increment the walking_handlers variable. And of course
+during the visit any other thread will see a nonzero value for
+walking_handlers, as in the single-threaded code.
+
+Note that it is possible for multiple concurrent accesses to delay
+the cleanup arbitrarily; in other words, for the walking_handlers
+counter to never become zero. For this reason, this technique is
+more easily applicable if concurrent access to the structure is rare.
+
+However, critical sections are easy to forget since you have to do
+them for each modification of the counter. QemuLockCnt ensures that
+all modifications of the counter take the lock appropriately, and it
+can also be more efficient in two ways:
+
+- it avoids taking the lock for many operations (for example
+ incrementing the counter while it is non-zero);
+
+- on some platforms, one can implement QemuLockCnt to hold the counter
+ and the mutex in a single word, making the fast path no more expensive
+ than simply managing a counter using atomic operations (see
+ docs/atomics.txt). This can be very helpful if concurrent access to
+ the data structure is expected to be rare.
+
+
+Using the same mutex for frees and writes can still incur some small
+inefficiencies; for example, a visit can never start if the counter is
+zero and the mutex is taken---even if the mutex is taken by a write,
+which in principle need not block a visit of the data structure.
+However, these are usually not a problem if any of the following
+assumptions are valid:
+
+- concurrent access is possible but rare
+
+- writes are rare
+
+- writes are frequent, but this kind of write (e.g. appending to a
+ list) has a very small critical section.
+
+For example, QEMU uses QemuLockCnt to manage an AioContext's list of
+bottom halves and file descriptor handlers. Modifications to the list
+of file descriptor handlers are rare. Creation of a new bottom half is
+frequent and can happen on a fast path; however: 1) it is almost never
+concurrent with a visit to the list of bottom halves; 2) it only has
+three instructions in the critical path, two assignments and a smp_wmb().
+
+
+QemuLockCnt API
+---------------
+
+The QemuLockCnt API is described in include/qemu/thread.h.
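+
+As a quick orientation, the functions used in the patterns below roughly
+correspond to the following declarations.  This is an illustrative sketch
+only; the prototypes in include/qemu/thread.h are authoritative.
+
+    void qemu_lockcnt_init(QemuLockCnt *lockcnt);
+    void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);
+
+    /* Counter operations.  */
+    void qemu_lockcnt_inc(QemuLockCnt *lockcnt);
+    void qemu_lockcnt_dec(QemuLockCnt *lockcnt);
+    unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);
+
+    /* Mutex operations.  */
+    void qemu_lockcnt_lock(QemuLockCnt *lockcnt);
+    void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);
+
+    /* Combined operations used by the patterns in the next section.  */
+    bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);
+    bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);
+    void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);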
+
+
+QemuLockCnt usage
+-----------------
+
+This section explains the typical usage patterns for QemuLockCnt functions.
+
+Setting a variable to a non-NULL value can be done between
+qemu_lockcnt_lock and qemu_lockcnt_unlock:
+
+ qemu_lockcnt_lock(&xyz_lockcnt);
+ if (!xyz) {
+ new_xyz = g_new(XYZ, 1);
+ ...
+ atomic_rcu_set(&xyz, new_xyz);
+ }
+ qemu_lockcnt_unlock(&xyz_lockcnt);
+
+Accessing the value can be done between qemu_lockcnt_inc and
+qemu_lockcnt_dec:
+
+ qemu_lockcnt_inc(&xyz_lockcnt);
+ if (xyz) {
+ XYZ *p = atomic_rcu_read(&xyz);
+ ...
+ /* Accesses can now be done through "p". */
+ }
+ qemu_lockcnt_dec(&xyz_lockcnt);
+
+Freeing the object can similarly use qemu_lockcnt_lock and
+qemu_lockcnt_unlock, but you also need to ensure that the count
+is zero (i.e. there is no concurrent visit). Because qemu_lockcnt_inc
+takes the QemuLockCnt's lock, the count cannot become non-zero while
+the object is being freed. Freeing an object looks like this:
+
+ qemu_lockcnt_lock(&xyz_lockcnt);
+ if (!qemu_lockcnt_count(&xyz_lockcnt)) {
+ g_free(xyz);
+ xyz = NULL;
+ }
+ qemu_lockcnt_unlock(&xyz_lockcnt);
+
+If an object has to be freed right after a visit, you can combine
+the decrement, the locking and the check on count as follows:
+
+ qemu_lockcnt_inc(&xyz_lockcnt);
+ if (xyz) {
+ XYZ *p = atomic_rcu_read(&xyz);
+ ...
+ /* Accesses can now be done through "p". */
+ }
+ if (qemu_lockcnt_dec_and_lock(&xyz_lockcnt)) {
+ g_free(xyz);
+ xyz = NULL;
+ qemu_lockcnt_unlock(&xyz_lockcnt);
+ }
+
+QemuLockCnt can also be used to access a list as follows:
+
+ qemu_lockcnt_inc(&io_handlers_lockcnt);
+ QLIST_FOREACH_RCU(ioh, &io_handlers, pioh) {
+ if (ioh->revents & G_IO_OUT) {
+ ioh->fd_write(ioh->opaque);
+ }
+ }
+
+ if (qemu_lockcnt_dec_and_lock(&io_handlers_lockcnt)) {
+ QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
+ if (ioh->deleted) {
+ QLIST_REMOVE(ioh, next);
+ g_free(ioh);
+ }
+ }
+ qemu_lockcnt_unlock(&io_handlers_lockcnt);
+ }
+
+Again, the RCU primitives are used because new items can be added to the
+list during the walk. QLIST_FOREACH_RCU ensures that the processor and
+the compiler see the appropriate memory barriers.
+
+An alternative pattern uses qemu_lockcnt_dec_if_lock:
+
+ qemu_lockcnt_inc(&io_handlers_lockcnt);
+ QLIST_FOREACH_SAFE_RCU(ioh, &io_handlers, next, pioh) {
+ if (ioh->deleted) {
+ if (qemu_lockcnt_dec_if_lock(&io_handlers_lockcnt)) {
+ QLIST_REMOVE(ioh, next);
+ g_free(ioh);
+ qemu_lockcnt_inc_and_unlock(&io_handlers_lockcnt);
+ }
+ } else {
+ if (ioh->revents & G_IO_OUT) {
+ ioh->fd_write(ioh->opaque);
+ }
+ }
+ }
+ qemu_lockcnt_dec(&io_handlers_lockcnt);
+
+Here you can use qemu_lockcnt_dec instead of qemu_lockcnt_dec_and_lock,
+because there is no special task to do if the count goes from 1 to 0.
diff --git a/docs/multiple-iothreads.txt b/docs/multiple-iothreads.txt
index 0e7cdb2c28..e4d340bbb7 100644
--- a/docs/multiple-iothreads.txt
+++ b/docs/multiple-iothreads.txt
@@ -84,9 +84,8 @@ How to synchronize with an IOThread
AioContext is not thread-safe so some rules must be followed when using file
descriptors, event notifiers, timers, or BHs across threads:
-1. AioContext functions can be called safely from file descriptor, event
-notifier, timer, or BH callbacks invoked by the AioContext. No locking is
-necessary.
+1. AioContext functions can always be called safely. They handle their
+own locking internally.
2. Other threads wishing to access the AioContext must use
aio_context_acquire()/aio_context_release() for mutual exclusion. Once the
@@ -94,16 +93,14 @@ context is acquired no other thread can access it or run event loop iterations
in this AioContext.
aio_context_acquire()/aio_context_release() calls may be nested. This
-means you can call them if you're not sure whether #1 applies.
+means you can call them if you're not sure whether #2 applies.
There is currently no lock ordering rule if a thread needs to acquire multiple
AioContexts simultaneously. Therefore, it is only safe for code holding the
QEMU global mutex to acquire other AioContexts.
-Side note: the best way to schedule a function call across threads is to create
-a BH in the target AioContext beforehand and then call qemu_bh_schedule(). No
-acquire/release or locking is needed for the qemu_bh_schedule() call. But be
-sure to acquire the AioContext for aio_bh_new() if necessary.
+Side note: the best way to schedule a function call across threads is to call
+aio_bh_schedule_oneshot(). No acquire/release or locking is needed.
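+
+For example (an illustrative sketch: the callback name and the opaque
+pointer are made up, aio_bh_schedule_oneshot() is the real API):
+
+  static void my_cb(void *opaque)
+  {
+      /* Runs once in the thread of the target AioContext.  */
+  }
+
+  aio_bh_schedule_oneshot(target_ctx, my_cb, opaque);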
AioContext and the block layer
------------------------------
diff --git a/docs/pcie.txt b/docs/pcie.txt
index 9fb20aaed9..5bada24a15 100644
--- a/docs/pcie.txt
+++ b/docs/pcie.txt
@@ -110,18 +110,18 @@ Plug only PCI Express devices into PCI Express Ports.
-device ioh3420,id=root_port1,chassis=x,slot=y[,bus=pcie.0][,addr=z] \
-device <dev>,bus=root_port1
2.2.2 Using multi-function PCI Express Root Ports:
- -device ioh3420,id=root_port1,multifunction=on,chassis=x,slot=y[,bus=pcie.0][,addr=z.0] \
- -device ioh3420,id=root_port2,chassis=x1,slot=y1[,bus=pcie.0][,addr=z.1] \
- -device ioh3420,id=root_port3,chassis=x2,slot=y2[,bus=pcie.0][,addr=z.2] \
-2.2.2 Plugging a PCI Express device into a Switch:
+ -device ioh3420,id=root_port1,multifunction=on,chassis=x,addr=z.0[,slot=y][,bus=pcie.0] \
+ -device ioh3420,id=root_port2,chassis=x1,addr=z.1[,slot=y1][,bus=pcie.0] \
+ -device ioh3420,id=root_port3,chassis=x2,addr=z.2[,slot=y2][,bus=pcie.0] \
+2.2.3 Plugging a PCI Express device into a Switch:
-device ioh3420,id=root_port1,chassis=x,slot=y[,bus=pcie.0][,addr=z] \
-device x3130-upstream,id=upstream_port1,bus=root_port1[,addr=x] \
-device xio3130-downstream,id=downstream_port1,bus=upstream_port1,chassis=x1,slot=y1[,addr=z1]] \
-device <dev>,bus=downstream_port1
Notes:
- - (slot, chassis) pair is mandatory and must be
- unique for each PCI Express Root Port.
+ - (slot, chassis) pair is mandatory and must be unique for each
+ PCI Express Root Port. slot defaults to 0 when not specified.
- 'addr' parameter can be 0 for all the examples above.
diff --git a/docs/qapi-code-gen.txt b/docs/qapi-code-gen.txt
index 2841c5144a..7eb7be12ab 100644
--- a/docs/qapi-code-gen.txt
+++ b/docs/qapi-code-gen.txt
@@ -44,40 +44,154 @@ Input must be ASCII (although QMP supports full Unicode strings, the
QAPI parser does not). At present, there is no place where a QAPI
schema requires the use of JSON numbers or null.
+
+=== Comments ===
+
Comments are allowed; anything between an unquoted # and the following
-newline is ignored. Although there is not yet a documentation
-generator, a form of stylized comments has developed for consistently
-documenting details about an expression and when it was added to the
-schema. The documentation is delimited between two lines of ##, then
-the first line names the expression, an optional overview is provided,
-then individual documentation about each member of 'data' is provided,
-and finally, a 'Since: x.y.z' tag lists the release that introduced
-the expression. Optional members are tagged with the phrase
-'#optional', often with their default value; and extensions added
-after the expression was first released are also given a '(since
-x.y.z)' comment. For example:
-
- ##
- # @BlockStats:
- #
- # Statistics of a virtual block device or a block backing device.
- #
- # @device: #optional If the stats are for a virtual block device, the name
- # corresponding to the virtual block device.
- #
- # @stats: A @BlockDeviceStats for the device.
- #
- # @parent: #optional This describes the file block device if it has one.
- #
- # @backing: #optional This describes the backing block device if it has one.
- # (Since 2.0)
- #
- # Since: 0.14.0
- ##
- { 'struct': 'BlockStats',
- 'data': {'*device': 'str', 'stats': 'BlockDeviceStats',
- '*parent': 'BlockStats',
- '*backing': 'BlockStats'} }
+newline is ignored.
+
+A multi-line comment that starts and ends with a '##' line is a
+documentation comment. These are parsed by the documentation
+generator, which recognizes certain markup detailed below.
+
+
+==== Documentation markup ====
+
+Comment text starting with '=' is a section title:
+
+ # = Section title
+
+Double the '=' for a subsection title:
+
+ # == Subsection title
+
+'|' denotes examples:
+
+ # | Text of the example, may span
+ # | multiple lines
+
+'*' starts an itemized list:
+
+ # * First item, may span
+ # multiple lines
+ # * Second item
+
+You can also use '-' instead of '*'.
+
+A decimal number followed by '.' starts a numbered list:
+
+ # 1. First item, may span
+ # multiple lines
+ # 2. Second item
+
+The actual number doesn't matter. You could even use '*' instead of
+'2.' for the second item.
+
+Lists can't be nested. Blank lines are currently not supported within
+lists.
+
+Additional whitespace between the initial '#' and the comment text is
+permitted.
+
+*foo* and _foo_ are for strong and emphasis styles respectively (they
+do not work over multiple lines). @foo is used to reference a name in
+the schema.
+
+Example:
+
+##
+# = Section
+# == Subsection
+#
+# Some text foo with *strong* and _emphasis_
+# 1. with a list
+# 2. like that
+#
+# And some code:
+# | $ echo foo
+# | -> do this
+# | <- get that
+#
+##
+
+
+==== Expression documentation ====
+
+Each expression that isn't an include directive must be preceded by a
+documentation block. Such blocks are called expression documentation
+blocks.
+
+The documentation block consists of a first line naming the
+expression, an optional overview, a description of each argument (for
+commands and events) or member (for structs, unions and alternates),
+and optional tagged sections.
+
+FIXME: the parser accepts these things in almost any order.
+
+Optional arguments / members are tagged with the phrase '#optional',
+often with their default value; and extensions added after the
+expression was first released are also given a '(since x.y.z)'
+comment.
+
+A tagged section starts with one of the following words:
+"Note:"/"Notes:", "Since:", "Example"/"Examples", "Returns:", "TODO:".
+The section ends with the start of a new section.
+
+A 'Since: x.y.z' tagged section lists the release that introduced the
+expression.
+
+For example:
+
+##
+# @BlockStats:
+#
+# Statistics of a virtual block device or a block backing device.
+#
+# @device: #optional If the stats are for a virtual block device, the name
+# corresponding to the virtual block device.
+#
+# @node-name: #optional The node name of the device. (since 2.3)
+#
+# ... more members ...
+#
+# Since: 0.14.0
+##
+{ 'struct': 'BlockStats',
+ 'data': {'*device': 'str', '*node-name': 'str',
+ ... more members ... } }
+
+##
+# @query-blockstats:
+#
+# Query the @BlockStats for all virtual block devices.
+#
+# @query-nodes: #optional If true, the command will query all the
+# block nodes ... explain, explain ... (since 2.3)
+#
+# Returns: A list of @BlockStats for each virtual block device.
+#
+# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-blockstats" }
+# <- {
+# ... lots of output ...
+# }
+#
+##
+{ 'command': 'query-blockstats',
+ 'data': { '*query-nodes': 'bool' },
+ 'returns': ['BlockStats'] }
+
+==== Free-form documentation ====
+
+A documentation block that isn't an expression documentation block is
+a free-form documentation block. These may be used to provide
+additional text and structuring content.
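+
+For instance, a free-form block could introduce a section of the schema
+(purely illustrative):
+
+##
+# = Introduction
+#
+# Introductory text that is not attached to any particular expression.
+##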
+
+
+=== Schema overview ===
The schema sets up a series of types, as well as commands and events
that will use those types. Forward references are allowed: the parser
diff --git a/docs/qemu-ga-ref.texi b/docs/qemu-ga-ref.texi
new file mode 100644
index 0000000000..87cc8d01a5
--- /dev/null
+++ b/docs/qemu-ga-ref.texi
@@ -0,0 +1,78 @@
+\input texinfo
+@setfilename qemu-ga-ref.info
+
+@exampleindent 0
+@paragraphindent 0
+
+@settitle QEMU Guest Agent Protocol Reference
+
+@iftex
+@center @image{docs/qemu_logo}
+@end iftex
+
+@copying
+This is the QEMU Guest Agent Protocol reference manual.
+
+Copyright @copyright{} 2016 The QEMU Project developers
+
+@quotation
+This manual is free documentation: you can redistribute it and/or
+modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation, either version 2 of the
+License, or (at your option) any later version.
+
+This manual is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this manual. If not, see http://www.gnu.org/licenses/.
+@end quotation
+@end copying
+
+@dircategory QEMU
+@direntry
+* QEMU-GA-Ref: (qemu-ga-ref). QEMU Guest Agent Protocol Reference
+@end direntry
+
+@titlepage
+@title Guest Agent Protocol Reference Manual
+@subtitle QEMU version @value{VERSION}
+@page
+@vskip 0pt plus 1filll
+@insertcopying
+@end titlepage
+
+@contents
+
+@ifnottex
+@node Top
+@top QEMU Guest Agent protocol reference
+@end ifnottex
+
+@menu
+* API Reference::
+* Commands and Events Index::
+* Data Types Index::
+@end menu
+
+@node API Reference
+@chapter API Reference
+
+@c for texi2pod:
+@c man begin DESCRIPTION
+
+@include qemu-ga-qapi.texi
+
+@c man end
+
+@node Commands and Events Index
+@unnumbered Commands and Events Index
+@printindex fn
+
+@node Data Types Index
+@unnumbered Data Types Index
+@printindex tp
+
+@bye
diff --git a/docs/qemu-qmp-ref.texi b/docs/qemu-qmp-ref.texi
new file mode 100644
index 0000000000..818e52573b
--- /dev/null
+++ b/docs/qemu-qmp-ref.texi
@@ -0,0 +1,78 @@
+\input texinfo
+@setfilename qemu-qmp-ref.info
+
+@exampleindent 0
+@paragraphindent 0
+
+@settitle QEMU QMP Reference Manual
+
+@iftex
+@center @image{docs/qemu_logo}
+@end iftex
+
+@copying
+This is the QEMU QMP reference manual.
+
+Copyright @copyright{} 2016 The QEMU Project developers
+
+@quotation
+This manual is free documentation: you can redistribute it and/or
+modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation, either version 2 of the
+License, or (at your option) any later version.
+
+This manual is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this manual. If not, see http://www.gnu.org/licenses/.
+@end quotation
+@end copying
+
+@dircategory QEMU
+@direntry
+* QEMU-QMP-Ref: (qemu-qmp-ref). QEMU QMP Reference Manual
+@end direntry
+
+@titlepage
+@title QMP Reference Manual
+@subtitle QEMU version @value{VERSION}
+@page
+@vskip 0pt plus 1filll
+@insertcopying
+@end titlepage
+
+@contents
+
+@ifnottex
+@node Top
+@top QEMU QMP reference
+@end ifnottex
+
+@menu
+* API Reference::
+* Commands and Events Index::
+* Data Types Index::
+@end menu
+
+@node API Reference
+@chapter API Reference
+
+@c for texi2pod:
+@c man begin DESCRIPTION
+
+@include qemu-qapi.texi
+
+@c man end
+
+@node Commands and Events Index
+@unnumbered Commands and Events Index
+@printindex fn
+
+@node Data Types Index
+@unnumbered Data Types Index
+@printindex tp
+
+@bye
diff --git a/docs/qemu_logo.pdf b/docs/qemu_logo.pdf
new file mode 100644
index 0000000000..294cb7dec5
--- /dev/null
+++ b/docs/qemu_logo.pdf
Binary files differ
diff --git a/docs/qmp-commands.txt b/docs/qmp-commands.txt
deleted file mode 100644
index abf210a596..0000000000
--- a/docs/qmp-commands.txt
+++ /dev/null
@@ -1,3824 +0,0 @@
- QMP Supported Commands
- ----------------------
-
-This document describes all commands currently supported by QMP.
-
-Most of the time their usage is exactly the same as in the user Monitor; this
-means that any other document which also describes commands (the manpage,
-QEMU's manual, etc.) can and should be consulted.
-
-QMP has two types of commands: regular and query commands. Regular commands
-usually change the Virtual Machine's state someway, while query commands just
-return information. The sections below are divided accordingly.
-
-It's important to observe that all communication examples are formatted in
-a reader-friendly way, so that they're easier to understand. However, in real
-protocol usage, they're emitted as a single line.
-
-Also, the following notation is used to denote data flow:
-
--> data issued by the Client
-<- Server data response
-
-Please refer to the QMP specification (docs/qmp-spec.txt) for detailed
-information on the Server command and response formats.
-
-NOTE: This document is temporary and will be replaced soon.
-
-1. Stability Considerations
-===========================
-
-The current QMP command set (described in this file) may be useful for a
-number of use cases; however, it's limited and several commands have badly
-defined semantics, especially with regard to command completion.
-
-These problems are going to be solved incrementally in the next QEMU releases
-and we're going to establish a deprecation policy for badly defined commands.
-
-If you're planning to adopt QMP, please observe the following:
-
- 1. The deprecation policy will take effect and be documented soon, please
- check the documentation of each used command as soon as a new release of
- QEMU is available
-
- 2. DO NOT rely on anything which is not explicitly documented
-
- 3. Errors, in particular, are not documented. Applications should NOT check
- for specific error classes or data (it's strongly recommended to only
- check for the "error" key)
-
-2. Regular Commands
-===================
-
-Server's responses in the examples below are always a success response, please
-refer to the QMP specification for more details on error responses.
-
-quit
-----
-
-Quit the emulator.
-
-Arguments: None.
-
-Example:
-
--> { "execute": "quit" }
-<- { "return": {} }
-
-eject
------
-
-Eject a removable medium.
-
-Arguments:
-
-- "force": force ejection (json-bool, optional)
-- "device": block device name (deprecated, use @id instead)
- (json-string, optional)
-- "id": the name or QOM path of the guest device (json-string, optional)
-
-Example:
-
--> { "execute": "eject", "arguments": { "id": "ide0-1-0" } }
-<- { "return": {} }
-
-Note: The "force" argument defaults to false.
-
-change
-------
-
-Change a removable medium or VNC configuration.
-
-Arguments:
-
-- "device": device name (json-string)
-- "target": filename or item (json-string)
-- "arg": additional argument (json-string, optional)
-
-Examples:
-
-1. Change a removable medium
-
--> { "execute": "change",
- "arguments": { "device": "ide1-cd0",
- "target": "/srv/images/Fedora-12-x86_64-DVD.iso" } }
-<- { "return": {} }
-
-2. Change VNC password
-
--> { "execute": "change",
- "arguments": { "device": "vnc", "target": "password",
- "arg": "foobar1" } }
-<- { "return": {} }
-
-screendump
-----------
-
-Save screen into PPM image.
-
-Arguments:
-
-- "filename": file path (json-string)
-
-Example:
-
--> { "execute": "screendump", "arguments": { "filename": "/tmp/image" } }
-<- { "return": {} }
-
-stop
-----
-
-Stop the emulator.
-
-Arguments: None.
-
-Example:
-
--> { "execute": "stop" }
-<- { "return": {} }
-
-cont
-----
-
-Resume emulation.
-
-Arguments: None.
-
-Example:
-
--> { "execute": "cont" }
-<- { "return": {} }
-
-system_wakeup
--------------
-
-Wakeup guest from suspend.
-
-Arguments: None.
-
-Example:
-
--> { "execute": "system_wakeup" }
-<- { "return": {} }
-
-system_reset
-------------
-
-Reset the system.
-
-Arguments: None.
-
-Example:
-
--> { "execute": "system_reset" }
-<- { "return": {} }
-
-system_powerdown
-----------------
-
-Send system power down event.
-
-Arguments: None.
-
-Example:
-
--> { "execute": "system_powerdown" }
-<- { "return": {} }
-
-device_add
-----------
-
-Add a device.
-
-Arguments:
-
-- "driver": the name of the new device's driver (json-string)
-- "bus": the device's parent bus (device tree path, json-string, optional)
-- "id": the device's ID, must be unique (json-string)
-- device properties
-
-Example:
-
--> { "execute": "device_add", "arguments": { "driver": "e1000", "id": "net1" } }
-<- { "return": {} }
-
-Notes:
-
-(1) For detailed information about this command, please refer to the
- 'docs/qdev-device-use.txt' file.
-
-(2) It's possible to list device properties by running QEMU with the
- "-device DEVICE,\?" command-line argument, where DEVICE is the device's name
-
-device_del
-----------
-
-Remove a device.
-
-Arguments:
-
-- "id": the device's ID or QOM path (json-string)
-
-Example:
-
--> { "execute": "device_del", "arguments": { "id": "net1" } }
-<- { "return": {} }
-
-Example:
-
--> { "execute": "device_del", "arguments": { "id": "/machine/peripheral-anon/device[0]" } }
-<- { "return": {} }
-
-send-key
-----------
-
-Send keys to VM.
-
-Arguments:
-
-keys array:
- - "key": key sequence (a json-array of key union values,
- union can be number or qcode enum)
-
-- hold-time: time to delay key up events, milliseconds. Defaults to 100
- (json-int, optional)
-
-Example:
-
--> { "execute": "send-key",
- "arguments": { "keys": [ { "type": "qcode", "data": "ctrl" },
- { "type": "qcode", "data": "alt" },
- { "type": "qcode", "data": "delete" } ] } }
-<- { "return": {} }
-
-cpu
----
-
-Set the default CPU.
-
-Arguments:
-
-- "index": the CPU's index (json-int)
-
-Example:
-
--> { "execute": "cpu", "arguments": { "index": 0 } }
-<- { "return": {} }
-
-Note: CPUs' indexes are obtained with the 'query-cpus' command.
-
-cpu-add
--------
-
-Add a virtual CPU.
-
-Arguments:
-
-- "id": cpu id (json-int)
-
-Example:
-
--> { "execute": "cpu-add", "arguments": { "id": 2 } }
-<- { "return": {} }
-
-memsave
--------
-
-Save to disk virtual memory dump starting at 'val' of size 'size'.
-
-Arguments:
-
-- "val": the starting address (json-int)
-- "size": the memory size, in bytes (json-int)
-- "filename": file path (json-string)
-- "cpu": virtual CPU index (json-int, optional)
-
-Example:
-
--> { "execute": "memsave",
- "arguments": { "val": 10,
- "size": 100,
- "filename": "/tmp/virtual-mem-dump" } }
-<- { "return": {} }
-
-pmemsave
---------
-
-Save to disk physical memory dump starting at 'val' of size 'size'.
-
-Arguments:
-
-- "val": the starting address (json-int)
-- "size": the memory size, in bytes (json-int)
-- "filename": file path (json-string)
-
-Example:
-
--> { "execute": "pmemsave",
- "arguments": { "val": 10,
- "size": 100,
- "filename": "/tmp/physical-mem-dump" } }
-<- { "return": {} }
-
-inject-nmi
-----------
-
-Inject an NMI on the default CPU (x86/s390) or all CPUs (ppc64).
-
-Arguments: None.
-
-Example:
-
--> { "execute": "inject-nmi" }
-<- { "return": {} }
-
-Note: inject-nmi fails when the guest doesn't support NMI injection.
-
-ringbuf-write
--------------
-
-Write to a ring buffer character device.
-
-Arguments:
-
-- "device": ring buffer character device name (json-string)
-- "data": data to write (json-string)
-- "format": data format (json-string, optional)
- - Possible values: "utf8" (default), "base64"
-
-Example:
-
--> { "execute": "ringbuf-write",
- "arguments": { "device": "foo",
- "data": "abcdefgh",
- "format": "utf8" } }
-<- { "return": {} }
-
-ringbuf-read
--------------
-
-Read from a ring buffer character device.
-
-Arguments:
-
-- "device": ring buffer character device name (json-string)
-- "size": how many bytes to read at most (json-int)
- - Number of data bytes, not number of characters in encoded data
-- "format": data format (json-string, optional)
- - Possible values: "utf8" (default), "base64"
- - Naturally, format "utf8" works only when the ring buffer
- contains valid UTF-8 text. Invalid UTF-8 sequences get
- replaced. Bug: replacement doesn't work. Bug: can screw
- up on encountering NUL characters, after the ring buffer
- lost data, and when reading stops because the size limit
- is reached.
-
-Example:
-
--> { "execute": "ringbuf-read",
- "arguments": { "device": "foo",
- "size": 1000,
- "format": "utf8" } }
-<- {"return": "abcdefgh"}
-
-xen-save-devices-state
-----------------------
-
-Save the state of all devices to file. The RAM and the block devices
-of the VM are not saved by this command.
-
-Arguments:
-
-- "filename": the file to save the state of the devices to as binary
-data. See xen-save-devices-state.txt for a description of the binary
-format.
-
-Example:
-
--> { "execute": "xen-save-devices-state",
- "arguments": { "filename": "/tmp/save" } }
-<- { "return": {} }
-
-xen-load-devices-state
-----------------------
-
-Load the state of all devices from file. The RAM and the block devices
-of the VM are not loaded by this command.
-
-Arguments:
-
-- "filename": the file to load the state of the devices from as binary
-data. See xen-save-devices-state.txt for a description of the binary
-format.
-
-Example:
-
--> { "execute": "xen-load-devices-state",
- "arguments": { "filename": "/tmp/resume" } }
-<- { "return": {} }
-
-xen-set-global-dirty-log
-------------------------
-
-Enable or disable the global dirty log mode.
-
-Arguments:
-
-- "enable": Enable it or disable it.
-
-Example:
-
--> { "execute": "xen-set-global-dirty-log",
- "arguments": { "enable": true } }
-<- { "return": {} }
-
-migrate
--------
-
-Migrate to URI.
-
-Arguments:
-
-- "blk": block migration, full disk copy (json-bool, optional)
-- "inc": incremental disk copy (json-bool, optional)
-- "uri": Destination URI (json-string)
-
-Example:
-
--> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
-<- { "return": {} }
-
-Notes:
-
-(1) The 'query-migrate' command should be used to check migration's progress
- and final result (this information is provided by the 'status' member)
-(2) All boolean arguments default to false
-(3) The user Monitor's "detach" argument is invalid in QMP and should not
- be used
-
-migrate_cancel
---------------
-
-Cancel the current migration.
-
-Arguments: None.
-
-Example:
-
--> { "execute": "migrate_cancel" }
-<- { "return": {} }
-
-migrate-incoming
-----------------
-
-Continue an incoming migration
-
-Arguments:
-
-- "uri": Source/listening URI (json-string)
-
-Example:
-
--> { "execute": "migrate-incoming", "arguments": { "uri": "tcp::4446" } }
-<- { "return": {} }
-
-Notes:
-
-(1) QEMU must be started with -incoming defer to allow migrate-incoming to
- be used
-(2) The uri format is the same as for -incoming
-
-migrate-set-cache-size
-----------------------
-
-Set the cache size to be used by XBZRLE migration. The cache size will be
-rounded down to the nearest power of 2.
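-For example, a requested value of 1000000 bytes would result in a cache size
-of 524288 bytes, the nearest lower power of 2.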
-
-Arguments:
-
-- "value": cache size in bytes (json-int)
-
-Example:
-
--> { "execute": "migrate-set-cache-size", "arguments": { "value": 536870912 } }
-<- { "return": {} }
-
-migrate-start-postcopy
-----------------------
-
-Switch an in-progress migration to postcopy mode. Ignored after the end of
-migration (or once already in postcopy).
-
-Example:
--> { "execute": "migrate-start-postcopy" }
-<- { "return": {} }
-
-query-migrate-cache-size
-------------------------
-
-Show cache size to be used by XBZRLE migration
-
-returns a json-object with the following information:
-- "size" : json-int
-
-Example:
-
--> { "execute": "query-migrate-cache-size" }
-<- { "return": 67108864 }
-
-migrate_set_speed
------------------
-
-Set maximum speed for migrations.
-
-Arguments:
-
-- "value": maximum speed, in bytes per second (json-int)
-
-Example:
-
--> { "execute": "migrate_set_speed", "arguments": { "value": 1024 } }
-<- { "return": {} }
-
-migrate_set_downtime
---------------------
-
-Set maximum tolerated downtime (in seconds) for migrations.
-
-Arguments:
-
-- "value": maximum downtime (json-number)
-
-Example:
-
--> { "execute": "migrate_set_downtime", "arguments": { "value": 0.1 } }
-<- { "return": {} }
-
-x-colo-lost-heartbeat
---------------------
-
-Tell COLO that the heartbeat is lost; a failover or takeover is needed.
-
-Example:
-
--> { "execute": "x-colo-lost-heartbeat" }
-<- { "return": {} }
-
-client_migrate_info
--------------------
-
-Set migration information for remote display. This makes the server
-ask the client to automatically reconnect using the new parameters
-once migration has finished successfully. Only implemented for SPICE.
-
-Arguments:
-
-- "protocol": must be "spice" (json-string)
-- "hostname": migration target hostname (json-string)
-- "port": spice tcp port for plaintext channels (json-int, optional)
-- "tls-port": spice tcp port for tls-secured channels (json-int, optional)
-- "cert-subject": server certificate subject (json-string, optional)
-
-Example:
-
--> { "execute": "client_migrate_info",
- "arguments": { "protocol": "spice",
- "hostname": "virt42.lab.kraxel.org",
- "port": 1234 } }
-<- { "return": {} }
-
-dump-guest-memory
------------------
-
-Dump guest memory to file. The file can be processed with crash or gdb.
-
-Arguments:
-
-- "paging": do paging to get guest's memory mapping (json-bool)
-- "protocol": destination file(started with "file:") or destination file
- descriptor (started with "fd:") (json-string)
-- "detach": if specified, command will return immediately, without waiting
- for the dump to finish. The user can track progress using
- "query-dump". (json-bool)
-- "begin": the starting physical address. It's optional, and should be specified
- with length together (json-int)
-- "length": the memory size, in bytes. It's optional, and should be specified
- with begin together (json-int)
-- "format": the format of guest memory dump. It's optional, and can be
- elf|kdump-zlib|kdump-lzo|kdump-snappy, but non-elf formats will
- conflict with paging and filter, ie. begin and length (json-string)
-
-Example:
-
--> { "execute": "dump-guest-memory", "arguments": { "protocol": "fd:dump" } }
-<- { "return": {} }
-
-Notes:
-
-(1) All boolean arguments default to false
-
-query-dump-guest-memory-capability
-----------------------------------
-
-Show available formats for 'dump-guest-memory'
-
-Example:
-
--> { "execute": "query-dump-guest-memory-capability" }
-<- { "return": { "formats":
- ["elf", "kdump-zlib", "kdump-lzo", "kdump-snappy"] }
-
-query-dump
-----------
-
-Query background dump status.
-
-Arguments: None.
-
-Example:
-
--> { "execute": "query-dump" }
-<- { "return": { "status": "active", "completed": 1024000,
- "total": 2048000 } }
-
-dump-skeys
-----------
-
-Save guest storage keys to file.
-
-Arguments:
-
-- "filename": file path (json-string)
-
-Example:
-
--> { "execute": "dump-skeys", "arguments": { "filename": "/tmp/skeys" } }
-<- { "return": {} }
-
-netdev_add
-----------
-
-Add host network device.
-
-Arguments:
-
-- "type": the device type, "tap", "user", ... (json-string)
-- "id": the device's ID, must be unique (json-string)
-- device options
-
-Example:
-
--> { "execute": "netdev_add",
- "arguments": { "type": "user", "id": "netdev1",
- "dnssearch": "example.org" } }
-<- { "return": {} }
-
-Note: The supported device options are the same ones supported by the '-netdev'
- command-line argument, which are listed in the '-help' output or QEMU's
- manual
-
-netdev_del
-----------
-
-Remove host network device.
-
-Arguments:
-
-- "id": the device's ID, must be unique (json-string)
-
-Example:
-
--> { "execute": "netdev_del", "arguments": { "id": "netdev1" } }
-<- { "return": {} }
-
-
-object-add
-----------
-
-Create QOM object.
-
-Arguments:
-
-- "qom-type": the object's QOM type, i.e. the class name (json-string)
-- "id": the object's ID, must be unique (json-string)
-- "props": a dictionary of object property values (optional, json-dict)
-
-Example:
-
--> { "execute": "object-add", "arguments": { "qom-type": "rng-random", "id": "rng1",
- "props": { "filename": "/dev/hwrng" } } }
-<- { "return": {} }
-
-object-del
-----------
-
-Remove QOM object.
-
-Arguments:
-
-- "id": the object's ID (json-string)
-
-Example:
-
--> { "execute": "object-del", "arguments": { "id": "rng1" } }
-<- { "return": {} }
-
-
-block_resize
-------------
-
-Resize a block image while a guest is running.
-
-Arguments:
-
-- "device": the device's ID, must be unique (json-string)
-- "node-name": the node name in the block driver state graph (json-string)
-- "size": new size
-
-Example:
-
--> { "execute": "block_resize", "arguments": { "device": "scratch", "size": 1073741824 } }
-<- { "return": {} }
-
-block-stream
-------------
-
-Copy data from a backing file into a block device.
-
-Arguments:
-
-- "job-id": Identifier for the newly-created block job. If omitted,
- the device name will be used. (json-string, optional)
-- "device": The device name or node-name of a root node (json-string)
-- "base": The file name of the backing image above which copying starts.
- It cannot be set if 'base-node' is also set (json-string, optional)
-- "base-node": the node name of the backing image above which copying starts.
- It cannot be set if 'base' is also set.
- (json-string, optional) (Since 2.8)
-- "backing-file": The backing file string to write into the active layer. This
- filename is not validated.
-
- If a pathname string is such that it cannot be resolved by
- QEMU, that means that subsequent QMP or HMP commands must use
- node-names for the image in question, as filename lookup
- methods will fail.
-
- If not specified, QEMU will automatically determine the
- backing file string to use, or error out if there is no
- obvious choice. Care should be taken when specifying the
- string, to specify a valid filename or protocol.
- (json-string, optional) (Since 2.1)
-- "speed": the maximum speed, in bytes per second (json-int, optional)
-- "on-error": the action to take on an error (default 'report'). 'stop' and
- 'enospc' can only be used if the block device supports io-status.
- (json-string, optional) (Since 2.1)
-
-Example:
-
--> { "execute": "block-stream", "arguments": { "device": "virtio0",
- "base": "/tmp/master.qcow2" } }
-<- { "return": {} }
-
-block-commit
-------------
-
-Live commit of data from overlay image nodes into backing nodes - i.e., writes
-data between 'top' and 'base' into 'base'.
-
-Arguments:
-
-- "job-id": Identifier for the newly-created block job. If omitted,
- the device name will be used. (json-string, optional)
-- "device": The device name or node-name of a root node (json-string)
-- "base": The file name of the backing image to write data into.
- If not specified, this is the deepest backing image
- (json-string, optional)
-- "top": The file name of the backing image within the image chain,
- which contains the topmost data to be committed down. If
- not specified, this is the active layer. (json-string, optional)
-
-- backing-file: The backing file string to write into the overlay
- image of 'top'. If 'top' is the active layer,
- specifying a backing file string is an error. This
- filename is not validated.
-
- If a pathname string is such that it cannot be
- resolved by QEMU, that means that subsequent QMP or
- HMP commands must use node-names for the image in
- question, as filename lookup methods will fail.
-
- If not specified, QEMU will automatically determine
- the backing file string to use, or error out if
- there is no obvious choice. Care should be taken
- when specifying the string, to specify a valid
- filename or protocol.
- (json-string, optional) (Since 2.1)
-
- If top == base, that is an error.
- If top == active, the job will not be completed by itself,
- user needs to complete the job with the block-job-complete
- command after getting the ready event. (Since 2.0)
-
- If the base image is smaller than top, then the base image
- will be resized to be the same size as top. If top is
- smaller than the base image, the base will not be
- truncated. If you want the base image size to match the
- size of the smaller top, you can safely truncate it
- yourself once the commit operation successfully completes.
- (json-string)
-- "speed": the maximum speed, in bytes per second (json-int, optional)
-
-
-Example:
-
--> { "execute": "block-commit", "arguments": { "device": "virtio0",
- "top": "/tmp/snap1.qcow2" } }
-<- { "return": {} }
-
-drive-backup
-------------
-
-Start a point-in-time copy of a block device to a new destination. The
-status of ongoing drive-backup operations can be checked with
-query-block-jobs where the BlockJobInfo.type field has the value 'backup'.
-The operation can be stopped before it has completed using the
-block-job-cancel command.
-
-Arguments:
-
-- "job-id": Identifier for the newly-created block job. If omitted,
- the device name will be used. (json-string, optional)
-- "device": the device name or node-name of a root node which should be copied.
- (json-string)
-- "target": the target of the new image. If the file exists, or if it is a
- device, the existing file/device will be used as the new
- destination. If it does not exist, a new file will be created.
- (json-string)
-- "format": the format of the new destination, default is to probe if 'mode' is
- 'existing', else the format of the source
- (json-string, optional)
-- "sync": what parts of the disk image should be copied to the destination;
- possibilities include "full" for all the disk, "top" for only the sectors
- allocated in the topmost image, "incremental" for only the dirty sectors in
- the bitmap, or "none" to only replicate new I/O (MirrorSyncMode).
-- "bitmap": dirty bitmap name for sync==incremental. Must be present if sync
- is "incremental", must NOT be present otherwise.
-- "mode": whether and how QEMU should create a new image
- (NewImageMode, optional, default 'absolute-paths')
-- "speed": the maximum speed, in bytes per second (json-int, optional)
-- "compress": true to compress data, if the target format supports it.
- (json-bool, optional, default false)
-- "on-source-error": the action to take on an error on the source, default
- 'report'. 'stop' and 'enospc' can only be used
- if the block device supports io-status.
- (BlockdevOnError, optional)
-- "on-target-error": the action to take on an error on the target, default
- 'report' (no limitations, since this applies to
- a different block device than device).
- (BlockdevOnError, optional)
-
-Example:
--> { "execute": "drive-backup", "arguments": { "device": "drive0",
- "sync": "full",
- "target": "backup.img" } }
-<- { "return": {} }
-
-blockdev-backup
----------------
-
-The device version of drive-backup: this command takes an existing named device
-as backup target.
-
-Arguments:
-
-- "job-id": Identifier for the newly-created block job. If omitted,
- the device name will be used. (json-string, optional)
-- "device": the device name or node-name of a root node which should be copied.
- (json-string)
-- "target": the name of the backup target device. (json-string)
-- "sync": what parts of the disk image should be copied to the destination;
- possibilities include "full" for all the disk, "top" for only the
- sectors allocated in the topmost image, or "none" to only replicate
- new I/O (MirrorSyncMode).
-- "speed": the maximum speed, in bytes per second (json-int, optional)
-- "compress": true to compress data, if the target format supports it.
- (json-bool, optional, default false)
-- "on-source-error": the action to take on an error on the source, default
- 'report'. 'stop' and 'enospc' can only be used
- if the block device supports io-status.
- (BlockdevOnError, optional)
-- "on-target-error": the action to take on an error on the target, default
- 'report' (no limitations, since this applies to
- a different block device than device).
- (BlockdevOnError, optional)
-
-Example:
--> { "execute": "blockdev-backup", "arguments": { "device": "src-id",
- "sync": "full",
- "target": "tgt-id" } }
-<- { "return": {} }
-
-transaction
------------
-
-Atomically operate on one or more block devices. Operations that are
-currently supported:
-
- - drive-backup
- - blockdev-backup
- - blockdev-snapshot-sync
- - blockdev-snapshot-internal-sync
- - abort
- - block-dirty-bitmap-add
- - block-dirty-bitmap-clear
-
-Refer to the qemu/qapi-schema.json file for the minimum required QEMU
-versions for these operations. A list of dictionaries containing the
-actions to be performed is accepted. If there is any failure
-performing any of the operations, all operations for the group are
-abandoned.
-
-For external snapshots, the dictionary contains the device, the file to use for
-the new snapshot, and the format. The default format, if not specified, is
-qcow2.
-
-Each new snapshot defaults to being created by QEMU (wiping any
-contents if the file already exists), but it is also possible to reuse
-an externally-created file. In the latter case, you should ensure that
-the new image file has the same contents as the current one; QEMU cannot
-perform any meaningful check. Typically this is achieved by using the
-current image file as the backing file for the new image.
-
-On failure, the original disks (as they were before the snapshot attempt)
-will be used.
-
-For internal snapshots, the dictionary contains the device and the snapshot's
-name. If an internal snapshot matching that name already exists, the request
-will be rejected. Only some image formats support it, for example, qcow2, rbd,
-and sheepdog.
-
-On failure, qemu will try to delete the newly created internal snapshot in the
-transaction. When an I/O error occurs during deletion, the user needs to fix
-it later with qemu-img or another command.
-
-Arguments:
-
-actions array:
- - "type": the operation to perform (json-string). Possible
- values: "drive-backup", "blockdev-backup",
- "blockdev-snapshot-sync",
- "blockdev-snapshot-internal-sync",
- "abort", "block-dirty-bitmap-add",
- "block-dirty-bitmap-clear"
- - "data": a dictionary. The contents depend on the value
- of "type". When "type" is "blockdev-snapshot-sync":
- - "device": device name to snapshot (json-string)
- - "node-name": graph node name to snapshot (json-string)
- - "snapshot-file": name of new image file (json-string)
- - "snapshot-node-name": graph node name of the new snapshot (json-string)
- - "format": format of new image (json-string, optional)
- - "mode": whether and how QEMU should create the snapshot file
- (NewImageMode, optional, default "absolute-paths")
- When "type" is "blockdev-snapshot-internal-sync":
- - "device": the device name or node-name of a root node to snapshot
- (json-string)
- - "name": name of the new snapshot (json-string)
-
-Example:
-
--> { "execute": "transaction",
- "arguments": { "actions": [
- { "type": "blockdev-snapshot-sync", "data" : { "device": "ide-hd0",
- "snapshot-file": "/some/place/my-image",
- "format": "qcow2" } },
- { "type": "blockdev-snapshot-sync", "data" : { "node-name": "myfile",
- "snapshot-file": "/some/place/my-image2",
- "snapshot-node-name": "node3432",
- "mode": "existing",
- "format": "qcow2" } },
- { "type": "blockdev-snapshot-sync", "data" : { "device": "ide-hd1",
- "snapshot-file": "/some/place/my-image2",
- "mode": "existing",
- "format": "qcow2" } },
- { "type": "blockdev-snapshot-internal-sync", "data" : {
- "device": "ide-hd2",
- "name": "snapshot0" } } ] } }
-<- { "return": {} }
-
-block-dirty-bitmap-add
-----------------------
-Since 2.4
-
-Create a dirty bitmap with a name on the device, and start tracking the writes.
-
-Arguments:
-
-- "node": device/node on which to create dirty bitmap (json-string)
-- "name": name of the new dirty bitmap (json-string)
-- "granularity": granularity to track writes with (int, optional)
-
-Example:
-
--> { "execute": "block-dirty-bitmap-add", "arguments": { "node": "drive0",
- "name": "bitmap0" } }
-<- { "return": {} }
-
-block-dirty-bitmap-remove
--------------------------
-Since 2.4
-
-Stop write tracking and remove the dirty bitmap that was created with
-block-dirty-bitmap-add.
-
-Arguments:
-
-- "node": device/node on which to remove dirty bitmap (json-string)
-- "name": name of the dirty bitmap to remove (json-string)
-
-Example:
-
--> { "execute": "block-dirty-bitmap-remove", "arguments": { "node": "drive0",
- "name": "bitmap0" } }
-<- { "return": {} }
-
-block-dirty-bitmap-clear
-------------------------
-Since 2.4
-
-Reset the dirty bitmap associated with a node so that an incremental backup
-from this point in time forward will only backup clusters modified after this
-clear operation.
-
-Arguments:
-
-- "node": device/node on which to remove dirty bitmap (json-string)
-- "name": name of the dirty bitmap to remove (json-string)
-
-Example:
-
--> { "execute": "block-dirty-bitmap-clear", "arguments": { "node": "drive0",
- "name": "bitmap0" } }
-<- { "return": {} }
-
-blockdev-snapshot-sync
-----------------------
-
-Synchronous snapshot of a block device. snapshot-file specifies the
-target of the new image. If the file exists, or if it is a device, the
-snapshot will be created in the existing file/device. If it does not
-exist, a new file will be created. format specifies the format of the
-snapshot image; the default is qcow2.
-
-Arguments:
-
-- "device": device name to snapshot (json-string)
-- "node-name": graph node name to snapshot (json-string)
-- "snapshot-file": name of new image file (json-string)
-- "snapshot-node-name": graph node name of the new snapshot (json-string)
-- "mode": whether and how QEMU should create the snapshot file
- (NewImageMode, optional, default "absolute-paths")
-- "format": format of new image (json-string, optional)
-
-Example:
-
--> { "execute": "blockdev-snapshot-sync", "arguments": { "device": "ide-hd0",
- "snapshot-file":
- "/some/place/my-image",
- "format": "qcow2" } }
-<- { "return": {} }
-
-blockdev-snapshot
------------------
-Since 2.5
-
-Create a snapshot, by installing 'node' as the backing image of
-'overlay'. Additionally, if 'node' is associated with a block
-device, the block device changes to using 'overlay' as its new active
-image.
-
-Arguments:
-
-- "node": device that will have a snapshot created (json-string)
-- "overlay": device that will have 'node' as its backing image (json-string)
-
-Example:
-
--> { "execute": "blockdev-add",
- "arguments": { "driver": "qcow2",
- "node-name": "node1534",
- "file": { "driver": "file",
- "filename": "hd1.qcow2" },
- "backing": "" } }
-
-<- { "return": {} }
-
--> { "execute": "blockdev-snapshot", "arguments": { "node": "ide-hd0",
- "overlay": "node1534" } }
-<- { "return": {} }
-
-blockdev-snapshot-internal-sync
--------------------------------
-
-Synchronously take an internal snapshot of a block device when the format of
-the image used supports it. If the name is an empty string, or a snapshot with
-that name already exists, the operation will fail.
-
-Arguments:
-
-- "device": the device name or node-name of a root node to snapshot
- (json-string)
-- "name": name of the new snapshot (json-string)
-
-Example:
-
--> { "execute": "blockdev-snapshot-internal-sync",
- "arguments": { "device": "ide-hd0",
- "name": "snapshot0" }
- }
-<- { "return": {} }
-
-blockdev-snapshot-delete-internal-sync
---------------------------------------
-
-Synchronously delete an internal snapshot of a block device when the format of
-the image used supports it. The snapshot is identified by name or id or both;
-one of name or id is required. If the snapshot is not found, the operation will
-fail.
-
-Arguments:
-
-- "device": the device name or node-name of a root node (json-string)
-- "id": ID of the snapshot (json-string, optional)
-- "name": name of the snapshot (json-string, optional)
-
-Example:
-
--> { "execute": "blockdev-snapshot-delete-internal-sync",
- "arguments": { "device": "ide-hd0",
- "name": "snapshot0" }
- }
-<- { "return": {
- "id": "1",
- "name": "snapshot0",
- "vm-state-size": 0,
- "date-sec": 1000012,
- "date-nsec": 10,
- "vm-clock-sec": 100,
- "vm-clock-nsec": 20
- }
- }
-
-drive-mirror
-------------
-
-Start mirroring a block device's writes to a new destination. target
-specifies the target of the new image. If the file exists, or if it is
-a device, it will be used as the new destination for writes. If it does not
-exist, a new file will be created. format specifies the format of the
-mirror image, default is to probe if mode='existing', else the format
-of the source.
-
-Arguments:
-
-- "job-id": Identifier for the newly-created block job. If omitted,
- the device name will be used. (json-string, optional)
-- "device": the device name or node-name of a root node whose writes should be
- mirrored. (json-string)
-- "target": name of new image file (json-string)
-- "format": format of new image (json-string, optional)
-- "node-name": the name of the new block driver state in the node graph
- (json-string, optional)
-- "replaces": the block driver node name to replace when finished
- (json-string, optional)
-- "mode": how an image file should be created into the target
- file/device (NewImageMode, optional, default 'absolute-paths')
-- "speed": maximum speed of the streaming job, in bytes per second
- (json-int)
-- "granularity": granularity of the dirty bitmap, in bytes (json-int, optional)
-- "buf-size": maximum amount of data in flight from source to target, in bytes
- (json-int, default 10M)
-- "sync": what parts of the disk image should be copied to the destination;
- possibilities include "full" for all the disk, "top" for only the sectors
- allocated in the topmost image, or "none" to only replicate new I/O
- (MirrorSyncMode).
-- "on-source-error": the action to take on an error on the source
- (BlockdevOnError, default 'report')
-- "on-target-error": the action to take on an error on the target
- (BlockdevOnError, default 'report')
-- "unmap": whether the target sectors should be discarded where source has only
- zeroes. (json-bool, optional, default true)
-
-The default value of the granularity is the image cluster size clamped
-between 4096 and 65536, if the image format defines one. If the format
-does not define a cluster size, the default value of the granularity
-is 65536.
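-For example, a qcow2 image with a 2 MiB cluster size would get the default
-granularity clamped down to 65536 bytes.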
-
-
-Example:
-
--> { "execute": "drive-mirror", "arguments": { "device": "ide-hd0",
- "target": "/some/place/my-image",
- "sync": "full",
- "format": "qcow2" } }
-<- { "return": {} }
-
-blockdev-mirror
-------------
-
-Start mirroring a block device's writes to another block device. target
-specifies the target of mirror operation.
-
-Arguments:
-
-- "job-id": Identifier for the newly-created block job. If omitted,
- the device name will be used. (json-string, optional)
-- "device": The device name or node-name of a root node whose writes should be
- mirrored (json-string)
-- "target": device name to mirror to (json-string)
-- "replaces": the block driver node name to replace when finished
- (json-string, optional)
-- "speed": maximum speed of the streaming job, in bytes per second
- (json-int)
-- "granularity": granularity of the dirty bitmap, in bytes (json-int, optional)
-- "buf_size": maximum amount of data in flight from source to target, in bytes
- (json-int, default 10M)
-- "sync": what parts of the disk image should be copied to the destination;
- possibilities include "full" for all the disk, "top" for only the sectors
- allocated in the topmost image, or "none" to only replicate new I/O
- (MirrorSyncMode).
-- "on-source-error": the action to take on an error on the source
- (BlockdevOnError, default 'report')
-- "on-target-error": the action to take on an error on the target
- (BlockdevOnError, default 'report')
-
-The default value of the granularity is the image cluster size clamped
-between 4096 and 65536, if the image format defines one. If the format
-does not define a cluster size, the default value of the granularity
-is 65536.
-
-Example:
-
--> { "execute": "blockdev-mirror", "arguments": { "device": "ide-hd0",
- "target": "target0",
- "sync": "full" } }
-<- { "return": {} }
-
-change-backing-file
--------------------
-Since: 2.1
-
-Change the backing file in the image file metadata. This does not cause
-QEMU to reopen the image file to reparse the backing filename (it may,
-however, perform a reopen to change permissions from r/o -> r/w -> r/o,
-if needed). The new backing file string is written into the image file
-metadata, and the QEMU internal strings are updated.
-
-Arguments:
-
-- "image-node-name": The name of the block driver state node of the
- image to modify. The "device" is argument is used to
- verify "image-node-name" is in the chain described by
- "device".
- (json-string, optional)
-
-- "device": The device name or node-name of the root node that owns
- image-node-name.
- (json-string)
-
-- "backing-file": The string to write as the backing file. This string is
- not validated, so care should be taken when specifying
- the string, or it may not be possible to reopen the image
- chain again.
- (json-string)
-
-Returns: Nothing on success
- If "device" does not exist or cannot be determined, DeviceNotFound
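-
-Example (illustrative only; the device name, node name and backing file path
-below are hypothetical):
-
--> { "execute": "change-backing-file",
- "arguments": { "device": "virtio0",
- "image-node-name": "node42",
- "backing-file": "/tmp/new-backing.qcow2" } }
-<- { "return": {} }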
-
-balloon
--------
-
-Request VM to change its memory allocation (in bytes).
-
-Arguments:
-
-- "value": New memory allocation (json-int)
-
-Example:
-
--> { "execute": "balloon", "arguments": { "value": 536870912 } }
-<- { "return": {} }
-
-set_link
---------
-
-Change the link status of a network adapter.
-
-Arguments:
-
-- "name": network device name (json-string)
-- "up": status is up (json-bool)
-
-Example:
-
--> { "execute": "set_link", "arguments": { "name": "e1000.0", "up": false } }
-<- { "return": {} }
-
-getfd
------
-
-Receive a file descriptor via SCM rights and assign it a name.
-
-Arguments:
-
-- "fdname": file descriptor name (json-string)
-
-Example:
-
--> { "execute": "getfd", "arguments": { "fdname": "fd1" } }
-<- { "return": {} }
-
-Notes:
-
-(1) If the name specified by the "fdname" argument already exists,
- the file descriptor assigned to it will be closed and replaced
- by the received file descriptor.
-(2) The 'closefd' command can be used to explicitly close the file
- descriptor when it is no longer needed.
-
-closefd
--------
-
-Close a file descriptor previously passed via SCM rights.
-
-Arguments:
-
-- "fdname": file descriptor name (json-string)
-
-Example:
-
--> { "execute": "closefd", "arguments": { "fdname": "fd1" } }
-<- { "return": {} }
-
-add-fd
--------
-
-Add a file descriptor that was passed via SCM rights to an fd set.
-
-Arguments:
-
-- "fdset-id": The ID of the fd set to add the file descriptor to.
- (json-int, optional)
-- "opaque": A free-form string that can be used to describe the fd.
- (json-string, optional)
-
-Return a json-object with the following information:
-
-- "fdset-id": The ID of the fd set that the fd was added to. (json-int)
-- "fd": The file descriptor that was received via SCM rights and added to the
- fd set. (json-int)
-
-Example:
-
--> { "execute": "add-fd", "arguments": { "fdset-id": 1 } }
-<- { "return": { "fdset-id": 1, "fd": 3 } }
-
-Notes:
-
-(1) The list of fd sets is shared by all monitor connections.
-(2) If "fdset-id" is not specified, a new fd set will be created.
-
-remove-fd
----------
-
-Remove a file descriptor from an fd set.
-
-Arguments:
-
-- "fdset-id": The ID of the fd set that the file descriptor belongs to.
- (json-int)
-- "fd": The file descriptor that is to be removed. (json-int, optional)
-
-Example:
-
--> { "execute": "remove-fd", "arguments": { "fdset-id": 1, "fd": 3 } }
-<- { "return": {} }
-
-Notes:
-
-(1) The list of fd sets is shared by all monitor connections.
-(2) If "fd" is not specified, all file descriptors in "fdset-id" will be
- removed.
-
-query-fdsets
--------------
-
-Return information describing all fd sets.
-
-Arguments: None
-
-Example:
-
--> { "execute": "query-fdsets" }
-<- { "return": [
- {
- "fds": [
- {
- "fd": 30,
- "opaque": "rdonly:/path/to/file"
- },
- {
- "fd": 24,
- "opaque": "rdwr:/path/to/file"
- }
- ],
- "fdset-id": 1
- },
- {
- "fds": [
- {
- "fd": 28
- },
- {
- "fd": 29
- }
- ],
- "fdset-id": 0
- }
- ]
- }
-
-Note: The list of fd sets is shared by all monitor connections.
-
-block_passwd
-------------
-
-Set the password of encrypted block devices.
-
-Arguments:
-
-- "device": device name (json-string)
-- "node-name": name in the block driver state graph (json-string)
-- "password": password (json-string)
-
-Example:
-
--> { "execute": "block_passwd", "arguments": { "device": "ide0-hd0",
- "password": "12345" } }
-<- { "return": {} }
-
-block_set_io_throttle
-------------
-
-Change I/O throttle limits for a block drive.
-
-Arguments:
-
-- "device": block device name (deprecated, use @id instead)
- (json-string, optional)
-- "id": the name or QOM path of the guest device (json-string, optional)
-- "bps": total throughput limit in bytes per second (json-int)
-- "bps_rd": read throughput limit in bytes per second (json-int)
-- "bps_wr": write throughput limit in bytes per second (json-int)
-- "iops": total I/O operations per second (json-int)
-- "iops_rd": read I/O operations per second (json-int)
-- "iops_wr": write I/O operations per second (json-int)
-- "bps_max": total throughput limit during bursts, in bytes (json-int, optional)
-- "bps_rd_max": read throughput limit during bursts, in bytes (json-int, optional)
-- "bps_wr_max": write throughput limit during bursts, in bytes (json-int, optional)
-- "iops_max": total I/O operations per second during bursts (json-int, optional)
-- "iops_rd_max": read I/O operations per second during bursts (json-int, optional)
-- "iops_wr_max": write I/O operations per second during bursts (json-int, optional)
-- "bps_max_length": maximum length of the @bps_max burst period, in seconds (json-int, optional)
-- "bps_rd_max_length": maximum length of the @bps_rd_max burst period, in seconds (json-int, optional)
-- "bps_wr_max_length": maximum length of the @bps_wr_max burst period, in seconds (json-int, optional)
-- "iops_max_length": maximum length of the @iops_max burst period, in seconds (json-int, optional)
-- "iops_rd_max_length": maximum length of the @iops_rd_max burst period, in seconds (json-int, optional)
-- "iops_wr_max_length": maximum length of the @iops_wr_max burst period, in seconds (json-int, optional)
-- "iops_size": I/O size in bytes when limiting (json-int, optional)
-- "group": throttle group name (json-string, optional)
-
-Example:
-
--> { "execute": "block_set_io_throttle", "arguments": { "id": "ide0-1-0",
- "bps": 1000000,
- "bps_rd": 0,
- "bps_wr": 0,
- "iops": 0,
- "iops_rd": 0,
- "iops_wr": 0,
- "bps_max": 8000000,
- "bps_rd_max": 0,
- "bps_wr_max": 0,
- "iops_max": 0,
- "iops_rd_max": 0,
- "iops_wr_max": 0,
- "bps_max_length": 60,
- "iops_size": 0 } }
-<- { "return": {} }
-
-set_password
-------------
-
-Set the password for vnc/spice protocols.
-
-Arguments:
-
-- "protocol": protocol name (json-string)
-- "password": password (json-string)
-- "connected": [ keep | disconnect | fail ] (json-string, optional)
-
-Example:
-
--> { "execute": "set_password", "arguments": { "protocol": "vnc",
- "password": "secret" } }
-<- { "return": {} }
-
-expire_password
----------------
-
-Set the password expire time for vnc/spice protocols.
-
-Arguments:
-
-- "protocol": protocol name (json-string)
-- "time": [ now | never | +secs | secs ] (json-string)
-
-Example:
-
--> { "execute": "expire_password", "arguments": { "protocol": "vnc",
- "time": "+60" } }
-<- { "return": {} }
-
-add_client
-----------
-
-Add a graphics client
-
-Arguments:
-
-- "protocol": protocol name (json-string)
-- "fdname": file descriptor name (json-string)
-- "skipauth": whether to skip authentication (json-bool, optional)
-- "tls": whether to perform TLS (json-bool, optional)
-
-Example:
-
--> { "execute": "add_client", "arguments": { "protocol": "vnc",
- "fdname": "myclient" } }
-<- { "return": {} }
-
-qmp_capabilities
-----------------
-
-Enable QMP capabilities.
-
-Arguments: None.
-
-Example:
-
--> { "execute": "qmp_capabilities" }
-<- { "return": {} }
-
-Note: This command must be issued before issuing any other command.
-
-human-monitor-command
----------------------
-
-Execute a Human Monitor command.
-
-Arguments:
-
-- command-line: the command name and its arguments, just like the
- Human Monitor's shell (json-string)
-- cpu-index: select the CPU number to be used by commands which access CPU
- data, like 'info registers'. The Monitor selects CPU 0 if this
- argument is not provided (json-int, optional)
-
-Example:
-
--> { "execute": "human-monitor-command", "arguments": { "command-line": "info kvm" } }
-<- { "return": "kvm support: enabled\r\n" }
-
-Notes:
-
-(1) The Human Monitor is NOT a stable interface; this means that command
- names, arguments and responses can change or be removed at ANY time.
- Applications that rely on long-term stability guarantees should NOT
- use this command
-
-(2) Limitations:
-
- o This command is stateless; this means that commands that depend
- on state information (such as getfd) might not work
-
- o Commands that prompt the user for data (e.g. 'cont' when the block
- device is encrypted) don't currently work
-
-3. Query Commands
-=================
-
-
-query-version
--------------
-
-Show QEMU version.
-
-Return a json-object with the following information:
-
-- "qemu": A json-object containing three integer values:
- - "major": QEMU's major version (json-int)
- - "minor": QEMU's minor version (json-int)
- - "micro": QEMU's micro version (json-int)
-- "package": package's version (json-string)
-
-Example:
-
--> { "execute": "query-version" }
-<- {
- "return":{
- "qemu":{
- "major":0,
- "minor":11,
- "micro":5
- },
- "package":""
- }
- }
-
-query-commands
---------------
-
-List QMP available commands.
-
-Each command is represented by a json-object; the returned value is a json-array
-of all commands.
-
-Each json-object contains:
-
-- "name": command's name (json-string)
-
-Example:
-
--> { "execute": "query-commands" }
-<- {
- "return":[
- {
- "name":"query-balloon"
- },
- {
- "name":"system_powerdown"
- }
- ]
- }
-
-Note: This example has been shortened as the real response is too long.
-
-query-events
---------------
-
-List QMP available events.
-
-Each event is represented by a json-object; the returned value is a json-array
-of all events.
-
-Each json-object contains:
-
-- "name": event's name (json-string)
-
-Example:
-
--> { "execute": "query-events" }
-<- {
- "return":[
- {
- "name":"SHUTDOWN"
- },
- {
- "name":"RESET"
- }
- ]
- }
-
-Note: This example has been shortened as the real response is too long.
-
-query-qmp-schema
-----------------
-
-Return the QMP wire schema. The returned value is a json-array of
-named schema entities. Entities are commands, events and various
-types. See docs/qapi-code-gen.txt for information on their structure
-and intended use.
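-
-Example (the response is a very large array and is omitted here):
-
--> { "execute": "query-qmp-schema" }
-<- { "return": [ ... ] }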
-
-query-chardev
--------------
-
-Each device is represented by a json-object. The returned value is a json-array
-of all devices.
-
-Each json-object contains the following:
-
-- "label": device's label (json-string)
-- "filename": device's file (json-string)
-- "frontend-open": open/closed state of the frontend device attached to this
- backend (json-bool)
-
-Example:
-
--> { "execute": "query-chardev" }
-<- {
- "return": [
- {
- "label": "charchannel0",
- "filename": "unix:/var/lib/libvirt/qemu/seabios.rhel6.agent,server",
- "frontend-open": false
- },
- {
- "label": "charmonitor",
- "filename": "unix:/var/lib/libvirt/qemu/seabios.rhel6.monitor,server",
- "frontend-open": true
- },
- {
- "label": "charserial0",
- "filename": "pty:/dev/pts/2",
- "frontend-open": true
- }
- ]
- }
-
-query-chardev-backends
--------------
-
-List available character device backends.
-
-Each backend is represented by a json-object; the returned value is a json-array
-of all backends.
-
-Each json-object contains:
-
-- "name": backend name (json-string)
-
-Example:
-
--> { "execute": "query-chardev-backends" }
-<- {
- "return":[
- {
- "name":"udp"
- },
- {
- "name":"tcp"
- },
- {
- "name":"unix"
- },
- {
- "name":"spiceport"
- }
- ]
- }
-
-query-block
------------
-
-Show the block devices.
-
-Each block device's information is stored in a json-object and the returned value
-is a json-array of all devices.
-
-Each json-object contains the following:
-
-- "device": device name (json-string)
-- "type": device type (json-string)
- - deprecated, retained for backward compatibility
- - Possible values: "unknown"
-- "removable": true if the device is removable, false otherwise (json-bool)
-- "locked": true if the device is locked, false otherwise (json-bool)
-- "tray_open": only present if removable, true if the device has a tray,
- and it is open (json-bool)
-- "inserted": only present if the device is inserted, it is a json-object
- containing the following:
- - "file": device file name (json-string)
- - "ro": true if read-only, false otherwise (json-bool)
- - "drv": driver format name (json-string)
- - Possible values: "blkdebug", "bochs", "cloop", "dmg",
- "file", "ftp", "ftps", "host_cdrom",
- "host_device", "http", "https",
- "nbd", "parallels", "qcow", "qcow2", "raw",
- "vdi", "vmdk", "vpc", "vvfat"
- - "backing_file": backing file name (json-string, optional)
- - "backing_file_depth": number of files in the backing file chain (json-int)
- - "encrypted": true if encrypted, false otherwise (json-bool)
- - "bps": limit total bytes per second (json-int)
- - "bps_rd": limit read bytes per second (json-int)
- - "bps_wr": limit write bytes per second (json-int)
- - "iops": limit total I/O operations per second (json-int)
- - "iops_rd": limit read operations per second (json-int)
- - "iops_wr": limit write operations per second (json-int)
- - "bps_max": total max in bytes (json-int)
- - "bps_rd_max": read max in bytes (json-int)
- - "bps_wr_max": write max in bytes (json-int)
- - "iops_max": total I/O operations max (json-int)
- - "iops_rd_max": read I/O operations max (json-int)
- - "iops_wr_max": write I/O operations max (json-int)
- - "iops_size": I/O size when limiting by iops (json-int)
- - "detect_zeroes": detect and optimize zero writing (json-string)
- - Possible values: "off", "on", "unmap"
- - "write_threshold": write offset threshold in bytes, an event will be
- emitted if crossed. Zero if disabled (json-int)
- - "image": the detail of the image, it is a json-object containing
- the following:
- - "filename": image file name (json-string)
- - "format": image format (json-string)
- - "virtual-size": image capacity in bytes (json-int)
- - "dirty-flag": true if image is not cleanly closed, not present
- means clean (json-bool, optional)
- - "actual-size": actual size on disk in bytes of the image, not
- present when the image does not support thin
- provisioning (json-int, optional)
- - "cluster-size": size of a cluster in bytes, not present if image
- format does not support it (json-int, optional)
- - "encrypted": true if the image is encrypted, not present means
- false or the image format does not support
- encryption (json-bool, optional)
- - "backing_file": backing file name, not present means no backing
- file is used or the image format does not
- support backing file chain
- (json-string, optional)
- - "full-backing-filename": full path of the backing file, not
- present if it equals backing_file or no
- backing file is used
- (json-string, optional)
- - "backing-filename-format": the format of the backing file, not
- present means unknown or no backing
- file (json-string, optional)
- - "snapshots": the internal snapshot info, it is an optional list
- of json-object containing the following:
- - "id": unique snapshot id (json-string)
- - "name": snapshot name (json-string)
- - "vm-state-size": size of the VM state in bytes (json-int)
- - "date-sec": UTC date of the snapshot in seconds (json-int)
- - "date-nsec": fractional part in nanoseconds to be used with
- date-sec (json-int)
- - "vm-clock-sec": VM clock relative to boot in seconds
- (json-int)
- - "vm-clock-nsec": fractional part in nanoseconds to be used
- with vm-clock-sec (json-int)
- - "backing-image": the detail of the backing image, it is an
- optional json-object only present when a
- backing image is present for this image
-
-- "io-status": I/O operation status, only present if the device supports it
- and the VM is configured to stop on errors. It's always reset
- to "ok" when the "cont" command is issued (json-string, optional)
- - Possible values: "ok", "failed", "nospace"
-
-Example:
-
--> { "execute": "query-block" }
-<- {
- "return":[
- {
- "io-status": "ok",
- "device":"ide0-hd0",
- "locked":false,
- "removable":false,
- "inserted":{
- "ro":false,
- "drv":"qcow2",
- "encrypted":false,
- "file":"disks/test.qcow2",
- "backing_file_depth":1,
- "bps":1000000,
- "bps_rd":0,
- "bps_wr":0,
- "iops":1000000,
- "iops_rd":0,
- "iops_wr":0,
- "bps_max": 8000000,
- "bps_rd_max": 0,
- "bps_wr_max": 0,
- "iops_max": 0,
- "iops_rd_max": 0,
- "iops_wr_max": 0,
- "iops_size": 0,
- "detect_zeroes": "on",
- "write_threshold": 0,
- "image":{
- "filename":"disks/test.qcow2",
- "format":"qcow2",
- "virtual-size":2048000,
- "backing_file":"base.qcow2",
- "full-backing-filename":"disks/base.qcow2",
- "backing-filename-format":"qcow2",
- "snapshots":[
- {
- "id": "1",
- "name": "snapshot1",
- "vm-state-size": 0,
- "date-sec": 10000200,
- "date-nsec": 12,
- "vm-clock-sec": 206,
- "vm-clock-nsec": 30
- }
- ],
- "backing-image":{
- "filename":"disks/base.qcow2",
- "format":"qcow2",
- "virtual-size":2048000
- }
- }
- },
- "type":"unknown"
- },
- {
- "io-status": "ok",
- "device":"ide1-cd0",
- "locked":false,
- "removable":true,
- "type":"unknown"
- },
- {
- "device":"floppy0",
- "locked":false,
- "removable":true,
- "type":"unknown"
- },
- {
- "device":"sd0",
- "locked":false,
- "removable":true,
- "type":"unknown"
- }
- ]
- }
-
-query-blockstats
-----------------
-
-Show block device statistics.
-
-Each device's statistics are stored in a json-object and the returned
-value is a json-array of all devices.
-
-Each json-object contains the following:
-
-- "device": device name (json-string)
-- "stats": A json-object with the statistics information, it contains:
- - "rd_bytes": bytes read (json-int)
- - "wr_bytes": bytes written (json-int)
- - "rd_operations": read operations (json-int)
- - "wr_operations": write operations (json-int)
- - "flush_operations": cache flush operations (json-int)
- - "wr_total_time_ns": total time spent on writes in nanoseconds (json-int)
- - "rd_total_time_ns": total time spent on reads in nanoseconds (json-int)
- - "flush_total_time_ns": total time spent on cache flushes in nanoseconds (json-int)
- - "wr_highest_offset": The offset after the greatest byte written to the
- BlockDriverState since it has been opened (json-int)
- - "rd_merged": number of read requests that have been merged into
- another request (json-int)
- - "wr_merged": number of write requests that have been merged into
- another request (json-int)
- - "idle_time_ns": time since the last I/O operation, in
- nanoseconds. If the field is absent it means
- that there haven't been any operations yet
- (json-int, optional)
- - "failed_rd_operations": number of failed read operations
- (json-int)
- - "failed_wr_operations": number of failed write operations
- (json-int)
- - "failed_flush_operations": number of failed flush operations
- (json-int)
- - "invalid_rd_operations": number of invalid read operations
- (json-int)
- - "invalid_wr_operations": number of invalid write operations
- (json-int)
- - "invalid_flush_operations": number of invalid flush operations
- (json-int)
- - "account_invalid": whether invalid operations are included in
- the last access statistics (json-bool)
- - "account_failed": whether failed operations are included in the
- latency and last access statistics
- (json-bool)
- - "timed_stats": A json-array containing statistics collected in
- specific intervals, with the following members:
- - "interval_length": interval used for calculating the
- statistics, in seconds (json-int)
- - "min_rd_latency_ns": minimum latency of read operations in
- the defined interval, in nanoseconds
- (json-int)
- - "min_wr_latency_ns": minimum latency of write operations in
- the defined interval, in nanoseconds
- (json-int)
- - "min_flush_latency_ns": minimum latency of flush operations
- in the defined interval, in
- nanoseconds (json-int)
- - "max_rd_latency_ns": maximum latency of read operations in
- the defined interval, in nanoseconds
- (json-int)
- - "max_wr_latency_ns": maximum latency of write operations in
- the defined interval, in nanoseconds
- (json-int)
- - "max_flush_latency_ns": maximum latency of flush operations
- in the defined interval, in
- nanoseconds (json-int)
- - "avg_rd_latency_ns": average latency of read operations in
- the defined interval, in nanoseconds
- (json-int)
- - "avg_wr_latency_ns": average latency of write operations in
- the defined interval, in nanoseconds
- (json-int)
- - "avg_flush_latency_ns": average latency of flush operations
- in the defined interval, in
- nanoseconds (json-int)
- - "avg_rd_queue_depth": average number of pending read
- operations in the defined interval
- (json-number)
- - "avg_wr_queue_depth": average number of pending write
- operations in the defined interval
- (json-number).
-- "parent": Contains recursively the statistics of the underlying
- protocol (e.g. the host file for a qcow2 image). If there is
- no underlying protocol, this field is omitted
- (json-object, optional)
-
-Example:
-
--> { "execute": "query-blockstats" }
-<- {
- "return":[
- {
- "device":"ide0-hd0",
- "parent":{
- "stats":{
- "wr_highest_offset":3686448128,
- "wr_bytes":9786368,
- "wr_operations":751,
- "rd_bytes":122567168,
- "rd_operations":36772
- "wr_total_times_ns":313253456
- "rd_total_times_ns":3465673657
- "flush_total_times_ns":49653
- "flush_operations":61,
- "rd_merged":0,
- "wr_merged":0,
- "idle_time_ns":2953431879,
- "account_invalid":true,
- "account_failed":false
- }
- },
- "stats":{
- "wr_highest_offset":2821110784,
- "wr_bytes":9786368,
- "wr_operations":692,
- "rd_bytes":122739200,
- "rd_operations":36604
- "flush_operations":51,
- "wr_total_times_ns":313253456
- "rd_total_times_ns":3465673657
- "flush_total_times_ns":49653,
- "rd_merged":0,
- "wr_merged":0,
- "idle_time_ns":2953431879,
- "account_invalid":true,
- "account_failed":false
- }
- },
- {
- "device":"ide1-cd0",
- "stats":{
- "wr_highest_offset":0,
- "wr_bytes":0,
- "wr_operations":0,
- "rd_bytes":0,
- "rd_operations":0
- "flush_operations":0,
- "wr_total_times_ns":0
- "rd_total_times_ns":0
- "flush_total_times_ns":0,
- "rd_merged":0,
- "wr_merged":0,
- "account_invalid":false,
- "account_failed":false
- }
- },
- {
- "device":"floppy0",
- "stats":{
- "wr_highest_offset":0,
- "wr_bytes":0,
- "wr_operations":0,
- "rd_bytes":0,
- "rd_operations":0
- "flush_operations":0,
- "wr_total_times_ns":0
- "rd_total_times_ns":0
- "flush_total_times_ns":0,
- "rd_merged":0,
- "wr_merged":0,
- "account_invalid":false,
- "account_failed":false
- }
- },
- {
- "device":"sd0",
- "stats":{
- "wr_highest_offset":0,
- "wr_bytes":0,
- "wr_operations":0,
- "rd_bytes":0,
- "rd_operations":0
- "flush_operations":0,
- "wr_total_times_ns":0
- "rd_total_times_ns":0
- "flush_total_times_ns":0,
- "rd_merged":0,
- "wr_merged":0,
- "account_invalid":false,
- "account_failed":false
- }
- }
- ]
- }
-
-query-cpus
-----------
-
-Show CPU information.
-
-Return a json-array. Each CPU is represented by a json-object, which contains:
-
-- "CPU": CPU index (json-int)
-- "current": true if this is the current CPU, false otherwise (json-bool)
-- "halted": true if the cpu is halted, false otherwise (json-bool)
-- "qom_path": path to the CPU object in the QOM tree (json-str)
-- "arch": architecture of the cpu, which determines what additional
- keys will be present (json-str)
-- Current program counter. The key's name depends on the architecture:
- "pc": i386/x86_64 (json-int)
- "nip": PPC (json-int)
- "pc" and "npc": sparc (json-int)
- "PC": mips (json-int)
-- "thread_id": ID of the underlying host thread (json-int)
-
-Example:
-
--> { "execute": "query-cpus" }
-<- {
- "return":[
- {
- "CPU":0,
- "current":true,
- "halted":false,
- "qom_path":"/machine/unattached/device[0]",
- "arch":"x86",
- "pc":3227107138,
- "thread_id":3134
- },
- {
- "CPU":1,
- "current":false,
- "halted":true,
- "qom_path":"/machine/unattached/device[2]",
- "arch":"x86",
- "pc":7108165,
- "thread_id":3135
- }
- ]
- }
-
-query-iothreads
----------------
-
-Returns a list of information about each iothread.
-
-Note this list excludes the QEMU main loop thread, which is not declared
-using the -object iothread command-line option. It is always the main thread
-of the process.
-
-Return a json-array. Each iothread is represented by a json-object, which contains:
-
-- "id": name of iothread (json-str)
-- "thread-id": ID of the underlying host thread (json-int)
-
-Example:
-
--> { "execute": "query-iothreads" }
-<- {
- "return":[
- {
- "id":"iothread0",
- "thread-id":3134
- },
- {
- "id":"iothread1",
- "thread-id":3135
- }
- ]
- }
-
-query-pci
----------
-
-PCI buses and devices information.
-
-The returned value is a json-array of all buses. Each bus is represented by
-a json-object, which has a key with a json-array of all PCI devices attached
-to it. Each device is represented by a json-object.
-
-The bus json-object contains the following:
-
-- "bus": bus number (json-int)
-- "devices": a json-array of json-objects, each json-object represents a
- PCI device
-
-The PCI device json-object contains the following:
-
-- "bus": identical to the parent's bus number (json-int)
-- "slot": slot number (json-int)
-- "function": function number (json-int)
-- "class_info": a json-object containing:
- - "desc": device class description (json-string, optional)
- - "class": device class number (json-int)
-- "id": a json-object containing:
- - "device": device ID (json-int)
- - "vendor": vendor ID (json-int)
-- "irq": device's IRQ if assigned (json-int, optional)
-- "qdev_id": qdev id string (json-string)
-- "pci_bridge": It's a json-object, only present if this device is a
- PCI bridge, contains:
- - "bus": bus number (json-int)
- - "secondary": secondary bus number (json-int)
- - "subordinate": subordinate bus number (json-int)
- - "io_range": I/O memory range information, a json-object with the
- following members:
- - "base": base address, in bytes (json-int)
- - "limit": limit address, in bytes (json-int)
- - "memory_range": memory range information, a json-object with the
- following members:
- - "base": base address, in bytes (json-int)
- - "limit": limit address, in bytes (json-int)
- - "prefetchable_range": Prefetchable memory range information, a
- json-object with the following members:
- - "base": base address, in bytes (json-int)
- - "limit": limit address, in bytes (json-int)
- - "devices": a json-array of PCI devices if there's any attached, each
- each element is represented by a json-object, which contains
- the same members of the 'PCI device json-object' described
- above (optional)
-- "regions": a json-array of json-objects, each json-object represents a
- memory region of this device
-
-The memory range json-object contains the following:
-
-- "base": base memory address (json-int)
-- "limit": limit value (json-int)
-
-The region json-object can be an I/O region or a memory region. An I/O region
-json-object contains the following:
-
-- "type": "io" (json-string, fixed)
-- "bar": BAR number (json-int)
-- "address": memory address (json-int)
-- "size": memory size (json-int)
-
-A memory region json-object contains the following:
-
-- "type": "memory" (json-string, fixed)
-- "bar": BAR number (json-int)
-- "address": memory address (json-int)
-- "size": memory size (json-int)
-- "mem_type_64": true or false (json-bool)
-- "prefetch": true or false (json-bool)
-
-Example:
-
--> { "execute": "query-pci" }
-<- {
- "return":[
- {
- "bus":0,
- "devices":[
- {
- "bus":0,
- "qdev_id":"",
- "slot":0,
- "class_info":{
- "class":1536,
- "desc":"Host bridge"
- },
- "id":{
- "device":32902,
- "vendor":4663
- },
- "function":0,
- "regions":[
-
- ]
- },
- {
- "bus":0,
- "qdev_id":"",
- "slot":1,
- "class_info":{
- "class":1537,
- "desc":"ISA bridge"
- },
- "id":{
- "device":32902,
- "vendor":28672
- },
- "function":0,
- "regions":[
-
- ]
- },
- {
- "bus":0,
- "qdev_id":"",
- "slot":1,
- "class_info":{
- "class":257,
- "desc":"IDE controller"
- },
- "id":{
- "device":32902,
- "vendor":28688
- },
- "function":1,
- "regions":[
- {
- "bar":4,
- "size":16,
- "address":49152,
- "type":"io"
- }
- ]
- },
- {
- "bus":0,
- "qdev_id":"",
- "slot":2,
- "class_info":{
- "class":768,
- "desc":"VGA controller"
- },
- "id":{
- "device":4115,
- "vendor":184
- },
- "function":0,
- "regions":[
- {
- "prefetch":true,
- "mem_type_64":false,
- "bar":0,
- "size":33554432,
- "address":4026531840,
- "type":"memory"
- },
- {
- "prefetch":false,
- "mem_type_64":false,
- "bar":1,
- "size":4096,
- "address":4060086272,
- "type":"memory"
- },
- {
- "prefetch":false,
- "mem_type_64":false,
- "bar":6,
- "size":65536,
- "address":-1,
- "type":"memory"
- }
- ]
- },
- {
- "bus":0,
- "qdev_id":"",
- "irq":11,
- "slot":4,
- "class_info":{
- "class":1280,
- "desc":"RAM controller"
- },
- "id":{
- "device":6900,
- "vendor":4098
- },
- "function":0,
- "regions":[
- {
- "bar":0,
- "size":32,
- "address":49280,
- "type":"io"
- }
- ]
- }
- ]
- }
- ]
- }
-
-Note: This example has been shortened as the real response is too long.
-
-query-kvm
----------
-
-Show KVM information.
-
-Return a json-object with the following information:
-
-- "enabled": true if KVM support is enabled, false otherwise (json-bool)
-- "present": true if QEMU has KVM support, false otherwise (json-bool)
-
-Example:
-
--> { "execute": "query-kvm" }
-<- { "return": { "enabled": true, "present": true } }
-
-query-status
-------------
-
-Return a json-object with the following information:
-
-- "running": true if the VM is running, or false if it is paused (json-bool)
-- "singlestep": true if the VM is in single step mode,
- false otherwise (json-bool)
-- "status": one of the following values (json-string)
- "debug" - QEMU is running on a debugger
- "inmigrate" - guest is paused waiting for an incoming migration
- "internal-error" - An internal error that prevents further guest
- execution has occurred
- "io-error" - the last IOP has failed and the device is configured
- to pause on I/O errors
- "paused" - guest has been paused via the 'stop' command
- "postmigrate" - guest is paused following a successful 'migrate'
- "prelaunch" - QEMU was started with -S and guest has not started
- "finish-migrate" - guest is paused to finish the migration process
- "restore-vm" - guest is paused to restore VM state
- "running" - guest is actively running
- "save-vm" - guest is paused to save the VM state
- "shutdown" - guest is shut down (and -no-shutdown is in use)
- "watchdog" - the watchdog action is configured to pause and
- has been triggered
-
-Example:
-
--> { "execute": "query-status" }
-<- { "return": { "running": true, "singlestep": false, "status": "running" } }
-
-query-mice
-----------
-
-Show VM mice information.
-
-Each mouse is represented by a json-object; the returned value is a json-array
-of all mice.
-
-The mouse json-object contains the following:
-
-- "name": mouse's name (json-string)
-- "index": mouse's index (json-int)
-- "current": true if this mouse is receiving events, false otherwise (json-bool)
-- "absolute": true if the mouse generates absolute input events (json-bool)
-
-Example:
-
--> { "execute": "query-mice" }
-<- {
- "return":[
- {
- "name":"QEMU Microsoft Mouse",
- "index":0,
- "current":false,
- "absolute":false
- },
- {
- "name":"QEMU PS/2 Mouse",
- "index":1,
- "current":true,
- "absolute":true
- }
- ]
- }
-
-query-vnc
----------
-
-Show VNC server information.
-
-Return a json-object with server information. Connected clients are returned
-as a json-array of json-objects.
-
-The main json-object contains the following:
-
-- "enabled": true or false (json-bool)
-- "host": server's IP address (json-string)
-- "family": address family (json-string)
- - Possible values: "ipv4", "ipv6", "unix", "unknown"
-- "service": server's port number (json-string)
-- "auth": authentication method (json-string)
- - Possible values: "invalid", "none", "ra2", "ra2ne", "sasl", "tight",
- "tls", "ultra", "unknown", "vencrypt", "vencrypt",
- "vencrypt+plain", "vencrypt+tls+none",
- "vencrypt+tls+plain", "vencrypt+tls+sasl",
- "vencrypt+tls+vnc", "vencrypt+x509+none",
- "vencrypt+x509+plain", "vencrypt+x509+sasl",
- "vencrypt+x509+vnc", "vnc"
-- "clients": a json-array of all connected clients
-
-Clients are described by a json-object, each one containing the following:
-
-- "host": client's IP address (json-string)
-- "family": address family (json-string)
- - Possible values: "ipv4", "ipv6", "unix", "unknown"
-- "service": client's port number (json-string)
-- "x509_dname": TLS dname (json-string, optional)
-- "sasl_username": SASL username (json-string, optional)
-
-Example:
-
--> { "execute": "query-vnc" }
-<- {
- "return":{
- "enabled":true,
- "host":"0.0.0.0",
- "service":"50402",
- "auth":"vnc",
- "family":"ipv4",
- "clients":[
- {
- "host":"127.0.0.1",
- "service":"50401",
- "family":"ipv4"
- }
- ]
- }
- }
-
-query-spice
------------
-
-Show SPICE server information.
-
-Return a json-object with server information. Connected clients are returned
-as a json-array of json-objects.
-
-The main json-object contains the following:
-
-- "enabled": true or false (json-bool)
-- "host": server's IP address (json-string)
-- "port": server's port number (json-int, optional)
-- "tls-port": server's port number (json-int, optional)
-- "auth": authentication method (json-string)
- - Possible values: "none", "spice"
-- "channels": a json-array of all active channels clients
-
-Channels are described by a json-object, each one containing the following:
-
-- "host": client's IP address (json-string)
-- "family": address family (json-string)
- - Possible values: "ipv4", "ipv6", "unix", "unknown"
-- "port": client's port number (json-string)
-- "connection-id": spice connection id. All channels with the same id
- belong to the same spice session (json-int)
-- "channel-type": channel type. "1" is the main control channel, filter for
- this one if you want track spice sessions only (json-int)
-- "channel-id": channel id. Usually "0", might be different needed when
- multiple channels of the same type exist, such as multiple
- display channels in a multihead setup (json-int)
-- "tls": whether the channel is encrypted (json-bool)
-
-Example:
-
--> { "execute": "query-spice" }
-<- {
- "return": {
- "enabled": true,
- "auth": "spice",
- "port": 5920,
- "tls-port": 5921,
- "host": "0.0.0.0",
- "channels": [
- {
- "port": "54924",
- "family": "ipv4",
- "channel-type": 1,
- "connection-id": 1804289383,
- "host": "127.0.0.1",
- "channel-id": 0,
- "tls": true
- },
- {
- "port": "36710",
- "family": "ipv4",
- "channel-type": 4,
- "connection-id": 1804289383,
- "host": "127.0.0.1",
- "channel-id": 0,
- "tls": false
- },
- [ ... more channels follow ... ]
- ]
- }
- }
-
-query-name
-----------
-
-Show VM name.
-
-Return a json-object with the following information:
-
-- "name": VM's name (json-string, optional)
-
-Example:
-
--> { "execute": "query-name" }
-<- { "return": { "name": "qemu-name" } }
-
-query-uuid
-----------
-
-Show VM UUID.
-
-Return a json-object with the following information:
-
-- "UUID": Universally Unique Identifier (json-string)
-
-Example:
-
--> { "execute": "query-uuid" }
-<- { "return": { "UUID": "550e8400-e29b-41d4-a716-446655440000" } }
-
-query-command-line-options
---------------------------
-
-Show command line option schema.
-
-Return a json-array of command line option schema for all options (or for
-the given option), returning an error if the given option doesn't exist.
-
-Each array entry contains the following:
-
-- "option": option name (json-string)
-- "parameters": a json-array describes all parameters of the option:
- - "name": parameter name (json-string)
- - "type": parameter type (one of 'string', 'boolean', 'number',
- or 'size')
- - "help": human readable description of the parameter
- (json-string, optional)
- - "default": default value string for the parameter
- (json-string, optional)
-
-Example:
-
--> { "execute": "query-command-line-options", "arguments": { "option": "option-rom" } }
-<- { "return": [
- {
- "parameters": [
- {
- "name": "romfile",
- "type": "string"
- },
- {
- "name": "bootindex",
- "type": "number"
- }
- ],
- "option": "option-rom"
- }
- ]
- }
-
-query-migrate
--------------
-
-Migration status.
-
-Return a json-object. If migration is active there will be another json-object
-with RAM migration status and if block migration is active another one with
-block migration status.
-
-The main json-object contains the following:
-
-- "status": migration status (json-string)
- - Possible values: "setup", "active", "completed", "failed", "cancelled"
-- "total-time": total amount of ms since migration started. If
- migration has ended, it returns the total migration
- time (json-int)
-- "setup-time" amount of setup time in milliseconds _before_ the
- iterations begin but _after_ the QMP command is issued.
- This is designed to provide an accounting of any activities
- (such as RDMA pinning) which may be expensive, but do not
- actually occur during the iterative migration rounds
- themselves. (json-int)
-- "downtime": only present when migration has finished correctly
- total amount in ms for downtime that happened (json-int)
-- "expected-downtime": only present while migration is active
- total amount in ms for downtime that was calculated on
- the last bitmap round (json-int)
-- "ram": only present if "status" is "active", it is a json-object with the
- following RAM information:
- - "transferred": amount transferred in bytes (json-int)
- - "remaining": amount remaining to transfer in bytes (json-int)
- - "total": total amount of memory in bytes (json-int)
- - "duplicate": number of pages filled entirely with the same
- byte (json-int)
- These are sent over the wire much more efficiently.
- - "skipped": number of skipped zero pages (json-int)
- - "normal" : number of whole pages transferred. I.e. they
- were not sent as duplicate or xbzrle pages (json-int)
- - "normal-bytes" : number of bytes transferred in whole
- pages. This is just normal pages times size of one page,
- but this way upper levels don't need to care about page
- size (json-int)
- - "dirty-sync-count": times that dirty ram was synchronized (json-int)
-- "disk": only present if "status" is "active" and it is a block migration,
- it is a json-object with the following disk information:
- - "transferred": amount transferred in bytes (json-int)
- - "remaining": amount remaining to transfer in bytes json-int)
- - "total": total disk size in bytes (json-int)
-- "xbzrle-cache": only present if XBZRLE is active.
- It is a json-object with the following XBZRLE information:
- - "cache-size": XBZRLE cache size in bytes
- - "bytes": number of bytes transferred for XBZRLE compressed pages
- - "pages": number of XBZRLE compressed pages
- - "cache-miss": number of XBRZRLE page cache misses
- - "cache-miss-rate": rate of XBRZRLE page cache misses
- - "overflow": number of times XBZRLE overflows. This means
- that the XBZRLE encoding was bigger than just sent the
- whole page, and then we sent the whole page instead (as as
- normal page).
-
-Examples:
-
-1. Before the first migration
-
--> { "execute": "query-migrate" }
-<- { "return": {} }
-
-2. Migration is done and has succeeded
-
--> { "execute": "query-migrate" }
-<- { "return": {
- "status": "completed",
- "ram":{
- "transferred":123,
- "remaining":123,
- "total":246,
- "total-time":12345,
- "setup-time":12345,
- "downtime":12345,
- "duplicate":123,
- "normal":123,
- "normal-bytes":123456,
- "dirty-sync-count":15
- }
- }
- }
-
-3. Migration is done and has failed
-
--> { "execute": "query-migrate" }
-<- { "return": { "status": "failed" } }
-
-4. Migration is being performed and is not a block migration:
-
--> { "execute": "query-migrate" }
-<- {
- "return":{
- "status":"active",
- "ram":{
- "transferred":123,
- "remaining":123,
- "total":246,
- "total-time":12345,
- "setup-time":12345,
- "expected-downtime":12345,
- "duplicate":123,
- "normal":123,
- "normal-bytes":123456,
- "dirty-sync-count":15
- }
- }
- }
-
-5. Migration is being performed and is a block migration:
-
--> { "execute": "query-migrate" }
-<- {
- "return":{
- "status":"active",
- "ram":{
- "total":1057024,
- "remaining":1053304,
- "transferred":3720,
- "total-time":12345,
- "setup-time":12345,
- "expected-downtime":12345,
- "duplicate":123,
- "normal":123,
- "normal-bytes":123456,
- "dirty-sync-count":15
- },
- "disk":{
- "total":20971520,
- "remaining":20880384,
- "transferred":91136
- }
- }
- }
-
-6. Migration is being performed and XBZRLE is active:
-
--> { "execute": "query-migrate" }
-<- {
- "return":{
- "status":"active",
- "capabilities" : [ { "capability": "xbzrle", "state" : true } ],
- "ram":{
- "total":1057024,
- "remaining":1053304,
- "transferred":3720,
- "total-time":12345,
- "setup-time":12345,
- "expected-downtime":12345,
- "duplicate":10,
- "normal":3333,
- "normal-bytes":3412992,
- "dirty-sync-count":15
- },
- "xbzrle-cache":{
- "cache-size":67108864,
- "bytes":20971520,
- "pages":2444343,
- "cache-miss":2244,
- "cache-miss-rate":0.123,
- "overflow":34434
- }
- }
- }
-
-migrate-set-capabilities
-------------------------
-
-Enable/Disable migration capabilities
-
-- "xbzrle": XBZRLE support
-- "rdma-pin-all": pin all pages when using RDMA during migration
-- "auto-converge": throttle down guest to help convergence of migration
-- "zero-blocks": compress zero blocks during block migration
-- "compress": use multiple compression threads to accelerate live migration
-- "events": generate events for each migration state change
-- "postcopy-ram": postcopy mode for live migration
-- "x-colo": COarse-Grain LOck Stepping (COLO) for Non-stop Service
-
-Arguments:
-
-- "capabilities": json-array of objects, each containing a "capability" name
- (json-string) and a "state" (json-bool)
-
-Example:
-
--> { "execute": "migrate-set-capabilities" , "arguments":
- { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
-
-query-migrate-capabilities
---------------------------
-
-Query current migration capabilities
-
-- "capabilities": migration capabilities state
- - "xbzrle" : XBZRLE state (json-bool)
- - "rdma-pin-all" : RDMA Pin Page state (json-bool)
- - "auto-converge" : Auto Converge state (json-bool)
- - "zero-blocks" : Zero Blocks state (json-bool)
- - "compress": Multiple compression threads state (json-bool)
- - "events": Migration state change event state (json-bool)
- - "postcopy-ram": postcopy ram state (json-bool)
- - "x-colo": COarse-Grain LOck Stepping for Non-stop Service (json-bool)
-
-Arguments: None.
-
-Example:
-
--> { "execute": "query-migrate-capabilities" }
-<- {"return": [
- {"state": false, "capability": "xbzrle"},
- {"state": false, "capability": "rdma-pin-all"},
- {"state": false, "capability": "auto-converge"},
- {"state": false, "capability": "zero-blocks"},
- {"state": false, "capability": "compress"},
- {"state": true, "capability": "events"},
- {"state": false, "capability": "postcopy-ram"},
- {"state": false, "capability": "x-colo"}
- ]}
-
-migrate-set-parameters
-----------------------
-
-Set migration parameters
-
-Arguments:
-
-- "compress-level": set compression level during migration (json-int)
-- "compress-threads": set compression thread count for migration (json-int)
-- "decompress-threads": set decompression thread count for migration (json-int)
-- "cpu-throttle-initial": set initial percentage of time guest cpus are
- throttled for auto-converge (json-int)
-- "cpu-throttle-increment": set throttle increasing percentage for
- auto-converge (json-int)
-- "max-bandwidth": set maximum speed for migrations (in bytes/sec) (json-int)
-- "downtime-limit": set maximum tolerated downtime (in milliseconds) for
- migrations (json-int)
-- "x-checkpoint-delay": set the delay time for periodic checkpoint (json-int)
-
-Example:
-
--> { "execute": "migrate-set-parameters" , "arguments":
- { "compress-level": 1 } }
-
-query-migrate-parameters
-------------------------
-
-Query current migration parameters
-
-- "parameters": migration parameters value
- - "compress-level" : compression level value (json-int)
- - "compress-threads" : compression thread count value (json-int)
- - "decompress-threads" : decompression thread count value (json-int)
- - "cpu-throttle-initial" : initial percentage of time guest cpus are
- throttled (json-int)
- - "cpu-throttle-increment" : throttle increasing percentage for
- auto-converge (json-int)
- - "max-bandwidth" : maximium migration speed in bytes per second
- (json-int)
- - "downtime-limit" : maximum tolerated downtime of migration in
- milliseconds (json-int)
-
-Arguments: None.
-
-Example:
-
--> { "execute": "query-migrate-parameters" }
-<- {
- "return": {
- "decompress-threads": 2,
- "cpu-throttle-increment": 10,
- "compress-threads": 8,
- "compress-level": 1,
- "cpu-throttle-initial": 20,
- "max-bandwidth": 33554432,
- "downtime-limit": 300
- }
- }
-
-query-balloon
--------------
-
-Show balloon information.
-
-Make an asynchronous request for balloon info. When the request completes a
-json-object will be returned containing the following data:
-
-- "actual": current balloon value in bytes (json-int)
-
-Example:
-
--> { "execute": "query-balloon" }
-<- {
- "return":{
- "actual":1073741824,
- }
- }
-
-query-tpm
----------
-
-Return information about the TPM device.
-
-Arguments: None
-
-Example:
-
--> { "execute": "query-tpm" }
-<- { "return":
- [
- { "model": "tpm-tis",
- "options":
- { "type": "passthrough",
- "data":
- { "cancel-path": "/sys/class/misc/tpm0/device/cancel",
- "path": "/dev/tpm0"
- }
- },
- "id": "tpm0"
- }
- ]
- }
-
-query-tpm-models
-----------------
-
-Return a list of supported TPM models.
-
-Arguments: None
-
-Example:
-
--> { "execute": "query-tpm-models" }
-<- { "return": [ "tpm-tis" ] }
-
-query-tpm-types
----------------
-
-Return a list of supported TPM types.
-
-Arguments: None
-
-Example:
-
--> { "execute": "query-tpm-types" }
-<- { "return": [ "passthrough" ] }
-
-chardev-add
-----------------
-
-Add a chardev.
-
-Arguments:
-
-- "id": the chardev's ID, must be unique (json-string)
-- "backend": chardev backend type + parameters
-
-Examples:
-
--> { "execute" : "chardev-add",
- "arguments" : { "id" : "foo",
- "backend" : { "type" : "null", "data" : {} } } }
-<- { "return": {} }
-
--> { "execute" : "chardev-add",
- "arguments" : { "id" : "bar",
- "backend" : { "type" : "file",
- "data" : { "out" : "/tmp/bar.log" } } } }
-<- { "return": {} }
-
--> { "execute" : "chardev-add",
- "arguments" : { "id" : "baz",
- "backend" : { "type" : "pty", "data" : {} } } }
-<- { "return": { "pty" : "/dev/pty/42" } }
-
-chardev-remove
---------------
-
-Remove a chardev.
-
-Arguments:
-
-- "id": the chardev's ID, must exist and not be in use (json-string)
-
-Example:
-
--> { "execute": "chardev-remove", "arguments": { "id" : "foo" } }
-<- { "return": {} }
-
-query-rx-filter
----------------
-
-Show rx-filter information.
-
-Returns a json-array of rx-filter information for all NICs (or for the
-given NIC), returning an error if the given NIC doesn't exist, the given
-NIC doesn't support rx-filter querying, or the given net client isn't a
-NIC.
-
-The query clears the event notification flag of each NIC; QEMU will then
-start emitting NIC_RX_FILTER_CHANGED events to the QMP monitor again.
-
-Each array entry contains the following:
-
-- "name": net client name (json-string)
-- "promiscuous": promiscuous mode is enabled (json-bool)
-- "multicast": multicast receive state (one of 'normal', 'none', 'all')
-- "unicast": unicast receive state (one of 'normal', 'none', 'all')
-- "vlan": vlan receive state (one of 'normal', 'none', 'all') (Since 2.0)
-- "broadcast-allowed": allow to receive broadcast (json-bool)
-- "multicast-overflow": multicast table is overflowed (json-bool)
-- "unicast-overflow": unicast table is overflowed (json-bool)
-- "main-mac": main macaddr string (json-string)
-- "vlan-table": a json-array of active vlan id
-- "unicast-table": a json-array of unicast macaddr string
-- "multicast-table": a json-array of multicast macaddr string
-
-Example:
-
--> { "execute": "query-rx-filter", "arguments": { "name": "vnet0" } }
-<- { "return": [
- {
- "promiscuous": true,
- "name": "vnet0",
- "main-mac": "52:54:00:12:34:56",
- "unicast": "normal",
- "vlan": "normal",
- "vlan-table": [
- 4,
- 0
- ],
- "unicast-table": [
- ],
- "multicast": "normal",
- "multicast-overflow": false,
- "unicast-overflow": false,
- "multicast-table": [
- "01:00:5e:00:00:01",
- "33:33:00:00:00:01",
- "33:33:ff:12:34:56"
- ],
- "broadcast-allowed": false
- }
- ]
- }
-
-blockdev-add
-------------
-
-Add a block device.
-
-This command is still a work in progress. Among other things, it doesn't
-support all block drivers yet. Stay away from it unless you want to help
-with its development.
-
-For the arguments, see the QAPI schema documentation of BlockdevOptions.
-
-Example (1):
-
--> { "execute": "blockdev-add",
- "arguments": { "driver": "qcow2",
- "file": { "driver": "file",
- "filename": "test.qcow2" } } }
-<- { "return": {} }
-
-Example (2):
-
--> { "execute": "blockdev-add",
- "arguments": {
- "driver": "qcow2",
- "node-name": "my_disk",
- "discard": "unmap",
- "cache": {
- "direct": true,
- "writeback": true
- },
- "file": {
- "driver": "file",
- "filename": "/tmp/test.qcow2"
- },
- "backing": {
- "driver": "raw",
- "file": {
- "driver": "file",
- "filename": "/dev/fdset/4"
- }
- }
- }
- }
-
-<- { "return": {} }
-
-x-blockdev-del
---------------
-Since 2.5
-
-Deletes a block device that has been added using blockdev-add.
-The command will fail if the node is attached to a device or is
-otherwise being used.
-
-This command is still a work in progress and is considered
-experimental. Stay away from it unless you want to help with its
-development.
-
-Arguments:
-
-- "node-name": Name of the graph node to delete (json-string)
-
-Example:
-
--> { "execute": "blockdev-add",
- "arguments": {
- "driver": "qcow2",
- "node-name": "node0",
- "file": {
- "driver": "file",
- "filename": "test.qcow2"
- }
- }
- }
-
-<- { "return": {} }
-
--> { "execute": "x-blockdev-del",
- "arguments": { "node-name": "node0" }
- }
-<- { "return": {} }
-
-blockdev-open-tray
-------------------
-
-Opens a block device's tray. If there is a block driver state tree inserted as a
-medium, it will become inaccessible to the guest (but it will remain associated
-with the block device, so closing the tray will make it accessible again).
-
-If the tray was already open before, this will be a no-op.
-
-Once the tray opens, a DEVICE_TRAY_MOVED event is emitted. There are cases in
-which no such event will be generated; these include:
-- if the guest has locked the tray, @force is false and the guest does not
- respond to the eject request
-- if the BlockBackend denoted by @device does not have a guest device attached
- to it
-- if the guest device does not have an actual tray and is empty, for instance
- for floppy disk drives
-
-Arguments:
-
-- "device": block device name (deprecated, use @id instead)
- (json-string, optional)
-- "id": the name or QOM path of the guest device (json-string, optional)
-- "force": if false (the default), an eject request will be sent to the guest if
- it has locked the tray (and the tray will not be opened immediately);
- if true, the tray will be opened regardless of whether it is locked
- (json-bool, optional)
-
-Example:
-
--> { "execute": "blockdev-open-tray",
- "arguments": { "id": "ide0-1-0" } }
-
-<- { "timestamp": { "seconds": 1418751016,
- "microseconds": 716996 },
- "event": "DEVICE_TRAY_MOVED",
- "data": { "device": "ide1-cd0",
- "id": "ide0-1-0",
- "tray-open": true } }
-
-<- { "return": {} }
-
-blockdev-close-tray
--------------------
-
-Closes a block device's tray. If there is a block driver state tree associated
-with the block device (which is currently ejected), that tree will be loaded as
-the medium.
-
-If the tray was already closed before, this will be a no-op.
-
-Arguments:
-
-- "device": block device name (deprecated, use @id instead)
- (json-string, optional)
-- "id": the name or QOM path of the guest device (json-string, optional)
-
-Example:
-
--> { "execute": "blockdev-close-tray",
- "arguments": { "id": "ide0-1-0" } }
-
-<- { "timestamp": { "seconds": 1418751345,
- "microseconds": 272147 },
- "event": "DEVICE_TRAY_MOVED",
- "data": { "device": "ide1-cd0",
- "id": "ide0-1-0",
- "tray-open": false } }
-
-<- { "return": {} }
-
-x-blockdev-remove-medium
-------------------------
-
-Removes a medium (a block driver state tree) from a block device. That block
-device's tray must currently be open (unless there is no attached guest device).
-
-If the tray is open and there is no medium inserted, this will be a no-op.
-
-This command is still a work in progress and is considered experimental.
-Stay away from it unless you want to help with its development.
-
-Arguments:
-
-- "device": block device name (deprecated, use @id instead)
- (json-string, optional)
-- "id": the name or QOM path of the guest device (json-string, optional)
-
-Example:
-
--> { "execute": "x-blockdev-remove-medium",
- "arguments": { "id": "ide0-1-0" } }
-
-<- { "error": { "class": "GenericError",
- "desc": "Tray of device 'ide0-1-0' is not open" } }
-
--> { "execute": "blockdev-open-tray",
- "arguments": { "id": "ide0-1-0" } }
-
-<- { "timestamp": { "seconds": 1418751627,
- "microseconds": 549958 },
- "event": "DEVICE_TRAY_MOVED",
- "data": { "device": "ide1-cd0",
- "id": "ide0-1-0",
- "tray-open": true } }
-
-<- { "return": {} }
-
--> { "execute": "x-blockdev-remove-medium",
- "arguments": { "device": "ide0-1-0" } }
-
-<- { "return": {} }
-
-x-blockdev-insert-medium
-------------------------
-
-Inserts a medium (a block driver state tree) into a block device. That block
-device's tray must currently be open (unless there is no attached guest device)
-and there must be no medium inserted already.
-
-This command is still a work in progress and is considered experimental.
-Stay away from it unless you want to help with its development.
-
-Arguments:
-
-- "device": block device name (deprecated, use @id instead)
- (json-string, optional)
-- "id": the name or QOM path of the guest device (json-string, optional)
-- "node-name": root node of the BDS tree to insert into the block device
-
-Example:
-
--> { "execute": "blockdev-add",
- "arguments": { { "node-name": "node0",
- "driver": "raw",
- "file": { "driver": "file",
- "filename": "fedora.iso" } } }
-
-<- { "return": {} }
-
--> { "execute": "x-blockdev-insert-medium",
- "arguments": { "id": "ide0-1-0",
- "node-name": "node0" } }
-
-<- { "return": {} }
-
-x-blockdev-change
------------------
-
-Dynamically reconfigure the block driver state graph. It can be used
-to add, remove, insert or replace a graph node. Currently only the
-Quorum driver implements this feature to add or remove its child. This
-is useful to fix a broken quorum child.
-
-If @node is specified, it will be inserted under @parent. @child
-may not be specified in this case. If both @parent and @child are
-specified but @node is not, @child will be detached from @parent.
-
-Arguments:
-- "parent": the id or name of the parent node (json-string)
-- "child": the name of a child under the given parent node (json-string, optional)
-- "node": the name of the node that will be added (json-string, optional)
-
-Note: this command is experimental, and not a stable API. It doesn't
-support all kinds of operations, all kinds of children, nor all block
-drivers.
-
-Warning: The data in a new quorum child MUST be consistent with that of
-the rest of the array.
-
-Example:
-
-Add a new node to a quorum
--> { "execute": "blockdev-add",
- "arguments": { "driver": "raw",
- "node-name": "new_node",
- "file": { "driver": "file",
- "filename": "test.raw" } } }
-<- { "return": {} }
--> { "execute": "x-blockdev-change",
- "arguments": { "parent": "disk1",
- "node": "new_node" } }
-<- { "return": {} }
-
-Delete a quorum's node
--> { "execute": "x-blockdev-change",
- "arguments": { "parent": "disk1",
- "child": "children.1" } }
-<- { "return": {} }
-
-query-named-block-nodes
------------------------
-
-Return a list of BlockDeviceInfo for all the named block driver nodes
-
-Example:
-
--> { "execute": "query-named-block-nodes" }
-<- { "return": [ { "ro":false,
- "drv":"qcow2",
- "encrypted":false,
- "file":"disks/test.qcow2",
- "node-name": "my-node",
- "backing_file_depth":1,
- "bps":1000000,
- "bps_rd":0,
- "bps_wr":0,
- "iops":1000000,
- "iops_rd":0,
- "iops_wr":0,
- "bps_max": 8000000,
- "bps_rd_max": 0,
- "bps_wr_max": 0,
- "iops_max": 0,
- "iops_rd_max": 0,
- "iops_wr_max": 0,
- "iops_size": 0,
- "write_threshold": 0,
- "image":{
- "filename":"disks/test.qcow2",
- "format":"qcow2",
- "virtual-size":2048000,
- "backing_file":"base.qcow2",
- "full-backing-filename":"disks/base.qcow2",
- "backing-filename-format":"qcow2",
- "snapshots":[
- {
- "id": "1",
- "name": "snapshot1",
- "vm-state-size": 0,
- "date-sec": 10000200,
- "date-nsec": 12,
- "vm-clock-sec": 206,
- "vm-clock-nsec": 30
- }
- ],
- "backing-image":{
- "filename":"disks/base.qcow2",
- "format":"qcow2",
- "virtual-size":2048000
- }
- } } ] }
-
-blockdev-change-medium
-----------------------
-
-Changes the medium inserted into a block device by ejecting the current medium
-and loading a new image file which is inserted as the new medium.
-
-Arguments:
-
-- "device": block device name (deprecated, use @id instead)
- (json-string, optional)
-- "id": the name or QOM path of the guest device (json-string, optional)
-- "filename": filename of the new image (json-string)
-- "format": format of the new image (json-string, optional)
-- "read-only-mode": new read-only mode (json-string, optional)
- - Possible values: "retain" (default), "read-only", "read-write"
-
-Examples:
-
-1. Change a removable medium
-
--> { "execute": "blockdev-change-medium",
- "arguments": { "id": "ide0-1-0",
- "filename": "/srv/images/Fedora-12-x86_64-DVD.iso",
- "format": "raw" } }
-<- { "return": {} }
-
-2. Load a read-only medium into a writable drive
-
--> { "execute": "blockdev-change-medium",
- "arguments": { "id": "floppyA",
- "filename": "/srv/images/ro.img",
- "format": "raw",
- "read-only-mode": "retain" } }
-
-<- { "error":
- { "class": "GenericError",
- "desc": "Could not open '/srv/images/ro.img': Permission denied" } }
-
--> { "execute": "blockdev-change-medium",
- "arguments": { "id": "floppyA",
- "filename": "/srv/images/ro.img",
- "format": "raw",
- "read-only-mode": "read-only" } }
-
-<- { "return": {} }
-
-query-memdev
-------------
-
-Show memory devices information.
-
-
-Example (1):
-
--> { "execute": "query-memdev" }
-<- { "return": [
- {
- "size": 536870912,
- "merge": false,
- "dump": true,
- "prealloc": false,
- "host-nodes": [0, 1],
- "policy": "bind"
- },
- {
- "size": 536870912,
- "merge": false,
- "dump": true,
- "prealloc": true,
- "host-nodes": [2, 3],
- "policy": "preferred"
- }
- ]
- }
-
-query-memory-devices
---------------------
-
-Return a list of memory devices.
-
-Example:
--> { "execute": "query-memory-devices" }
-<- { "return": [ { "data":
- { "addr": 5368709120,
- "hotpluggable": true,
- "hotplugged": true,
- "id": "d1",
- "memdev": "/objects/memX",
- "node": 0,
- "size": 1073741824,
- "slot": 0},
- "type": "dimm"
- } ] }
-
-query-acpi-ospm-status
-----------------------
-
-Return list of ACPIOSTInfo for devices that support status reporting
-via ACPI _OST method.
-
-Example:
--> { "execute": "query-acpi-ospm-status" }
-<- { "return": [ { "device": "d1", "slot": "0", "slot-type": "DIMM", "source": 1, "status": 0},
- { "slot": "1", "slot-type": "DIMM", "source": 0, "status": 0},
- { "slot": "2", "slot-type": "DIMM", "source": 0, "status": 0},
- { "slot": "3", "slot-type": "DIMM", "source": 0, "status": 0}
- ]}
-
-rtc-reset-reinjection
----------------------
-
-Reset the RTC interrupt reinjection backlog.
-
-Arguments: None.
-
-Example:
-
--> { "execute": "rtc-reset-reinjection" }
-<- { "return": {} }
-
-trace-event-get-state
----------------------
-
-Query the state of events.
-
-Arguments:
-
-- "name": Event name pattern (json-string).
-- "vcpu": The vCPU to query, any vCPU by default (json-int, optional).
-
-An event is returned if:
-- its name matches the "name" pattern, and
-- if "vcpu" is given, the event has the "vcpu" property.
-
-Therefore, if "vcpu" is given, the operation will only match per-vCPU events,
-returning their state on the specified vCPU. Special case: if "name" is an exact
-match, "vcpu" is given and the event does not have the "vcpu" property, an error
-is returned.
-
-Example:
-
--> { "execute": "trace-event-get-state", "arguments": { "name": "qemu_memalign" } }
-<- { "return": [ { "name": "qemu_memalign", "state": "disabled" } ] }
-
-trace-event-set-state
----------------------
-
-Set the state of events.
-
-Arguments:
-
-- "name": Event name pattern (json-string).
-- "enable": Whether to enable or disable the event (json-bool).
-- "ignore-unavailable": Whether to ignore errors for events that cannot be
- changed (json-bool, optional).
-- "vcpu": The vCPU to act upon, all vCPUs by default (json-int, optional).
-
-An event's state is modified if:
-- its name matches the "name" pattern, and
-- if "vcpu" is given, the event has the "vcpu" property.
-
-Therefore, if "vcpu" is given, the operation will only match per-vCPU events,
-setting their state on the specified vCPU. Special case: if "name" is an exact
-match, "vcpu" is given and the event does not have the "vcpu" property, an error
-is returned.
-
-Example:
-
--> { "execute": "trace-event-set-state", "arguments": { "name": "qemu_memalign", "enable": "true" } }
-<- { "return": {} }
-
-input-send-event
-----------------
-
-Send input event to guest.
-
-Arguments:
-
-- "device": display device (json-string, optional)
-- "head": display head (json-int, optional)
-- "events": list of input events
-
-The consoles are visible in the qom tree, under
-/backend/console[$index]. They have a device link and head property, so
-it is possible to map which console belongs to which device and display.
-
-Example (1):
-
-Press left mouse button.
-
--> { "execute": "input-send-event",
- "arguments": { "device": "video0",
- "events": [ { "type": "btn",
- "data" : { "down": true, "button": "left" } } ] } }
-<- { "return": {} }
-
--> { "execute": "input-send-event",
- "arguments": { "device": "video0",
- "events": [ { "type": "btn",
- "data" : { "down": false, "button": "left" } } ] } }
-<- { "return": {} }
-
-Example (2):
-
-Press ctrl-alt-del.
-
--> { "execute": "input-send-event",
- "arguments": { "events": [
- { "type": "key", "data" : { "down": true,
- "key": {"type": "qcode", "data": "ctrl" } } },
- { "type": "key", "data" : { "down": true,
- "key": {"type": "qcode", "data": "alt" } } },
- { "type": "key", "data" : { "down": true,
- "key": {"type": "qcode", "data": "delete" } } } ] } }
-<- { "return": {} }
-
-Example (3):
-
-Move mouse pointer to absolute coordinates (20000, 400).
-
--> { "execute": "input-send-event" ,
- "arguments": { "events": [
- { "type": "abs", "data" : { "axis": "x", "value" : 20000 } },
- { "type": "abs", "data" : { "axis": "y", "value" : 400 } } ] } }
-<- { "return": {} }
-
-block-set-write-threshold
--------------------------
-
-Change the write threshold for a block drive. The threshold is an offset,
-thus must be non-negative. Default is no write threshold.
-Setting the threshold to zero disables it.
-
-Arguments:
-
-- "node-name": the node name in the block driver state graph (json-string)
-- "write-threshold": the write threshold in bytes (json-int)
-
-Example:
-
--> { "execute": "block-set-write-threshold",
- "arguments": { "node-name": "mydev",
- "write-threshold": 17179869184 } }
-<- { "return": {} }
-
-Show rocker switch
-------------------
-
-Arguments:
-
-- "name": switch name
-
-Example:
-
--> { "execute": "query-rocker", "arguments": { "name": "sw1" } }
-<- { "return": {"name": "sw1", "ports": 2, "id": 1327446905938}}
-
-Show rocker switch ports
-------------------------
-
-Arguments:
-
-- "name": switch name
-
-Example:
-
--> { "execute": "query-rocker-ports", "arguments": { "name": "sw1" } }
-<- { "return": [ {"duplex": "full", "enabled": true, "name": "sw1.1",
- "autoneg": "off", "link-up": true, "speed": 10000},
- {"duplex": "full", "enabled": true, "name": "sw1.2",
- "autoneg": "off", "link-up": true, "speed": 10000}
- ]}
-
-Show rocker switch OF-DPA flow tables
--------------------------------------
-
-Arguments:
-
-- "name": switch name
-- "tbl-id": (optional) flow table ID
-
-Example:
-
--> { "execute": "query-rocker-of-dpa-flows", "arguments": { "name": "sw1" } }
-<- { "return": [ {"key": {"in-pport": 0, "priority": 1, "tbl-id": 0},
- "hits": 138,
- "cookie": 0,
- "action": {"goto-tbl": 10},
- "mask": {"in-pport": 4294901760}
- },
- {...more...},
- ]}
-
-Show rocker OF-DPA group tables
--------------------------------
-
-Arguments:
-
-- "name": switch name
-- "type": (optional) group type
-
-Example:
-
--> { "execute": "query-rocker-of-dpa-groups", "arguments": { "name": "sw1" } }
-<- { "return": [ {"type": 0, "out-pport": 2, "pport": 2, "vlan-id": 3841,
- "pop-vlan": 1, "id": 251723778},
- {"type": 0, "out-pport": 0, "pport": 0, "vlan-id": 3841,
- "pop-vlan": 1, "id": 251723776},
- {"type": 0, "out-pport": 1, "pport": 1, "vlan-id": 3840,
- "pop-vlan": 1, "id": 251658241},
- {"type": 0, "out-pport": 0, "pport": 0, "vlan-id": 3840,
- "pop-vlan": 1, "id": 251658240}
- ]}
-
-query-gic-capabilities
-----------------------
-
-Return a list of GICCapability objects, describing supported GIC
-(Generic Interrupt Controller) versions.
-
-Arguments: None
-
-Example:
-
--> { "execute": "query-gic-capabilities" }
-<- { "return": [{ "version": 2, "emulated": true, "kernel": false },
- { "version": 3, "emulated": false, "kernel": true } ] }
-
-Show existing/possible CPUs
----------------------------
-
-Arguments: None.
-
-Example for pseries machine type started with
--smp 2,cores=2,maxcpus=4 -cpu POWER8:
-
--> { "execute": "query-hotpluggable-cpus" }
-<- {"return": [
- { "props": { "core-id": 8 }, "type": "POWER8-spapr-cpu-core",
- "vcpus-count": 1 },
- { "props": { "core-id": 0 }, "type": "POWER8-spapr-cpu-core",
- "vcpus-count": 1, "qom-path": "/machine/unattached/device[0]"}
- ]}
-
-Example for pc machine type started with
--smp 1,maxcpus=2:
- -> { "execute": "query-hotpluggable-cpus" }
- <- {"return": [
- {
- "type": "qemu64-x86_64-cpu", "vcpus-count": 1,
- "props": {"core-id": 0, "socket-id": 1, "thread-id": 0}
- },
- {
- "qom-path": "/machine/unattached/device[0]",
- "type": "qemu64-x86_64-cpu", "vcpus-count": 1,
- "props": {"core-id": 0, "socket-id": 0, "thread-id": 0}
- }
- ]}
diff --git a/docs/qmp-events.txt b/docs/qmp-events.txt
deleted file mode 100644
index e0a2365c63..0000000000
--- a/docs/qmp-events.txt
+++ /dev/null
@@ -1,731 +0,0 @@
- QEMU Machine Protocol Events
- ============================
-
-ACPI_DEVICE_OST
----------------
-
-Emitted when guest executes ACPI _OST method.
-
- - data: ACPIOSTInfo type as described in qapi-schema.json
-
-{ "event": "ACPI_DEVICE_OST",
- "data": { "device": "d1", "slot": "0", "slot-type": "DIMM", "source": 1, "status": 0 } }
-
-BALLOON_CHANGE
---------------
-
-Emitted when the guest changes the actual BALLOON level. This
-value is equivalent to the 'actual' field returned by the
-'query-balloon' command.
-
-Data:
-
-- "actual": actual level of the guest memory balloon in bytes (json-number)
-
-Example:
-
-{ "event": "BALLOON_CHANGE",
- "data": { "actual": 944766976 },
- "timestamp": { "seconds": 1267020223, "microseconds": 435656 } }
-
-Note: this event is rate-limited.
-
-BLOCK_IMAGE_CORRUPTED
----------------------
-
-Emitted when a disk image is being marked corrupt. The image can be
-identified by its device or node name. The 'device' field is always
-present for compatibility reasons, but it can be empty ("") if the
-image does not have a device name associated.
-
-Data:
-
-- "device": Device name (json-string)
-- "node-name": Node name (json-string, optional)
-- "msg": Informative message (e.g., reason for the corruption)
- (json-string)
-- "offset": If the corruption resulted from an image access, this
- is the host's access offset into the image
- (json-int, optional)
-- "size": If the corruption resulted from an image access, this
- is the access size (json-int, optional)
-
-Example:
-
-{ "event": "BLOCK_IMAGE_CORRUPTED",
- "data": { "device": "ide0-hd0", "node-name": "node0",
- "msg": "Prevented active L1 table overwrite", "offset": 196608,
- "size": 65536 },
- "timestamp": { "seconds": 1378126126, "microseconds": 966463 } }
-
-BLOCK_IO_ERROR
---------------
-
-Emitted when a disk I/O error occurs.
-
-Data:
-
-- "device": device name. This is always present for compatibility
- reasons, but it can be empty ("") if the image does not
- have a device name associated. (json-string)
-- "node-name": node name. Note that errors may be reported for the root node
- that is directly attached to a guest device rather than for the
- node where the error occurred. (json-string)
-- "operation": I/O operation (json-string, "read" or "write")
-- "action": action that has been taken, it's one of the following (json-string):
- "ignore": error has been ignored
- "report": error has been reported to the device
- "stop": the VM is going to stop because of the error
-
-Example:
-
-{ "event": "BLOCK_IO_ERROR",
- "data": { "device": "ide0-hd1",
- "node-name": "#block212",
- "operation": "write",
- "action": "stop" },
- "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
-
-Note: If action is "stop", a STOP event will eventually follow the
-BLOCK_IO_ERROR event.
-
-BLOCK_JOB_CANCELLED
--------------------
-
-Emitted when a block job has been cancelled.
-
-Data:
-
-- "type": Job type (json-string; "stream" for image streaming
- "commit" for block commit)
-- "device": Job identifier. Originally the device name but other
- values are allowed since QEMU 2.7 (json-string)
-- "len": Maximum progress value (json-int)
-- "offset": Current progress value (json-int)
- On success this is equal to len.
- On failure this is less than len.
-- "speed": Rate limit, bytes per second (json-int)
-
-Example:
-
-{ "event": "BLOCK_JOB_CANCELLED",
- "data": { "type": "stream", "device": "virtio-disk0",
- "len": 10737418240, "offset": 134217728,
- "speed": 0 },
- "timestamp": { "seconds": 1267061043, "microseconds": 959568 } }
-
-BLOCK_JOB_COMPLETED
--------------------
-
-Emitted when a block job has completed.
-
-Data:
-
-- "type": Job type (json-string; "stream" for image streaming
- "commit" for block commit)
-- "device": Job identifier. Originally the device name but other
- values are allowed since QEMU 2.7 (json-string)
-- "len": Maximum progress value (json-int)
-- "offset": Current progress value (json-int)
- On success this is equal to len.
- On failure this is less than len.
-- "speed": Rate limit, bytes per second (json-int)
-- "error": Error message (json-string, optional)
- Only present on failure. This field contains a human-readable
- error message. There are no semantics other than that streaming
- has failed and clients should not try to interpret the error
- string.
-
-Example:
-
-{ "event": "BLOCK_JOB_COMPLETED",
- "data": { "type": "stream", "device": "virtio-disk0",
- "len": 10737418240, "offset": 10737418240,
- "speed": 0 },
- "timestamp": { "seconds": 1267061043, "microseconds": 959568 } }
-
-BLOCK_JOB_ERROR
----------------
-
-Emitted when a block job encounters an error.
-
-Data:
-
-- "device": Job identifier. Originally the device name but other
- values are allowed since QEMU 2.7 (json-string)
-- "operation": I/O operation (json-string, "read" or "write")
-- "action": action that has been taken, it's one of the following (json-string):
- "ignore": error has been ignored, the job may fail later
- "report": error will be reported and the job canceled
- "stop": error caused job to be paused
-
-Example:
-
-{ "event": "BLOCK_JOB_ERROR",
- "data": { "device": "ide0-hd1",
- "operation": "write",
- "action": "stop" },
- "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
-
-BLOCK_JOB_READY
----------------
-
-Emitted when a block job is ready to complete.
-
-Data:
-
-- "type": Job type (json-string; "stream" for image streaming
- "commit" for block commit)
-- "device": Job identifier. Originally the device name but other
- values are allowed since QEMU 2.7 (json-string)
-- "len": Maximum progress value (json-int)
-- "offset": Current progress value (json-int)
- On success this is equal to len.
- On failure this is less than len.
-- "speed": Rate limit, bytes per second (json-int)
-
-Example:
-
-{ "event": "BLOCK_JOB_READY",
- "data": { "device": "drive0", "type": "mirror", "speed": 0,
- "len": 2097152, "offset": 2097152 }
- "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
-
-Note: The "ready to complete" status is always reset by a BLOCK_JOB_ERROR
-event.
-
-DEVICE_DELETED
---------------
-
-Emitted whenever the device removal completion is acknowledged
-by the guest.
-At this point, it's safe to reuse the specified device ID.
-Device removal can be initiated by the guest or by HMP/QMP commands.
-
-Data:
-
-- "device": device name (json-string, optional)
-- "path": device path (json-string)
-
-{ "event": "DEVICE_DELETED",
- "data": { "device": "virtio-net-pci-0",
- "path": "/machine/peripheral/virtio-net-pci-0" },
- "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
-
-DEVICE_TRAY_MOVED
------------------
-
-Emitted whenever the tray of a removable device is moved by the guest
-or by HMP/QMP commands.
-
-Data:
-
-- "device": Block device name. This is always present for compatibility
- reasons, but it can be empty ("") if the image does not have a
- device name associated. (json-string)
-- "id": The name or QOM path of the guest device (json-string)
-- "tray-open": true if the tray has been opened or false if it has been closed
- (json-bool)
-
-{ "event": "DEVICE_TRAY_MOVED",
- "data": { "device": "ide1-cd0",
- "id": "/machine/unattached/device[22]",
- "tray-open": true
- },
- "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
-
-DUMP_COMPLETED
---------------
-
-Emitted when the guest has finished one memory dump.
-
-Data:
-
-- "result": DumpQueryResult type described in qapi-schema.json
-- "error": Error message when dump failed. This is only a
- human-readable string provided when dump failed. It should not be
- parsed in any way (json-string, optional)
-
-Example:
-
-{ "event": "DUMP_COMPLETED",
- "data": {"result": {"total": 1090650112, "status": "completed",
- "completed": 1090650112} } }
-
-GUEST_PANICKED
---------------
-
-Emitted when guest OS panic is detected.
-
-Data:
-
-- "action": Action that has been taken (json-string, currently always "pause").
-
-Example:
-
-{ "event": "GUEST_PANICKED",
- "data": { "action": "pause" } }
-
-MEM_UNPLUG_ERROR
-----------------
-
-Emitted when memory hot unplug error occurs.
-
-Data:
-
-- "device": device name (json-string)
-- "msg": Informative message (e.g., reason for the error) (json-string)
-
-Example:
-
-{ "event": "MEM_UNPLUG_ERROR"
- "data": { "device": "dimm1",
- "msg": "acpi: device unplug for unsupported device"
- },
- "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
-
-NIC_RX_FILTER_CHANGED
----------------------
-
-The event is emitted once and then suppressed until the query-rx-filter
-command is executed; the first event will always be emitted.
-
-Data:
-
-- "name": net client name (json-string)
-- "path": device path (json-string)
-
-{ "event": "NIC_RX_FILTER_CHANGED",
- "data": { "name": "vnet0",
- "path": "/machine/peripheral/vnet0/virtio-backend" },
- "timestamp": { "seconds": 1368697518, "microseconds": 326866 } }
-
-POWERDOWN
----------
-
-Emitted when the Virtual Machine is powered down through the power
-control system, such as via ACPI.
-
-Data: None.
-
-Example:
-
-{ "event": "POWERDOWN",
- "timestamp": { "seconds": 1267040730, "microseconds": 682951 } }
-
-QUORUM_FAILURE
---------------
-
-Emitted by the Quorum block driver if it fails to establish a quorum.
-
-Data:
-
-- "reference": device name if defined else node name.
-- "sector-num": Number of the first sector of the failed read operation.
-- "sectors-count": Failed read operation sector count.
-
-Example:
-
-{ "event": "QUORUM_FAILURE",
- "data": { "reference": "usr1", "sector-num": 345435, "sectors-count": 5 },
- "timestamp": { "seconds": 1344522075, "microseconds": 745528 } }
-
-Note: this event is rate-limited.
-
-QUORUM_REPORT_BAD
------------------
-
-Emitted to report a corruption of a Quorum file.
-
-Data:
-
-- "type": Quorum operation type
-- "error": Error message (json-string, optional)
- Only present on failure. This field contains a human-readable
- error message. There are no semantics other than that the
- block layer reported an error and clients should not try to
- interpret the error string.
-- "node-name": The graph node name of the block driver state.
-- "sector-num": Number of the first sector of the failed read operation.
-- "sectors-count": Failed read operation sector count.
-
-Example:
-
-Read operation:
-{ "event": "QUORUM_REPORT_BAD",
- "data": { "node-name": "node0", "sector-num": 345435, "sectors-count": 5,
- "type": "read" },
- "timestamp": { "seconds": 1344522075, "microseconds": 745528 } }
-
-Flush operation:
-{ "event": "QUORUM_REPORT_BAD",
- "data": { "node-name": "node0", "sector-num": 0, "sectors-count": 2097120,
- "type": "flush", "error": "Broken pipe" },
- "timestamp": { "seconds": 1456406829, "microseconds": 291763 } }
-
-Note: this event is rate-limited.
-
-RESET
------
-
-Emitted when the Virtual Machine is reset.
-
-Data: None.
-
-Example:
-
-{ "event": "RESET",
- "timestamp": { "seconds": 1267041653, "microseconds": 9518 } }
-
-RESUME
-------
-
-Emitted when the Virtual Machine resumes execution.
-
-Data: None.
-
-Example:
-
-{ "event": "RESUME",
- "timestamp": { "seconds": 1271770767, "microseconds": 582542 } }
-
-RTC_CHANGE
-----------
-
-Emitted when the guest changes the RTC time.
-
-Data:
-
-- "offset": Offset between base RTC clock (as specified by -rtc base), and
- new RTC clock value (json-number)
-
-Example:
-
-{ "event": "RTC_CHANGE",
- "data": { "offset": 78 },
- "timestamp": { "seconds": 1267020223, "microseconds": 435656 } }
-
-Note: this event is rate-limited.
-
-SHUTDOWN
---------
-
-Emitted when the Virtual Machine has shut down, indicating that qemu
-is about to exit.
-
-Data: None.
-
-Example:
-
-{ "event": "SHUTDOWN",
- "timestamp": { "seconds": 1267040730, "microseconds": 682951 } }
-
-Note: If the command-line option "-no-shutdown" has been specified, a STOP
-event will eventually follow the SHUTDOWN event.
-
-SPICE_CONNECTED
----------------
-
-Emitted when a SPICE client connects.
-
-Data:
-
-- "server": Server information (json-object)
- - "host": IP address (json-string)
- - "port": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
-- "client": Client information (json-object)
- - "host": IP address (json-string)
- - "port": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
-
-Example:
-
-{ "timestamp": {"seconds": 1290688046, "microseconds": 388707},
- "event": "SPICE_CONNECTED",
- "data": {
- "server": { "port": "5920", "family": "ipv4", "host": "127.0.0.1"},
- "client": {"port": "52873", "family": "ipv4", "host": "127.0.0.1"}
-}}
-
-SPICE_DISCONNECTED
-------------------
-
-Emitted when a SPICE client disconnects.
-
-Data:
-
-- "server": Server information (json-object)
- - "host": IP address (json-string)
- - "port": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
-- "client": Client information (json-object)
- - "host": IP address (json-string)
- - "port": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
-
-Example:
-
-{ "timestamp": {"seconds": 1290688046, "microseconds": 388707},
- "event": "SPICE_DISCONNECTED",
- "data": {
- "server": { "port": "5920", "family": "ipv4", "host": "127.0.0.1"},
- "client": {"port": "52873", "family": "ipv4", "host": "127.0.0.1"}
-}}
-
-SPICE_INITIALIZED
------------------
-
-Emitted after the initial handshake and authentication take place (if any)
-and the SPICE channel is up and running.
-
-Data:
-
-- "server": Server information (json-object)
- - "host": IP address (json-string)
- - "port": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
- - "auth": authentication method (json-string, optional)
-- "client": Client information (json-object)
- - "host": IP address (json-string)
- - "port": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
- - "connection-id": spice connection id. All channels with the same id
- belong to the same spice session (json-int)
- - "channel-type": channel type. "1" is the main control channel, filter for
- this one if you want to track spice sessions only (json-int)
- - "channel-id": channel id. Usually "0", might be different needed when
- multiple channels of the same type exist, such as multiple
- display channels in a multihead setup (json-int)
- - "tls": whevener the channel is encrypted (json-bool)
-
-Example:
-
-{ "timestamp": {"seconds": 1290688046, "microseconds": 417172},
- "event": "SPICE_INITIALIZED",
- "data": {"server": {"auth": "spice", "port": "5921",
- "family": "ipv4", "host": "127.0.0.1"},
- "client": {"port": "49004", "family": "ipv4", "channel-type": 3,
- "connection-id": 1804289383, "host": "127.0.0.1",
- "channel-id": 0, "tls": true}
-}}
-
-SPICE_MIGRATE_COMPLETED
------------------------
-
-Emitted when SPICE migration has completed
-
-Data: None.
-
-Example:
-
-{ "timestamp": {"seconds": 1290688046, "microseconds": 417172},
- "event": "SPICE_MIGRATE_COMPLETED" }
-
-MIGRATION
----------
-
-Emitted when a migration event happens
-
-Data:
-
- - "status": migration status
- See MigrationStatus in ~/qapi-schema.json for possible values
-
-Example:
-
-{"timestamp": {"seconds": 1432121972, "microseconds": 744001},
- "event": "MIGRATION", "data": {"status": "completed"}}
-
-MIGRATION_PASS
---------------
-
-Emitted from the source side of a migration at the start of each pass
-(when it syncs the dirty bitmap)
-
-Data:
-
- - "pass": An incrementing count (starting at 1 on the first pass)
-
-Example:
-{"timestamp": {"seconds": 1449669631, "microseconds": 239225},
- "event": "MIGRATION_PASS", "data": {"pass": 2}}
-
-STOP
-----
-
-Emitted when the Virtual Machine is stopped.
-
-Data: None.
-
-Example:
-
-{ "event": "STOP",
- "timestamp": { "seconds": 1267041730, "microseconds": 281295 } }
-
-SUSPEND
--------
-
-Emitted when guest enters S3 state.
-
-Data: None.
-
-Example:
-
-{ "event": "SUSPEND",
- "timestamp": { "seconds": 1344456160, "microseconds": 309119 } }
-
-SUSPEND_DISK
-------------
-
-Emitted when the guest makes a request to enter S4 state.
-
-Data: None.
-
-Example:
-
-{ "event": "SUSPEND_DISK",
- "timestamp": { "seconds": 1344456160, "microseconds": 309119 } }
-
-Note: QEMU shuts down when entering S4 state.
-
-VNC_CONNECTED
--------------
-
-Emitted when a VNC client establishes a connection.
-
-Data:
-
-- "server": Server information (json-object)
- - "host": IP address (json-string)
- - "service": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
- - "auth": authentication method (json-string, optional)
-- "client": Client information (json-object)
- - "host": IP address (json-string)
- - "service": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
-
-Example:
-
-{ "event": "VNC_CONNECTED",
- "data": {
- "server": { "auth": "sasl", "family": "ipv4",
- "service": "5901", "host": "0.0.0.0" },
- "client": { "family": "ipv4", "service": "58425",
- "host": "127.0.0.1" } },
- "timestamp": { "seconds": 1262976601, "microseconds": 975795 } }
-
-
-Note: This event is emitted before any authentication takes place, thus
-the authentication ID is not provided.
-
-VNC_DISCONNECTED
-----------------
-
-Emitted when the connection is closed.
-
-Data:
-
-- "server": Server information (json-object)
- - "host": IP address (json-string)
- - "service": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
- - "auth": authentication method (json-string, optional)
-- "client": Client information (json-object)
- - "host": IP address (json-string)
- - "service": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
- - "x509_dname": TLS dname (json-string, optional)
- - "sasl_username": SASL username (json-string, optional)
-
-Example:
-
-{ "event": "VNC_DISCONNECTED",
- "data": {
- "server": { "auth": "sasl", "family": "ipv4",
- "service": "5901", "host": "0.0.0.0" },
- "client": { "family": "ipv4", "service": "58425",
- "host": "127.0.0.1", "sasl_username": "luiz" } },
- "timestamp": { "seconds": 1262976601, "microseconds": 975795 } }
-
-VNC_INITIALIZED
----------------
-
-Emitted after authentication takes place (if any) and the VNC session is
-made active.
-
-Data:
-
-- "server": Server information (json-object)
- - "host": IP address (json-string)
- - "service": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
- - "auth": authentication method (json-string, optional)
-- "client": Client information (json-object)
- - "host": IP address (json-string)
- - "service": port number (json-string)
- - "family": address family (json-string, "ipv4" or "ipv6")
- - "x509_dname": TLS dname (json-string, optional)
- - "sasl_username": SASL username (json-string, optional)
-
-Example:
-
-{ "event": "VNC_INITIALIZED",
- "data": {
- "server": { "auth": "sasl", "family": "ipv4",
- "service": "5901", "host": "0.0.0.0"},
- "client": { "family": "ipv4", "service": "46089",
- "host": "127.0.0.1", "sasl_username": "luiz" } },
- "timestamp": { "seconds": 1263475302, "microseconds": 150772 } }
-
-VSERPORT_CHANGE
----------------
-
-Emitted when the guest opens or closes a virtio-serial port.
-
-Data:
-
-- "id": device identifier of the virtio-serial port (json-string)
-- "open": true if the guest has opened the virtio-serial port (json-bool)
-
-Example:
-
-{ "event": "VSERPORT_CHANGE",
- "data": { "id": "channel0", "open": true },
- "timestamp": { "seconds": 1401385907, "microseconds": 422329 } }
-
-Note: this event is rate-limited separately for each "id".
-
-WAKEUP
-------
-
-Emitted when the guest has woken up from S3 and is running.
-
-Data: None.
-
-Example:
-
-{ "event": "WAKEUP",
- "timestamp": { "seconds": 1344522075, "microseconds": 745528 } }
-
-WATCHDOG
---------
-
-Emitted when the watchdog device's timer has expired.
-
-Data:
-
-- "action": Action that has been taken, it's one of the following (json-string):
- "reset", "shutdown", "poweroff", "pause", "debug", or "none"
-
-Example:
-
-{ "event": "WATCHDOG",
- "data": { "action": "reset" },
- "timestamp": { "seconds": 1267061043, "microseconds": 959568 } }
-
-Note: If action is "reset", "shutdown", or "pause" the WATCHDOG event is
-followed respectively by the RESET, SHUTDOWN, or STOP events.
-
-Note: this event is rate-limited.
diff --git a/docs/qmp-intro.txt b/docs/qmp-intro.txt
index f6a3a031e9..60deafbae6 100644
--- a/docs/qmp-intro.txt
+++ b/docs/qmp-intro.txt
@@ -16,8 +16,7 @@ QMP is JSON[1] based and features the following:
For detailed information on QMP's usage, please, refer to the following files:
o qmp-spec.txt QEMU Machine Protocol current specification
-o qmp-commands.txt QMP supported commands (auto-generated at build-time)
-o qmp-events.txt List of available asynchronous events
+o qemu-qmp-ref.html QEMU QMP commands and events (auto-generated at build-time)
[1] http://www.json.org
diff --git a/docs/replay.txt b/docs/replay.txt
index 779c6c059e..347b2ff055 100644
--- a/docs/replay.txt
+++ b/docs/replay.txt
@@ -195,3 +195,17 @@ Queue is flushed at checkpoints and information about processed requests
is recorded to the log. In replay phase the queue is matched with
events read from the log. Therefore block devices requests are processed
deterministically.
+
+Network devices
+---------------
+
+Record and replay for network interactions is performed with the network filter.
+Each backend must have its own instance of the replay filter as follows:
+ -netdev user,id=net1 -device rtl8139,netdev=net1
+ -object filter-replay,id=replay,netdev=net1
+
+The replay network filter is used to record and replay network packets. While
+recording the virtual machine, this filter puts all packets coming from
+the outside world into the log. In replay mode, packets from the log are
+injected into the network device, and all interactions with the network
+backend are disabled.
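+
+For example, a record-mode invocation with such a filter might look like this
+(file names and ids are placeholders; the -icount options are the
+record/replay switches described earlier in this document):
+ qemu-system-i386 -icount shift=7,rr=record,rrfile=replay.bin \
+  -netdev user,id=net1 -device rtl8139,netdev=net1 \
+  -object filter-replay,id=replay,netdev=net1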
diff --git a/docs/specs/fw_cfg.txt b/docs/specs/fw_cfg.txt
index 7a5f8c7824..08c00bdf44 100644
--- a/docs/specs/fw_cfg.txt
+++ b/docs/specs/fw_cfg.txt
@@ -33,6 +33,10 @@ the selector value is between 0x4000-0x7fff or 0xc000-0xffff.
NOTE: As of QEMU v2.4, writes to the fw_cfg data register are no
longer supported, and will be ignored (treated as no-ops)!
+NOTE: As of QEMU v2.9, writes are reinstated, but only through the DMA
+ interface (see below). Furthermore, writeability of any specific item is
+ governed independently of Bit14 in the selector key value.
+
Bit15 of the selector register indicates whether the configuration
setting is architecture specific. A value of 0 means the item is a
generic configuration item. A value of 1 means the item is specific
@@ -43,7 +47,7 @@ value between 0x8000-0xffff.
== Data Register ==
-* Read/Write (writes ignored as of QEMU v2.4)
+* Read/Write (writes ignored as of QEMU v2.4, but see the DMA interface)
* Location: platform dependent (IOport [*] or MMIO)
* Width: 8-bit (if IOport), 8/16/32/64-bit (if MMIO)
* Endianness: string-preserving
@@ -134,8 +138,8 @@ struct FWCfgFile { /* an individual file entry, 64 bytes total */
=== All Other Data Items ===
-Please consult the QEMU source for the most up-to-date and authoritative
-list of selector keys and their respective items' purpose and format.
+Please consult the QEMU source for the most up-to-date and authoritative list
+of selector keys and their respective items' purpose, format and writeability.
=== Ranges ===
@@ -144,13 +148,15 @@ items, and up to 0x4000 architecturally specific ones.
Selector Reg. Range Usage
--------------- -----------
-0x0000 - 0x3fff Generic (0x0000 - 0x3fff, RO)
+0x0000 - 0x3fff Generic (0x0000 - 0x3fff, generally RO, possibly RW through
+ the DMA interface in QEMU v2.9+)
0x4000 - 0x7fff Generic (0x0000 - 0x3fff, RW, ignored in QEMU v2.4+)
-0x8000 - 0xbfff Arch. Specific (0x0000 - 0x3fff, RO)
+0x8000 - 0xbfff Arch. Specific (0x0000 - 0x3fff, generally RO, possibly RW
+ through the DMA interface in QEMU v2.9+)
0xc000 - 0xffff Arch. Specific (0x0000 - 0x3fff, RW, ignored in v2.4+)
-In practice, the number of allowed firmware configuration items is given
-by the value of FW_CFG_MAX_ENTRY (see fw_cfg.h).
+In practice, the number of allowed firmware configuration items depends on the
+machine type/version.
= Guest-side DMA Interface =
@@ -182,6 +188,7 @@ The "control" field has the following bits:
- Bit 1: Read
- Bit 2: Skip
- Bit 3: Select. The upper 16 bits are the selected index.
+ - Bit 4: Write
When an operation is triggered, if the "control" field has bit 3 set, the
upper 16 bits are interpreted as an index of a firmware configuration item.
@@ -191,8 +198,17 @@ If the "control" field has bit 1 set, a read operation will be performed.
"length" bytes for the current selector and offset will be copied into the
physical RAM address specified by the "address" field.
-If the "control" field has bit 2 set (and not bit 1), a skip operation will be
-performed. The offset for the current selector will be advanced "length" bytes.
+If the "control" field has bit 4 set (and not bit 1), a write operation will be
+performed. "length" bytes will be copied from the physical RAM address
+specified by the "address" field to the current selector and offset. QEMU
+prevents starting or finishing the write beyond the end of the item associated
+with the current selector (i.e., the item cannot be resized). Truncated writes
+are dropped entirely. Writes to read-only items are also rejected. All of these
+write errors set bit 0 (the error bit) in the "control" field.
+
+If the "control" field has bit 2 set (and neither bit 1 nor bit 4), a skip
+operation will be performed. The offset for the current selector will be
+advanced "length" bytes.
To check the result, read the "control" field:
error bit set -> something went wrong.
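+
+As a rough guest-side illustration of the encoding above, a "select + write"
+transfer could be prepared as in the sketch below. The FW_CFG_DMA_CTL_* names
+and the fw_cfg_dma_access type are local to this example; they mirror the
+control bits listed above and the FWCfgDmaAccess layout described earlier in
+this document.
+
+#include <stdint.h>
+#include <endian.h>
+
+#define FW_CFG_DMA_CTL_ERROR  0x01  /* bit 0: error                  */
+#define FW_CFG_DMA_CTL_READ   0x02  /* bit 1: read                   */
+#define FW_CFG_DMA_CTL_SKIP   0x04  /* bit 2: skip                   */
+#define FW_CFG_DMA_CTL_SELECT 0x08  /* bit 3: select, key in 31:16   */
+#define FW_CFG_DMA_CTL_WRITE  0x10  /* bit 4: write (QEMU v2.9+)     */
+
+struct fw_cfg_dma_access {          /* all fields are big-endian     */
+    uint32_t control;
+    uint32_t length;
+    uint64_t address;
+};
+
+/* Select item 'key' and write 'len' bytes from guest-physical address
+ * 'gpa' into it, starting at offset 0 of the item. */
+static void fw_cfg_dma_prep_write(struct fw_cfg_dma_access *d,
+                                  uint16_t key, uint32_t len, uint64_t gpa)
+{
+    d->control = htobe32(((uint32_t)key << 16) |
+                         FW_CFG_DMA_CTL_SELECT | FW_CFG_DMA_CTL_WRITE);
+    d->length  = htobe32(len);
+    d->address = htobe64(gpa);
+    /* After the transfer is kicked off, poll 'control': all bits clear
+     * means success; FW_CFG_DMA_CTL_ERROR set means the write was
+     * rejected (read-only item or out-of-bounds/truncated write). */
+}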
@@ -234,3 +250,5 @@ Prefix "opt/org.qemu/" is reserved for QEMU itself.
Use of names not beginning with "opt/" is potentially dangerous and
entirely unsupported. QEMU will warn if you try.
+
+All externally provided fw_cfg items are read-only to the guest.
diff --git a/docs/specs/pci-ids.txt b/docs/specs/pci-ids.txt
index fd27c677d4..16fdb0c93f 100644
--- a/docs/specs/pci-ids.txt
+++ b/docs/specs/pci-ids.txt
@@ -57,7 +57,10 @@ PCI devices (other than virtio):
1b36:0005 PCI test device (docs/specs/pci-testdev.txt)
1b36:0006 PCI Rocker Ethernet switch device
1b36:0007 PCI SD Card Host Controller Interface (SDHCI)
+1b36:0008 PCIe host bridge
+1b36:0009 PCI Expander Bridge (-device pxb)
1b36:000a PCI-PCI bridge (multiseat)
+1b36:000b PCIe Expander Bridge (-device pxb-pcie)
All these devices are documented in docs/specs.
diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
index d70bd83b13..036890feb0 100644
--- a/docs/specs/vhost-user.txt
+++ b/docs/specs/vhost-user.txt
@@ -259,6 +259,7 @@ Protocol features
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_RARP 2
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
+#define VHOST_USER_PROTOCOL_F_NET_MTU 4
Message types
-------------
@@ -470,6 +471,21 @@ Message types
The first 6 bytes of the payload contain the mac address of the guest to
allow the vhost user backend to construct and broadcast the fake RARP.
+ * VHOST_USER_NET_SET_MTU
+
+ Id: 20
+ Equivalent ioctl: N/A
+ Master payload: u64
+
+ Set the host MTU value exposed to the guest.
+ This request should be sent only when the VIRTIO_NET_F_MTU feature has been
+ successfully negotiated, VHOST_USER_F_PROTOCOL_FEATURES is present in
+ VHOST_USER_GET_FEATURES, and the protocol feature bit
+ VHOST_USER_PROTOCOL_F_NET_MTU is present in
+ VHOST_USER_GET_PROTOCOL_FEATURES.
+ If VHOST_USER_PROTOCOL_F_REPLY_ACK is negotiated, the slave must respond
+ with zero if the specified MTU is valid, or non-zero otherwise.
+
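+ A rough sketch of what this request looks like on the wire from the
+ master side follows. The structure and constants are local to this
+ example and assume the message framing of request, flags, size and
+ payload fields described earlier in this document.
+
+ #include <stdint.h>
+ #include <string.h>
+ #include <unistd.h>
+
+ #define VHOST_USER_NET_SET_MTU 20
+ #define VHOST_USER_VERSION     0x1        /* lower 2 bits of "flags" */
+
+ struct vhost_user_msg {
+     uint32_t request;
+     uint32_t flags;
+     uint32_t size;                        /* payload size in bytes */
+     uint64_t payload_u64;
+ } __attribute__((packed));
+
+ /* Master: announce the host MTU to the slave. */
+ static int vhost_user_net_set_mtu(int sock_fd, uint16_t mtu)
+ {
+     struct vhost_user_msg msg;
+
+     memset(&msg, 0, sizeof(msg));
+     msg.request     = VHOST_USER_NET_SET_MTU;
+     msg.flags       = VHOST_USER_VERSION;
+     msg.size        = sizeof(msg.payload_u64);
+     msg.payload_u64 = mtu;
+
+     /* 12-byte header followed by the u64 payload. */
+     if (write(sock_fd, &msg, sizeof(msg)) != (ssize_t)sizeof(msg)) {
+         return -1;
+     }
+     return 0;
+ }
+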
VHOST_USER_PROTOCOL_F_REPLY_ACK:
-------------------------------
The original vhost-user specification only demands replies for certain
diff --git a/docs/usb-storage.txt b/docs/usb-storage.txt
index fbc1f2edd8..551af6f88b 100644
--- a/docs/usb-storage.txt
+++ b/docs/usb-storage.txt
@@ -34,7 +34,7 @@ with three logical units:
Number three emulates the classic bulk-only transport protocol too.
It's called "usb-bot". It shares most code with "usb-storage", and
the guest will not be able to see the difference. The qemu command
-line interface is simliar to usb-uas though, i.e. no automatic scsi
+line interface is similar to usb-uas though, i.e. no automatic scsi
disk creation. It also features support for up to 16 LUNs. The LUN
numbers must be continuous, i.e. for three devices you must use 0+1+2.
The 0+1+5 numbering from the "usb-uas" example isn't going to work
diff --git a/docs/usb2.txt b/docs/usb2.txt
index c7a445afcd..b9e7548073 100644
--- a/docs/usb2.txt
+++ b/docs/usb2.txt
@@ -19,7 +19,7 @@ the controller so the USB 2.0 bus gets an individual name, for example
'-device usb-ehci,id=ehci". This will give you a USB 2.0 bus named
"ehci.0".
-I strongly recomment to also use -device to attach usb devices because
+I strongly recommend also using -device to attach usb devices because
you can specify the bus they should be attached to this way. Here is
a complete example:
diff --git a/exec.c b/exec.c
index 8d4bb0e8c1..f2bed92b64 100644
--- a/exec.c
+++ b/exec.c
@@ -449,6 +449,39 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
}
/* Called from RCU critical section */
+IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
+ bool is_write)
+{
+ IOMMUTLBEntry iotlb = {0};
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+
+ for (;;) {
+ AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
+ section = address_space_lookup_region(d, addr, false);
+ addr = addr - section->offset_within_address_space
+ + section->offset_within_region;
+ mr = section->mr;
+
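+        /* Not an IOMMU region: stop walking and return what we have. */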
+ if (!mr->iommu_ops) {
+ break;
+ }
+
+ iotlb = mr->iommu_ops->translate(mr, addr, is_write);
+ if (!(iotlb.perm & (1 << is_write))) {
+ iotlb.target_as = NULL;
+ break;
+ }
+
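+        /* Combine the translated page address with the offset bits the
+         * IOMMU does not translate, then continue the walk in the
+         * IOMMU's target address space. */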
+ addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
+ | (addr & iotlb.addr_mask));
+ as = iotlb.target_as;
+ }
+
+ return iotlb;
+}
+
+/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
bool is_write)
@@ -511,7 +544,7 @@ static int cpu_common_post_load(void *opaque, int version_id)
/* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
version_id is increased. */
cpu->interrupt_request &= ~0x01;
- tlb_flush(cpu, 1);
+ tlb_flush(cpu);
return 0;
}
@@ -1654,6 +1687,7 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
/* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
+ ram_block_notify_add(new_block->host, new_block->max_length);
}
}
@@ -1784,6 +1818,10 @@ void qemu_ram_free(RAMBlock *block)
return;
}
+ if (block->host) {
+ ram_block_notify_remove(block->host, block->max_length);
+ }
+
qemu_mutex_lock_ramlist();
QLIST_REMOVE_RCU(block, next);
ram_list.mru_block = NULL;
@@ -2393,7 +2431,7 @@ static void tcg_commit(MemoryListener *listener)
*/
d = atomic_rcu_read(&cpuas->as->dispatch);
atomic_rcu_set(&cpuas->memory_dispatch, d);
- tlb_flush(cpuas->cpu, 1);
+ tlb_flush(cpuas->cpu);
}
void address_space_init_dispatch(AddressSpace *as)
@@ -2927,6 +2965,7 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_
if (!memory_access_is_direct(mr, is_write)) {
l = memory_access_size(mr, l, addr);
if (!memory_region_access_valid(mr, xlat, l, is_write)) {
+ rcu_read_unlock();
return false;
}
}
diff --git a/fpu/softfloat-specialize.h b/fpu/softfloat-specialize.h
index f5aed72e8f..f05c8658c1 100644
--- a/fpu/softfloat-specialize.h
+++ b/fpu/softfloat-specialize.h
@@ -116,6 +116,8 @@ float32 float32_default_nan(float_status *status)
#elif defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) || \
defined(TARGET_XTENSA) || defined(TARGET_S390X) || defined(TARGET_TRICORE)
return const_float32(0x7FC00000);
+#elif defined(TARGET_HPPA)
+ return const_float32(0x7FA00000);
#else
if (status->snan_bit_is_one) {
return const_float32(0x7FBFFFFF);
@@ -139,6 +141,8 @@ float64 float64_default_nan(float_status *status)
#elif defined(TARGET_PPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) || \
defined(TARGET_S390X)
return const_float64(LIT64(0x7FF8000000000000));
+#elif defined(TARGET_HPPA)
+ return const_float64(LIT64(0x7FF4000000000000));
#else
if (status->snan_bit_is_one) {
return const_float64(LIT64(0x7FF7FFFFFFFFFFFF));
@@ -361,7 +365,14 @@ float32 float32_maybe_silence_nan(float32 a_, float_status *status)
{
if (float32_is_signaling_nan(a_, status)) {
if (status->snan_bit_is_one) {
+#ifdef TARGET_HPPA
+ uint32_t a = float32_val(a_);
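+            /* snan_bit_is_one: clear the signalling bit (22) and set
+             * bit 21 so the result stays a NaN instead of becoming an
+             * infinity. */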
+ a &= ~0x00400000;
+ a |= 0x00200000;
+ return make_float32(a);
+#else
return float32_default_nan(status);
+#endif
} else {
uint32_t a = float32_val(a_);
a |= (1 << 22);
@@ -449,7 +460,7 @@ static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN,
return 1;
}
}
-#elif defined(TARGET_MIPS)
+#elif defined(TARGET_MIPS) || defined(TARGET_HPPA)
static int pickNaN(flag aIsQNaN, flag aIsSNaN, flag bIsQNaN, flag bIsSNaN,
flag aIsLargerSignificand)
{
@@ -794,7 +805,14 @@ float64 float64_maybe_silence_nan(float64 a_, float_status *status)
{
if (float64_is_signaling_nan(a_, status)) {
if (status->snan_bit_is_one) {
+#ifdef TARGET_HPPA
+ uint64_t a = float64_val(a_);
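+            /* As for float32: clear the signalling bit (51) and set
+             * bit 50 to keep the value a NaN. */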
+ a &= ~0x0008000000000000ULL;
+ a |= 0x0004000000000000ULL;
+ return make_float64(a);
+#else
return float64_default_nan(status);
+#endif
} else {
uint64_t a = float64_val(a_);
a |= LIT64(0x0008000000000000);
diff --git a/gdbstub.c b/gdbstub.c
index de62d26096..2d18ed73be 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -18,6 +18,7 @@
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "cpu.h"
#ifdef CONFIG_USER_ONLY
@@ -32,6 +33,7 @@
#define MAX_PACKET_LENGTH 4096
#include "qemu/sockets.h"
+#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "exec/semihost.h"
#include "exec/exec-all.h"
@@ -636,8 +638,8 @@ void gdb_register_coprocessor(CPUState *cpu,
*p = s;
if (g_pos) {
if (g_pos != s->base_reg) {
- fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
- "Expected %d got %d\n", xml, g_pos, s->base_reg);
+ error_report("Error: Bad gdb register numbering for '%s', "
+ "expected %d got %d", xml, g_pos, s->base_reg);
} else {
cpu->gdb_num_g_regs = cpu->gdb_num_regs;
}
@@ -889,7 +891,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
}
case 'k':
/* Kill the target */
- fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
+ error_report("QEMU: Terminated via GDBstub");
exit(0);
case 'D':
/* Detach packet */
@@ -1357,8 +1359,8 @@ void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va)
break;
default:
bad_format:
- fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
- fmt - 1);
+ error_report("gdbstub: Bad syscall format string '%s'",
+ fmt - 1);
break;
}
} else {
@@ -1731,6 +1733,12 @@ int gdbserver_start(const char *device)
CharDriverState *mon_chr;
ChardevCommon common = { 0 };
+ if (!first_cpu) {
+ error_report("gdbstub: meaningless to attach gdb to a "
+ "machine without any CPU.");
+ return -1;
+ }
+
if (!device)
return -1;
if (strcmp(device, "none") != 0) {
diff --git a/hax-stub.c b/hax-stub.c
new file mode 100644
index 0000000000..c0e6f892e5
--- /dev/null
+++ b/hax-stub.c
@@ -0,0 +1,34 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2015, Intel Corporation
+ *
+ * Copyright 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "sysemu/hax.h"
+
+int hax_sync_vcpus(void)
+{
+ return 0;
+}
+
+int hax_init_vcpu(CPUState *cpu)
+{
+ return -ENOSYS;
+}
+
+int hax_smp_cpu_exec(CPUState *cpu)
+{
+ return -ENOSYS;
+}
diff --git a/hmp.c b/hmp.c
index b86961705d..8522efea26 100644
--- a/hmp.c
+++ b/hmp.c
@@ -1808,7 +1808,6 @@ void hmp_object_add(Monitor *mon, const QDict *qdict)
{
Error *err = NULL;
QemuOpts *opts;
- Visitor *v;
Object *obj = NULL;
opts = qemu_opts_from_qdict(qemu_find_opts("object"), qdict, &err);
@@ -1817,9 +1816,7 @@ void hmp_object_add(Monitor *mon, const QDict *qdict)
return;
}
- v = opts_visitor_new(opts);
- obj = user_creatable_add(qdict, v, &err);
- visit_free(v);
+ obj = user_creatable_add_opts(opts, &err);
qemu_opts_del(opts);
if (err) {
@@ -2081,13 +2078,11 @@ void hmp_info_memdev(Monitor *mon, const QDict *qdict)
MemdevList *m = memdev_list;
Visitor *v;
char *str;
- int i = 0;
-
while (m) {
v = string_output_visitor_new(false, &str);
visit_type_uint16List(v, NULL, &m->value->host_nodes, NULL);
- monitor_printf(mon, "memory backend: %d\n", i);
+ monitor_printf(mon, "memory backend: %s\n", m->value->id);
monitor_printf(mon, " size: %" PRId64 "\n", m->value->size);
monitor_printf(mon, " merge: %s\n",
m->value->merge ? "true" : "false");
@@ -2103,7 +2098,6 @@ void hmp_info_memdev(Monitor *mon, const QDict *qdict)
g_free(str);
visit_free(v);
m = m->next;
- i++;
}
monitor_printf(mon, "\n");
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index faebd91f5f..06b6e7ec66 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -47,7 +47,7 @@ ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
- ret = virtio_pdu_vmarshal(pdu, offset, fmt, ap);
+ ret = pdu->s->transport->pdu_vmarshal(pdu, offset, fmt, ap);
va_end(ap);
return ret;
@@ -59,7 +59,7 @@ ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
- ret = virtio_pdu_vunmarshal(pdu, offset, fmt, ap);
+ ret = pdu->s->transport->pdu_vunmarshal(pdu, offset, fmt, ap);
va_end(ap);
return ret;
@@ -67,7 +67,7 @@ ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
static void pdu_push_and_notify(V9fsPDU *pdu)
{
- virtio_9p_push_and_notify(pdu);
+ pdu->s->transport->push_and_notify(pdu);
}
static int omode_to_uflags(int8_t mode)
@@ -979,6 +979,7 @@ static void coroutine_fn v9fs_attach(void *opaque)
size_t offset = 7;
V9fsQID qid;
ssize_t err;
+ Error *local_err = NULL;
v9fs_string_init(&uname);
v9fs_string_init(&aname);
@@ -1007,26 +1008,36 @@ static void coroutine_fn v9fs_attach(void *opaque)
clunk_fid(s, fid);
goto out;
}
- err = pdu_marshal(pdu, offset, "Q", &qid);
- if (err < 0) {
- clunk_fid(s, fid);
- goto out;
- }
- err += offset;
- memcpy(&s->root_qid, &qid, sizeof(qid));
- trace_v9fs_attach_return(pdu->tag, pdu->id,
- qid.type, qid.version, qid.path);
+
/*
* disable migration if we haven't done already.
* attach could get called multiple times for the same export.
*/
if (!s->migration_blocker) {
- s->root_fid = fid;
error_setg(&s->migration_blocker,
"Migration is disabled when VirtFS export path '%s' is mounted in the guest using mount_tag '%s'",
s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
- migrate_add_blocker(s->migration_blocker);
+ err = migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_free(local_err);
+ error_free(s->migration_blocker);
+ s->migration_blocker = NULL;
+ clunk_fid(s, fid);
+ goto out;
+ }
+ s->root_fid = fid;
+ }
+
+ err = pdu_marshal(pdu, offset, "Q", &qid);
+ if (err < 0) {
+ clunk_fid(s, fid);
+ goto out;
}
+ err += offset;
+
+ memcpy(&s->root_qid, &qid, sizeof(qid));
+ trace_v9fs_attach_return(pdu->tag, pdu->id,
+ qid.type, qid.version, qid.path);
out:
put_fid(pdu, fidp);
out_nofid:
@@ -1633,14 +1644,43 @@ out_nofid:
pdu_complete(pdu, err);
}
+/*
+ * Create a QEMUIOVector for a sub-region of PDU iovecs
+ *
+ * @qiov: uninitialized QEMUIOVector
+ * @skip: number of bytes to skip from beginning of PDU
+ * @size: number of bytes to include
+ * @is_write: true - write, false - read
+ *
+ * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
+ * with qemu_iovec_destroy().
+ */
+static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
+ size_t skip, size_t size,
+ bool is_write)
+{
+ QEMUIOVector elem;
+ struct iovec *iov;
+ unsigned int niov;
+
+ if (is_write) {
+ pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov);
+ } else {
+ pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size);
+ }
+
+ qemu_iovec_init_external(&elem, iov, niov);
+ qemu_iovec_init(qiov, niov);
+ qemu_iovec_concat(qiov, &elem, skip, size);
+}
+
static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
uint64_t off, uint32_t max_count)
{
ssize_t err;
size_t offset = 7;
uint64_t read_count;
- V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
- VirtQueueElement *elem = v->elems[pdu->idx];
+ QEMUIOVector qiov_full;
if (fidp->fs.xattr.len < off) {
read_count = 0;
@@ -1656,9 +1696,11 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
}
offset += err;
- err = v9fs_pack(elem->in_sg, elem->in_num, offset,
+ v9fs_init_qiov_from_pdu(&qiov_full, pdu, 0, read_count, false);
+ err = v9fs_pack(qiov_full.iov, qiov_full.niov, offset,
((char *)fidp->fs.xattr.value) + off,
read_count);
+ qemu_iovec_destroy(&qiov_full);
if (err < 0) {
return err;
}
@@ -1732,32 +1774,6 @@ static int coroutine_fn v9fs_do_readdir_with_stat(V9fsPDU *pdu,
return count;
}
-/*
- * Create a QEMUIOVector for a sub-region of PDU iovecs
- *
- * @qiov: uninitialized QEMUIOVector
- * @skip: number of bytes to skip from beginning of PDU
- * @size: number of bytes to include
- * @is_write: true - write, false - read
- *
- * The resulting QEMUIOVector has heap-allocated iovecs and must be cleaned up
- * with qemu_iovec_destroy().
- */
-static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
- size_t skip, size_t size,
- bool is_write)
-{
- QEMUIOVector elem;
- struct iovec *iov;
- unsigned int niov;
-
- virtio_init_iov_from_pdu(pdu, &iov, &niov, is_write);
-
- qemu_iovec_init_external(&elem, iov, niov);
- qemu_iovec_init(qiov, niov);
- qemu_iovec_concat(qiov, &elem, skip, size);
-}
-
static void coroutine_fn v9fs_read(void *opaque)
{
int32_t fid;
@@ -3440,7 +3456,6 @@ void pdu_submit(V9fsPDU *pdu)
/* Returns 0 on success, 1 on failure. */
int v9fs_device_realize_common(V9fsState *s, Error **errp)
{
- V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
int i, len;
struct stat stat;
FsDriverEntry *fse;
@@ -3451,9 +3466,9 @@ int v9fs_device_realize_common(V9fsState *s, Error **errp)
QLIST_INIT(&s->free_list);
QLIST_INIT(&s->active_list);
for (i = 0; i < (MAX_REQ - 1); i++) {
- QLIST_INSERT_HEAD(&s->free_list, &v->pdus[i], next);
- v->pdus[i].s = s;
- v->pdus[i].idx = i;
+ QLIST_INSERT_HEAD(&s->free_list, &s->pdus[i], next);
+ s->pdus[i].s = s;
+ s->pdus[i].idx = i;
}
v9fs_path_init(&path);
@@ -3521,7 +3536,7 @@ int v9fs_device_realize_common(V9fsState *s, Error **errp)
rc = 0;
out:
if (rc) {
- if (s->ops->cleanup && s->ctx.private) {
+ if (s->ops && s->ops->cleanup && s->ctx.private) {
s->ops->cleanup(&s->ctx);
}
g_free(s->tag);
diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
index 3976b7fe3d..b7e836251e 100644
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -99,8 +99,8 @@ enum p9_proto_version {
V9FS_PROTO_2000L = 0x02,
};
-#define P9_NOTAG (u16)(~0)
-#define P9_NOFID (u32)(~0)
+#define P9_NOTAG UINT16_MAX
+#define P9_NOFID UINT32_MAX
#define P9_MAXWELEM 16
#define FID_REFERENCED 0x1
@@ -229,6 +229,8 @@ typedef struct V9fsState
char *tag;
enum p9_proto_version proto_version;
int32_t msize;
+ V9fsPDU pdus[MAX_REQ];
+ const struct V9fsTransport *transport;
/*
* lock ensuring atomic path update
* on rename.
@@ -342,4 +344,24 @@ void pdu_free(V9fsPDU *pdu);
void pdu_submit(V9fsPDU *pdu);
void v9fs_reset(V9fsState *s);
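+
+/*
+ * Transport-specific hooks used by the generic 9p code: marshalling PDUs
+ * to and from the guest and notifying it when a request completes. A
+ * transport fills one of these in and registers it with
+ * v9fs_register_transport().
+ */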
+struct V9fsTransport {
+ ssize_t (*pdu_vmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
+ va_list ap);
+ ssize_t (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
+ va_list ap);
+ void (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
+ unsigned int *pniov, size_t size);
+ void (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
+ unsigned int *pniov);
+ void (*push_and_notify)(V9fsPDU *pdu);
+};
+
+static inline int v9fs_register_transport(V9fsState *s,
+ const struct V9fsTransport *t)
+{
+ assert(!s->transport);
+ s->transport = t;
+ return 0;
+}
+
#endif
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index 1782e4a227..27a4a32f5c 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -20,7 +20,9 @@
#include "hw/virtio/virtio-access.h"
#include "qemu/iov.h"
-void virtio_9p_push_and_notify(V9fsPDU *pdu)
+static const struct V9fsTransport virtio_9p_transport;
+
+static void virtio_9p_push_and_notify(V9fsPDU *pdu)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
@@ -126,6 +128,7 @@ static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
v->config_size = sizeof(struct virtio_9p_config) + strlen(s->fsconf.tag);
virtio_init(vdev, "virtio-9p", VIRTIO_ID_9P, v->config_size);
v->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output);
+ v9fs_register_transport(s, &virtio_9p_transport);
out:
return;
@@ -148,8 +151,8 @@ static void virtio_9p_reset(VirtIODevice *vdev)
v9fs_reset(&v->state);
}
-ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
- const char *fmt, va_list ap)
+static ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
+ const char *fmt, va_list ap)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
@@ -158,8 +161,8 @@ ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
return v9fs_iov_vmarshal(elem->in_sg, elem->in_num, offset, 1, fmt, ap);
}
-ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
- const char *fmt, va_list ap)
+static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
+ const char *fmt, va_list ap)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
@@ -168,22 +171,37 @@ ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
return v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap);
}
-void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
- unsigned int *pniov, bool is_write)
+/* The size parameter is used by other transports. Do not drop it. */
+static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
+ unsigned int *pniov, size_t size)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = v->elems[pdu->idx];
- if (is_write) {
- *piov = elem->out_sg;
- *pniov = elem->out_num;
- } else {
- *piov = elem->in_sg;
- *pniov = elem->in_num;
- }
+ *piov = elem->in_sg;
+ *pniov = elem->in_num;
}
+static void virtio_init_out_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
+ unsigned int *pniov)
+{
+ V9fsState *s = pdu->s;
+ V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
+ VirtQueueElement *elem = v->elems[pdu->idx];
+
+ *piov = elem->out_sg;
+ *pniov = elem->out_num;
+}
+
+static const struct V9fsTransport virtio_9p_transport = {
+ .pdu_vmarshal = virtio_pdu_vmarshal,
+ .pdu_vunmarshal = virtio_pdu_vunmarshal,
+ .init_in_iov_from_pdu = virtio_init_in_iov_from_pdu,
+ .init_out_iov_from_pdu = virtio_init_out_iov_from_pdu,
+ .push_and_notify = virtio_9p_push_and_notify,
+};
+
/* virtio-9p device */
static const VMStateDescription vmstate_virtio_9p = {
diff --git a/hw/9pfs/virtio-9p.h b/hw/9pfs/virtio-9p.h
index 25c47c7cb6..e763da2c02 100644
--- a/hw/9pfs/virtio-9p.h
+++ b/hw/9pfs/virtio-9p.h
@@ -10,20 +10,10 @@ typedef struct V9fsVirtioState
VirtIODevice parent_obj;
VirtQueue *vq;
size_t config_size;
- V9fsPDU pdus[MAX_REQ];
VirtQueueElement *elems[MAX_REQ];
V9fsState state;
} V9fsVirtioState;
-void virtio_9p_push_and_notify(V9fsPDU *pdu);
-
-ssize_t virtio_pdu_vmarshal(V9fsPDU *pdu, size_t offset,
- const char *fmt, va_list ap);
-ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
- const char *fmt, va_list ap);
-void virtio_init_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
- unsigned int *pniov, bool is_write);
-
#define TYPE_VIRTIO_9P "virtio-9p-device"
#define VIRTIO_9P(obj) \
OBJECT_CHECK(V9fsVirtioState, (obj), TYPE_VIRTIO_9P)
diff --git a/hw/Makefile.objs b/hw/Makefile.objs
index 0ffd281145..a2c61f6b09 100644
--- a/hw/Makefile.objs
+++ b/hw/Makefile.objs
@@ -1,5 +1,5 @@
devices-dirs-$(call land, $(CONFIG_VIRTIO),$(call land,$(CONFIG_VIRTFS),$(CONFIG_PCI))) += 9pfs/
-devices-dirs-$(CONFIG_ACPI) += acpi/
+devices-dirs-$(CONFIG_SOFTMMU) += acpi/
devices-dirs-$(CONFIG_SOFTMMU) += adc/
devices-dirs-$(CONFIG_SOFTMMU) += audio/
devices-dirs-$(CONFIG_SOFTMMU) += block/
@@ -29,11 +29,11 @@ devices-dirs-$(CONFIG_SOFTMMU) += timer/
devices-dirs-$(CONFIG_TPM) += tpm/
devices-dirs-$(CONFIG_SOFTMMU) += usb/
devices-dirs-$(CONFIG_SOFTMMU) += vfio/
-devices-dirs-$(CONFIG_VIRTIO) += virtio/
+devices-dirs-$(CONFIG_SOFTMMU) += virtio/
devices-dirs-$(CONFIG_SOFTMMU) += watchdog/
devices-dirs-$(CONFIG_SOFTMMU) += xen/
devices-dirs-$(CONFIG_MEM_HOTPLUG) += mem/
-devices-dirs-$(CONFIG_SMBIOS) += smbios/
+devices-dirs-$(CONFIG_SOFTMMU) += smbios/
devices-dirs-y += core/
common-obj-y += $(devices-dirs-y)
obj-y += $(devices-dirs-y)
diff --git a/hw/acpi/Makefile.objs b/hw/acpi/Makefile.objs
index 489e63bb75..6acf79860a 100644
--- a/hw/acpi/Makefile.objs
+++ b/hw/acpi/Makefile.objs
@@ -1,10 +1,19 @@
+ifeq ($(CONFIG_ACPI),y)
common-obj-$(CONFIG_ACPI_X86) += core.o piix4.o pcihp.o
common-obj-$(CONFIG_ACPI_X86_ICH) += ich9.o tco.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o
-common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o memory_hotplug_acpi_table.o
+common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o
common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu.o
common-obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
-common-obj-$(CONFIG_ACPI) += acpi_interface.o
-common-obj-$(CONFIG_ACPI) += bios-linker-loader.o
-common-obj-$(CONFIG_ACPI) += aml-build.o
-common-obj-$(call land,$(CONFIG_ACPI),$(CONFIG_IPMI)) += ipmi.o
+common-obj-$(call lnot,$(CONFIG_ACPI_X86)) += acpi-stub.o
+
+common-obj-y += acpi_interface.o
+common-obj-y += bios-linker-loader.o
+common-obj-y += aml-build.o
+
+common-obj-$(CONFIG_IPMI) += ipmi.o
+common-obj-$(call lnot,$(CONFIG_IPMI)) += ipmi-stub.o
+else
+common-obj-y += acpi-stub.o
+endif
+common-obj-$(CONFIG_ALL) += acpi-stub.o ipmi-stub.o
diff --git a/hw/acpi/acpi-stub.c b/hw/acpi/acpi-stub.c
new file mode 100644
index 0000000000..26bd22f7ec
--- /dev/null
+++ b/hw/acpi/acpi-stub.c
@@ -0,0 +1,29 @@
+/*
+ * ACPI stubs for platforms that don't support ACPI.
+ *
+ * Copyright (c) 2006 Fabrice Bellard
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/qmp/qerror.h"
+#include "qmp-commands.h"
+#include "hw/acpi/acpi.h"
+
+void acpi_table_add(const QemuOpts *opts, Error **errp)
+{
+ error_setg(errp, QERR_UNSUPPORTED);
+}
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
index 5ac89fefaf..6017ca04bf 100644
--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -190,7 +190,7 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
{
MachineState *machine = MACHINE(qdev_get_machine());
MachineClass *mc = MACHINE_GET_CLASS(machine);
- CPUArchIdList *id_list;
+ const CPUArchIdList *id_list;
int i;
assert(mc->possible_cpu_arch_ids);
@@ -201,7 +201,6 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
state->devs[i].cpu = id_list->cpus[i].cpu;
state->devs[i].arch_id = id_list->cpus[i].arch_id;
}
- g_free(id_list);
memory_region_init_io(&state->ctrl_reg, owner, &cpu_hotplug_ops, state,
"acpi-mem-hotplug", ACPI_CPU_HOTPLUG_REG_LEN);
memory_region_add_subregion(as, base_addr, &state->ctrl_reg);
@@ -325,7 +324,7 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
Aml *one = aml_int(1);
Aml *sb_scope = aml_scope("_SB");
MachineClass *mc = MACHINE_GET_CLASS(machine);
- CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
+ const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
char *cphp_res_path = g_strdup_printf("%s." CPUHP_RES_DEVICE, res_root);
Object *obj = object_resolve_path_type("", TYPE_ACPI_DEVICE_IF, NULL);
AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj);
@@ -574,5 +573,4 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
aml_append(table, method);
g_free(cphp_res_path);
- g_free(arch_ids);
}
diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c
index f15a2402fc..5243918125 100644
--- a/hw/acpi/cpu_hotplug.c
+++ b/hw/acpi/cpu_hotplug.c
@@ -128,7 +128,7 @@ void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,
Aml *zero = aml_int(0);
Aml *one = aml_int(1);
MachineClass *mc = MACHINE_GET_CLASS(machine);
- CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
+ const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
PCMachineState *pcms = PC_MACHINE(machine);
/*
@@ -329,8 +329,6 @@ void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,
apic_idx = apic_id + 1;
}
aml_append(sb_scope, aml_name_decl(CPU_ON_BITMAP, pkg));
- g_free(apic_ids);
-
aml_append(ctx, sb_scope);
method = aml_method("\\_GPE._E02", 0, AML_NOTSERIALIZED);
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index 830c475127..5c279bbaca 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -306,7 +306,8 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
if (pm->acpi_memory_hotplug.is_enabled) {
acpi_memory_hotplug_init(pci_address_space_io(lpc_pci), OBJECT(lpc_pci),
- &pm->acpi_memory_hotplug);
+ &pm->acpi_memory_hotplug,
+ ACPI_MEMORY_HOTPLUG_BASE);
}
}
diff --git a/stubs/ipmi.c b/hw/acpi/ipmi-stub.c
index 98b6dcee0d..98b6dcee0d 100644
--- a/stubs/ipmi.c
+++ b/hw/acpi/ipmi-stub.c
diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c
index ec4e64b361..210073d283 100644
--- a/hw/acpi/memory_hotplug.c
+++ b/hw/acpi/memory_hotplug.c
@@ -7,6 +7,34 @@
#include "trace.h"
#include "qapi-event.h"
+#define MEMORY_SLOTS_NUMBER "MDNR"
+#define MEMORY_HOTPLUG_IO_REGION "HPMR"
+#define MEMORY_SLOT_ADDR_LOW "MRBL"
+#define MEMORY_SLOT_ADDR_HIGH "MRBH"
+#define MEMORY_SLOT_SIZE_LOW "MRLL"
+#define MEMORY_SLOT_SIZE_HIGH "MRLH"
+#define MEMORY_SLOT_PROXIMITY "MPX"
+#define MEMORY_SLOT_ENABLED "MES"
+#define MEMORY_SLOT_INSERT_EVENT "MINS"
+#define MEMORY_SLOT_REMOVE_EVENT "MRMV"
+#define MEMORY_SLOT_EJECT "MEJ"
+#define MEMORY_SLOT_SLECTOR "MSEL"
+#define MEMORY_SLOT_OST_EVENT "MOEV"
+#define MEMORY_SLOT_OST_STATUS "MOSC"
+#define MEMORY_SLOT_LOCK "MLCK"
+#define MEMORY_SLOT_STATUS_METHOD "MRST"
+#define MEMORY_SLOT_CRS_METHOD "MCRS"
+#define MEMORY_SLOT_OST_METHOD "MOST"
+#define MEMORY_SLOT_PROXIMITY_METHOD "MPXM"
+#define MEMORY_SLOT_EJECT_METHOD "MEJ0"
+#define MEMORY_SLOT_NOTIFY_METHOD "MTFY"
+#define MEMORY_SLOT_SCAN_METHOD "MSCN"
+#define MEMORY_HOTPLUG_DEVICE "MHPD"
+#define MEMORY_HOTPLUG_IO_LEN 24
+#define MEMORY_DEVICES_CONTAINER "\\_SB.MHPC"
+
+static uint16_t memhp_io_base;
+
static ACPIOSTInfo *acpi_memory_device_status(int slot, MemStatus *mdev)
{
ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1);
@@ -178,7 +206,7 @@ static const MemoryRegionOps acpi_memory_hotplug_ops = {
};
void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner,
- MemHotplugState *state)
+ MemHotplugState *state, uint16_t io_base)
{
MachineState *machine = MACHINE(qdev_get_machine());
@@ -187,10 +215,12 @@ void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner,
return;
}
+ assert(!memhp_io_base);
+ memhp_io_base = io_base;
state->devs = g_malloc0(sizeof(*state->devs) * state->dev_count);
memory_region_init_io(&state->io, owner, &acpi_memory_hotplug_ops, state,
- "acpi-mem-hotplug", ACPI_MEMORY_HOTPLUG_IO_LEN);
- memory_region_add_subregion(as, ACPI_MEMORY_HOTPLUG_BASE, &state->io);
+ "acpi-mem-hotplug", MEMORY_HOTPLUG_IO_LEN);
+ memory_region_add_subregion(as, memhp_io_base, &state->io);
}
/**
@@ -306,3 +336,387 @@ const VMStateDescription vmstate_memory_hotplug = {
VMSTATE_END_OF_LIST()
}
};
+
+void build_memory_hotplug_aml(Aml *table, uint32_t nr_mem,
+ const char *res_root,
+ const char *event_handler_method)
+{
+ int i;
+ Aml *ifctx;
+ Aml *method;
+ Aml *dev_container;
+ Aml *mem_ctrl_dev;
+ char *mhp_res_path;
+
+ if (!memhp_io_base) {
+ return;
+ }
+
+ mhp_res_path = g_strdup_printf("%s." MEMORY_HOTPLUG_DEVICE, res_root);
+ mem_ctrl_dev = aml_device("%s", mhp_res_path);
+ {
+ Aml *crs;
+
+ aml_append(mem_ctrl_dev, aml_name_decl("_HID", aml_string("PNP0A06")));
+ aml_append(mem_ctrl_dev,
+ aml_name_decl("_UID", aml_string("Memory hotplug resources")));
+
+ crs = aml_resource_template();
+ aml_append(crs,
+ aml_io(AML_DECODE16, memhp_io_base, memhp_io_base, 0,
+ MEMORY_HOTPLUG_IO_LEN)
+ );
+ aml_append(mem_ctrl_dev, aml_name_decl("_CRS", crs));
+
+ aml_append(mem_ctrl_dev, aml_operation_region(
+ MEMORY_HOTPLUG_IO_REGION, AML_SYSTEM_IO,
+ aml_int(memhp_io_base), MEMORY_HOTPLUG_IO_LEN)
+ );
+
+ }
+ aml_append(table, mem_ctrl_dev);
+
+ dev_container = aml_device(MEMORY_DEVICES_CONTAINER);
+ {
+ Aml *field;
+ Aml *one = aml_int(1);
+ Aml *zero = aml_int(0);
+ Aml *ret_val = aml_local(0);
+ Aml *slot_arg0 = aml_arg(0);
+ Aml *slots_nr = aml_name(MEMORY_SLOTS_NUMBER);
+ Aml *ctrl_lock = aml_name(MEMORY_SLOT_LOCK);
+ Aml *slot_selector = aml_name(MEMORY_SLOT_SLECTOR);
+ char *mmio_path = g_strdup_printf("%s." MEMORY_HOTPLUG_IO_REGION,
+ mhp_res_path);
+
+ aml_append(dev_container, aml_name_decl("_HID", aml_string("PNP0A06")));
+ aml_append(dev_container,
+ aml_name_decl("_UID", aml_string("DIMM devices")));
+
+ assert(nr_mem <= ACPI_MAX_RAM_SLOTS);
+ aml_append(dev_container,
+ aml_name_decl(MEMORY_SLOTS_NUMBER, aml_int(nr_mem))
+ );
+
+ field = aml_field(mmio_path, AML_DWORD_ACC,
+ AML_NOLOCK, AML_PRESERVE);
+ aml_append(field, /* read only */
+ aml_named_field(MEMORY_SLOT_ADDR_LOW, 32));
+ aml_append(field, /* read only */
+ aml_named_field(MEMORY_SLOT_ADDR_HIGH, 32));
+ aml_append(field, /* read only */
+ aml_named_field(MEMORY_SLOT_SIZE_LOW, 32));
+ aml_append(field, /* read only */
+ aml_named_field(MEMORY_SLOT_SIZE_HIGH, 32));
+ aml_append(field, /* read only */
+ aml_named_field(MEMORY_SLOT_PROXIMITY, 32));
+ aml_append(dev_container, field);
+
+ field = aml_field(mmio_path, AML_BYTE_ACC,
+ AML_NOLOCK, AML_WRITE_AS_ZEROS);
+ aml_append(field, aml_reserved_field(160 /* bits, Offset(20) */));
+ aml_append(field, /* 1 if enabled, read only */
+ aml_named_field(MEMORY_SLOT_ENABLED, 1));
+ aml_append(field,
+ /*(read) 1 if has a insert event. (write) 1 to clear event */
+ aml_named_field(MEMORY_SLOT_INSERT_EVENT, 1));
+ aml_append(field,
+ /* (read) 1 if has a remove event. (write) 1 to clear event */
+ aml_named_field(MEMORY_SLOT_REMOVE_EVENT, 1));
+ aml_append(field,
+ /* initiates device eject, write only */
+ aml_named_field(MEMORY_SLOT_EJECT, 1));
+ aml_append(dev_container, field);
+
+ field = aml_field(mmio_path, AML_DWORD_ACC,
+ AML_NOLOCK, AML_PRESERVE);
+ aml_append(field, /* DIMM selector, write only */
+ aml_named_field(MEMORY_SLOT_SLECTOR, 32));
+ aml_append(field, /* _OST event code, write only */
+ aml_named_field(MEMORY_SLOT_OST_EVENT, 32));
+ aml_append(field, /* _OST status code, write only */
+ aml_named_field(MEMORY_SLOT_OST_STATUS, 32));
+ aml_append(dev_container, field);
+ g_free(mmio_path);
+
+ method = aml_method("_STA", 0, AML_NOTSERIALIZED);
+ ifctx = aml_if(aml_equal(slots_nr, zero));
+ {
+ aml_append(ifctx, aml_return(zero));
+ }
+ aml_append(method, ifctx);
+ /* present, functioning, decoding, not shown in UI */
+ aml_append(method, aml_return(aml_int(0xB)));
+ aml_append(dev_container, method);
+
+ aml_append(dev_container, aml_mutex(MEMORY_SLOT_LOCK, 0));
+
+ method = aml_method(MEMORY_SLOT_SCAN_METHOD, 0, AML_NOTSERIALIZED);
+ {
+ Aml *else_ctx;
+ Aml *while_ctx;
+ Aml *idx = aml_local(0);
+ Aml *eject_req = aml_int(3);
+ Aml *dev_chk = aml_int(1);
+
+ ifctx = aml_if(aml_equal(slots_nr, zero));
+ {
+ aml_append(ifctx, aml_return(zero));
+ }
+ aml_append(method, ifctx);
+
+ aml_append(method, aml_store(zero, idx));
+ aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+ /* build AML that:
+ * loops over all slots and Notifies DIMMs with
+ * Device Check or Eject Request notifications if
+ * slot has corresponding status bit set and clears
+ * slot status.
+ */
+ while_ctx = aml_while(aml_lless(idx, slots_nr));
+ {
+ Aml *ins_evt = aml_name(MEMORY_SLOT_INSERT_EVENT);
+ Aml *rm_evt = aml_name(MEMORY_SLOT_REMOVE_EVENT);
+
+ aml_append(while_ctx, aml_store(idx, slot_selector));
+ ifctx = aml_if(aml_equal(ins_evt, one));
+ {
+ aml_append(ifctx,
+ aml_call2(MEMORY_SLOT_NOTIFY_METHOD,
+ idx, dev_chk));
+ aml_append(ifctx, aml_store(one, ins_evt));
+ }
+ aml_append(while_ctx, ifctx);
+
+ else_ctx = aml_else();
+ ifctx = aml_if(aml_equal(rm_evt, one));
+ {
+ aml_append(ifctx,
+ aml_call2(MEMORY_SLOT_NOTIFY_METHOD,
+ idx, eject_req));
+ aml_append(ifctx, aml_store(one, rm_evt));
+ }
+ aml_append(else_ctx, ifctx);
+ aml_append(while_ctx, else_ctx);
+
+ aml_append(while_ctx, aml_add(idx, one, idx));
+ }
+ aml_append(method, while_ctx);
+ aml_append(method, aml_release(ctrl_lock));
+ aml_append(method, aml_return(one));
+ }
+ aml_append(dev_container, method);
+
+ method = aml_method(MEMORY_SLOT_STATUS_METHOD, 1, AML_NOTSERIALIZED);
+ {
+ Aml *slot_enabled = aml_name(MEMORY_SLOT_ENABLED);
+
+ aml_append(method, aml_store(zero, ret_val));
+ aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+ aml_append(method,
+ aml_store(aml_to_integer(slot_arg0), slot_selector));
+
+ ifctx = aml_if(aml_equal(slot_enabled, one));
+ {
+ aml_append(ifctx, aml_store(aml_int(0xF), ret_val));
+ }
+ aml_append(method, ifctx);
+
+ aml_append(method, aml_release(ctrl_lock));
+ aml_append(method, aml_return(ret_val));
+ }
+ aml_append(dev_container, method);
+
+ method = aml_method(MEMORY_SLOT_CRS_METHOD, 1, AML_SERIALIZED);
+ {
+ Aml *mr64 = aml_name("MR64");
+ Aml *mr32 = aml_name("MR32");
+ Aml *crs_tmpl = aml_resource_template();
+ Aml *minl = aml_name("MINL");
+ Aml *minh = aml_name("MINH");
+ Aml *maxl = aml_name("MAXL");
+ Aml *maxh = aml_name("MAXH");
+ Aml *lenl = aml_name("LENL");
+ Aml *lenh = aml_name("LENH");
+
+ aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+ aml_append(method, aml_store(aml_to_integer(slot_arg0),
+ slot_selector));
+
+ aml_append(crs_tmpl,
+ aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_CACHEABLE, AML_READ_WRITE,
+ 0, 0x0, 0xFFFFFFFFFFFFFFFEULL, 0,
+ 0xFFFFFFFFFFFFFFFFULL));
+ aml_append(method, aml_name_decl("MR64", crs_tmpl));
+ aml_append(method,
+ aml_create_dword_field(mr64, aml_int(14), "MINL"));
+ aml_append(method,
+ aml_create_dword_field(mr64, aml_int(18), "MINH"));
+ aml_append(method,
+ aml_create_dword_field(mr64, aml_int(38), "LENL"));
+ aml_append(method,
+ aml_create_dword_field(mr64, aml_int(42), "LENH"));
+ aml_append(method,
+ aml_create_dword_field(mr64, aml_int(22), "MAXL"));
+ aml_append(method,
+ aml_create_dword_field(mr64, aml_int(26), "MAXH"));
+
+ aml_append(method,
+ aml_store(aml_name(MEMORY_SLOT_ADDR_HIGH), minh));
+ aml_append(method,
+ aml_store(aml_name(MEMORY_SLOT_ADDR_LOW), minl));
+ aml_append(method,
+ aml_store(aml_name(MEMORY_SLOT_SIZE_HIGH), lenh));
+ aml_append(method,
+ aml_store(aml_name(MEMORY_SLOT_SIZE_LOW), lenl));
+
+ /* 64-bit math: MAX = MIN + LEN - 1 */
+ aml_append(method, aml_add(minl, lenl, maxl));
+ aml_append(method, aml_add(minh, lenh, maxh));
+ ifctx = aml_if(aml_lless(maxl, minl));
+ {
+ aml_append(ifctx, aml_add(maxh, one, maxh));
+ }
+ aml_append(method, ifctx);
+ ifctx = aml_if(aml_lless(maxl, one));
+ {
+ aml_append(ifctx, aml_subtract(maxh, one, maxh));
+ }
+ aml_append(method, ifctx);
+ aml_append(method, aml_subtract(maxl, one, maxl));
+
+ /* return 32-bit _CRS if addr/size is in low mem */
+ /* TODO: remove it since all hotplugged DIMMs are in high mem */
+ ifctx = aml_if(aml_equal(maxh, zero));
+ {
+ crs_tmpl = aml_resource_template();
+ aml_append(crs_tmpl,
+ aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
+ AML_MAX_FIXED, AML_CACHEABLE,
+ AML_READ_WRITE,
+ 0, 0x0, 0xFFFFFFFE, 0,
+ 0xFFFFFFFF));
+ aml_append(ifctx, aml_name_decl("MR32", crs_tmpl));
+ aml_append(ifctx,
+ aml_create_dword_field(mr32, aml_int(10), "MIN"));
+ aml_append(ifctx,
+ aml_create_dword_field(mr32, aml_int(14), "MAX"));
+ aml_append(ifctx,
+ aml_create_dword_field(mr32, aml_int(22), "LEN"));
+ aml_append(ifctx, aml_store(minl, aml_name("MIN")));
+ aml_append(ifctx, aml_store(maxl, aml_name("MAX")));
+ aml_append(ifctx, aml_store(lenl, aml_name("LEN")));
+
+ aml_append(ifctx, aml_release(ctrl_lock));
+ aml_append(ifctx, aml_return(mr32));
+ }
+ aml_append(method, ifctx);
+
+ aml_append(method, aml_release(ctrl_lock));
+ aml_append(method, aml_return(mr64));
+ }
+ aml_append(dev_container, method);
+
+ method = aml_method(MEMORY_SLOT_PROXIMITY_METHOD, 1,
+ AML_NOTSERIALIZED);
+ {
+ Aml *proximity = aml_name(MEMORY_SLOT_PROXIMITY);
+
+ aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+ aml_append(method, aml_store(aml_to_integer(slot_arg0),
+ slot_selector));
+ aml_append(method, aml_store(proximity, ret_val));
+ aml_append(method, aml_release(ctrl_lock));
+ aml_append(method, aml_return(ret_val));
+ }
+ aml_append(dev_container, method);
+
+ method = aml_method(MEMORY_SLOT_OST_METHOD, 4, AML_NOTSERIALIZED);
+ {
+ Aml *ost_evt = aml_name(MEMORY_SLOT_OST_EVENT);
+ Aml *ost_status = aml_name(MEMORY_SLOT_OST_STATUS);
+
+ aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+ aml_append(method, aml_store(aml_to_integer(slot_arg0),
+ slot_selector));
+ aml_append(method, aml_store(aml_arg(1), ost_evt));
+ aml_append(method, aml_store(aml_arg(2), ost_status));
+ aml_append(method, aml_release(ctrl_lock));
+ }
+ aml_append(dev_container, method);
+
+ method = aml_method(MEMORY_SLOT_EJECT_METHOD, 2, AML_NOTSERIALIZED);
+ {
+ Aml *eject = aml_name(MEMORY_SLOT_EJECT);
+
+ aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+ aml_append(method, aml_store(aml_to_integer(slot_arg0),
+ slot_selector));
+ aml_append(method, aml_store(one, eject));
+ aml_append(method, aml_release(ctrl_lock));
+ }
+ aml_append(dev_container, method);
+
+ /* build memory devices */
+ for (i = 0; i < nr_mem; i++) {
+ Aml *dev;
+ const char *s;
+
+ dev = aml_device("MP%02X", i);
+ aml_append(dev, aml_name_decl("_UID", aml_string("0x%02X", i)));
+ aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C80")));
+
+ method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
+ s = MEMORY_SLOT_CRS_METHOD;
+ aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
+ aml_append(dev, method);
+
+ method = aml_method("_STA", 0, AML_NOTSERIALIZED);
+ s = MEMORY_SLOT_STATUS_METHOD;
+ aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
+ aml_append(dev, method);
+
+ method = aml_method("_PXM", 0, AML_NOTSERIALIZED);
+ s = MEMORY_SLOT_PROXIMITY_METHOD;
+ aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
+ aml_append(dev, method);
+
+ method = aml_method("_OST", 3, AML_NOTSERIALIZED);
+ s = MEMORY_SLOT_OST_METHOD;
+ aml_append(method, aml_return(aml_call4(
+ s, aml_name("_UID"), aml_arg(0), aml_arg(1), aml_arg(2)
+ )));
+ aml_append(dev, method);
+
+ method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
+ s = MEMORY_SLOT_EJECT_METHOD;
+ aml_append(method, aml_return(aml_call2(
+ s, aml_name("_UID"), aml_arg(0))));
+ aml_append(dev, method);
+
+ aml_append(dev_container, dev);
+ }
+
+ /* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) {
+ * If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ... }
+ */
+ method = aml_method(MEMORY_SLOT_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
+ for (i = 0; i < nr_mem; i++) {
+ ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i)));
+ aml_append(ifctx,
+ aml_notify(aml_name("MP%.02X", i), aml_arg(1))
+ );
+ aml_append(method, ifctx);
+ }
+ aml_append(dev_container, method);
+ }
+ aml_append(table, dev_container);
+
+ method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED);
+ aml_append(method,
+ aml_call0(MEMORY_DEVICES_CONTAINER "." MEMORY_SLOT_SCAN_METHOD));
+ aml_append(table, method);
+
+ g_free(mhp_res_path);
+}
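
The _CRS method above computes MAX = MIN + LEN - 1 on 32-bit halves, since the qword resource template can only be patched through 32-bit CreateDWordField views: a carry is added to the high word when the low-word addition wraps, and a borrow is taken when the low word is zero before the final decrement. Below is a quick host-side sanity check of the same carry/borrow arithmetic in plain C; it is an illustration only, not part of the patch.

#include <assert.h>
#include <stdint.h>

/* Mirror of the AML above: MAX = MIN + LEN - 1 done on 32-bit halves. */
static uint64_t slot_max(uint32_t minl, uint32_t minh,
                         uint32_t lenl, uint32_t lenh)
{
    uint32_t maxl = minl + lenl;
    uint32_t maxh = minh + lenh;

    if (maxl < minl) {      /* low-word addition wrapped: carry into MAXH */
        maxh += 1;
    }
    if (maxl < 1) {         /* low word is zero: borrow for the -1 */
        maxh -= 1;
    }
    maxl -= 1;

    return ((uint64_t)maxh << 32) | maxl;
}

int main(void)
{
    /* 1 GB DIMM at the 4 GB mark: last byte is 0x13FFFFFFF */
    assert(slot_max(0x0, 0x1, 0x40000000, 0x0) == 0x13FFFFFFFULL);
    /* carry case: the low words wrap across a 4 GB boundary */
    assert(slot_max(0xFFFF0000, 0x0, 0x20000, 0x0) == 0x10000FFFFULL);
    return 0;
}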
diff --git a/hw/acpi/memory_hotplug_acpi_table.c b/hw/acpi/memory_hotplug_acpi_table.c
deleted file mode 100644
index c75660215d..0000000000
--- a/hw/acpi/memory_hotplug_acpi_table.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Memory hotplug AML code of DSDT ACPI table
- *
- * Copyright (C) 2015 Red Hat Inc
- *
- * Author: Igor Mammedov <imammedo@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#include "qemu/osdep.h"
-#include "hw/acpi/memory_hotplug.h"
-#include "include/hw/acpi/pc-hotplug.h"
-#include "hw/boards.h"
-
-void build_memory_hotplug_aml(Aml *ctx, uint32_t nr_mem,
- uint16_t io_base, uint16_t io_len)
-{
- Aml *ifctx;
- Aml *method;
- Aml *pci_scope;
- Aml *mem_ctrl_dev;
-
- /* scope for memory hotplug controller device node */
- pci_scope = aml_scope("_SB.PCI0");
- mem_ctrl_dev = aml_device(MEMORY_HOTPLUG_DEVICE);
- {
- Aml *one = aml_int(1);
- Aml *zero = aml_int(0);
- Aml *ret_val = aml_local(0);
- Aml *slot_arg0 = aml_arg(0);
- Aml *slots_nr = aml_name(MEMORY_SLOTS_NUMBER);
- Aml *ctrl_lock = aml_name(MEMORY_SLOT_LOCK);
- Aml *slot_selector = aml_name(MEMORY_SLOT_SLECTOR);
-
- aml_append(mem_ctrl_dev, aml_name_decl("_HID", aml_string("PNP0A06")));
- aml_append(mem_ctrl_dev,
- aml_name_decl("_UID", aml_string("Memory hotplug resources")));
-
- method = aml_method("_STA", 0, AML_NOTSERIALIZED);
- ifctx = aml_if(aml_equal(slots_nr, zero));
- {
- aml_append(ifctx, aml_return(zero));
- }
- aml_append(method, ifctx);
- /* present, functioning, decoding, not shown in UI */
- aml_append(method, aml_return(aml_int(0xB)));
- aml_append(mem_ctrl_dev, method);
-
- aml_append(mem_ctrl_dev, aml_mutex(MEMORY_SLOT_LOCK, 0));
-
- method = aml_method(MEMORY_SLOT_SCAN_METHOD, 0, AML_NOTSERIALIZED);
- {
- Aml *else_ctx;
- Aml *while_ctx;
- Aml *idx = aml_local(0);
- Aml *eject_req = aml_int(3);
- Aml *dev_chk = aml_int(1);
-
- ifctx = aml_if(aml_equal(slots_nr, zero));
- {
- aml_append(ifctx, aml_return(zero));
- }
- aml_append(method, ifctx);
-
- aml_append(method, aml_store(zero, idx));
- aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
- /* build AML that:
- * loops over all slots and Notifies DIMMs with
- * Device Check or Eject Request notifications if
- * slot has corresponding status bit set and clears
- * slot status.
- */
- while_ctx = aml_while(aml_lless(idx, slots_nr));
- {
- Aml *ins_evt = aml_name(MEMORY_SLOT_INSERT_EVENT);
- Aml *rm_evt = aml_name(MEMORY_SLOT_REMOVE_EVENT);
-
- aml_append(while_ctx, aml_store(idx, slot_selector));
- ifctx = aml_if(aml_equal(ins_evt, one));
- {
- aml_append(ifctx,
- aml_call2(MEMORY_SLOT_NOTIFY_METHOD,
- idx, dev_chk));
- aml_append(ifctx, aml_store(one, ins_evt));
- }
- aml_append(while_ctx, ifctx);
-
- else_ctx = aml_else();
- ifctx = aml_if(aml_equal(rm_evt, one));
- {
- aml_append(ifctx,
- aml_call2(MEMORY_SLOT_NOTIFY_METHOD,
- idx, eject_req));
- aml_append(ifctx, aml_store(one, rm_evt));
- }
- aml_append(else_ctx, ifctx);
- aml_append(while_ctx, else_ctx);
-
- aml_append(while_ctx, aml_add(idx, one, idx));
- }
- aml_append(method, while_ctx);
- aml_append(method, aml_release(ctrl_lock));
- aml_append(method, aml_return(one));
- }
- aml_append(mem_ctrl_dev, method);
-
- method = aml_method(MEMORY_SLOT_STATUS_METHOD, 1, AML_NOTSERIALIZED);
- {
- Aml *slot_enabled = aml_name(MEMORY_SLOT_ENABLED);
-
- aml_append(method, aml_store(zero, ret_val));
- aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
- aml_append(method,
- aml_store(aml_to_integer(slot_arg0), slot_selector));
-
- ifctx = aml_if(aml_equal(slot_enabled, one));
- {
- aml_append(ifctx, aml_store(aml_int(0xF), ret_val));
- }
- aml_append(method, ifctx);
-
- aml_append(method, aml_release(ctrl_lock));
- aml_append(method, aml_return(ret_val));
- }
- aml_append(mem_ctrl_dev, method);
-
- method = aml_method(MEMORY_SLOT_CRS_METHOD, 1, AML_SERIALIZED);
- {
- Aml *mr64 = aml_name("MR64");
- Aml *mr32 = aml_name("MR32");
- Aml *crs_tmpl = aml_resource_template();
- Aml *minl = aml_name("MINL");
- Aml *minh = aml_name("MINH");
- Aml *maxl = aml_name("MAXL");
- Aml *maxh = aml_name("MAXH");
- Aml *lenl = aml_name("LENL");
- Aml *lenh = aml_name("LENH");
-
- aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
- aml_append(method, aml_store(aml_to_integer(slot_arg0),
- slot_selector));
-
- aml_append(crs_tmpl,
- aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
- AML_CACHEABLE, AML_READ_WRITE,
- 0, 0x0, 0xFFFFFFFFFFFFFFFEULL, 0,
- 0xFFFFFFFFFFFFFFFFULL));
- aml_append(method, aml_name_decl("MR64", crs_tmpl));
- aml_append(method,
- aml_create_dword_field(mr64, aml_int(14), "MINL"));
- aml_append(method,
- aml_create_dword_field(mr64, aml_int(18), "MINH"));
- aml_append(method,
- aml_create_dword_field(mr64, aml_int(38), "LENL"));
- aml_append(method,
- aml_create_dword_field(mr64, aml_int(42), "LENH"));
- aml_append(method,
- aml_create_dword_field(mr64, aml_int(22), "MAXL"));
- aml_append(method,
- aml_create_dword_field(mr64, aml_int(26), "MAXH"));
-
- aml_append(method,
- aml_store(aml_name(MEMORY_SLOT_ADDR_HIGH), minh));
- aml_append(method,
- aml_store(aml_name(MEMORY_SLOT_ADDR_LOW), minl));
- aml_append(method,
- aml_store(aml_name(MEMORY_SLOT_SIZE_HIGH), lenh));
- aml_append(method,
- aml_store(aml_name(MEMORY_SLOT_SIZE_LOW), lenl));
-
- /* 64-bit math: MAX = MIN + LEN - 1 */
- aml_append(method, aml_add(minl, lenl, maxl));
- aml_append(method, aml_add(minh, lenh, maxh));
- ifctx = aml_if(aml_lless(maxl, minl));
- {
- aml_append(ifctx, aml_add(maxh, one, maxh));
- }
- aml_append(method, ifctx);
- ifctx = aml_if(aml_lless(maxl, one));
- {
- aml_append(ifctx, aml_subtract(maxh, one, maxh));
- }
- aml_append(method, ifctx);
- aml_append(method, aml_subtract(maxl, one, maxl));
-
- /* return 32-bit _CRS if addr/size is in low mem */
- /* TODO: remove it since all hotplugged DIMMs are in high mem */
- ifctx = aml_if(aml_equal(maxh, zero));
- {
- crs_tmpl = aml_resource_template();
- aml_append(crs_tmpl,
- aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED,
- AML_MAX_FIXED, AML_CACHEABLE,
- AML_READ_WRITE,
- 0, 0x0, 0xFFFFFFFE, 0,
- 0xFFFFFFFF));
- aml_append(ifctx, aml_name_decl("MR32", crs_tmpl));
- aml_append(ifctx,
- aml_create_dword_field(mr32, aml_int(10), "MIN"));
- aml_append(ifctx,
- aml_create_dword_field(mr32, aml_int(14), "MAX"));
- aml_append(ifctx,
- aml_create_dword_field(mr32, aml_int(22), "LEN"));
- aml_append(ifctx, aml_store(minl, aml_name("MIN")));
- aml_append(ifctx, aml_store(maxl, aml_name("MAX")));
- aml_append(ifctx, aml_store(lenl, aml_name("LEN")));
-
- aml_append(ifctx, aml_release(ctrl_lock));
- aml_append(ifctx, aml_return(mr32));
- }
- aml_append(method, ifctx);
-
- aml_append(method, aml_release(ctrl_lock));
- aml_append(method, aml_return(mr64));
- }
- aml_append(mem_ctrl_dev, method);
-
- method = aml_method(MEMORY_SLOT_PROXIMITY_METHOD, 1,
- AML_NOTSERIALIZED);
- {
- Aml *proximity = aml_name(MEMORY_SLOT_PROXIMITY);
-
- aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
- aml_append(method, aml_store(aml_to_integer(slot_arg0),
- slot_selector));
- aml_append(method, aml_store(proximity, ret_val));
- aml_append(method, aml_release(ctrl_lock));
- aml_append(method, aml_return(ret_val));
- }
- aml_append(mem_ctrl_dev, method);
-
- method = aml_method(MEMORY_SLOT_OST_METHOD, 4, AML_NOTSERIALIZED);
- {
- Aml *ost_evt = aml_name(MEMORY_SLOT_OST_EVENT);
- Aml *ost_status = aml_name(MEMORY_SLOT_OST_STATUS);
-
- aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
- aml_append(method, aml_store(aml_to_integer(slot_arg0),
- slot_selector));
- aml_append(method, aml_store(aml_arg(1), ost_evt));
- aml_append(method, aml_store(aml_arg(2), ost_status));
- aml_append(method, aml_release(ctrl_lock));
- }
- aml_append(mem_ctrl_dev, method);
-
- method = aml_method(MEMORY_SLOT_EJECT_METHOD, 2, AML_NOTSERIALIZED);
- {
- Aml *eject = aml_name(MEMORY_SLOT_EJECT);
-
- aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
- aml_append(method, aml_store(aml_to_integer(slot_arg0),
- slot_selector));
- aml_append(method, aml_store(one, eject));
- aml_append(method, aml_release(ctrl_lock));
- }
- aml_append(mem_ctrl_dev, method);
- }
- aml_append(pci_scope, mem_ctrl_dev);
- aml_append(ctx, pci_scope);
-}
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index 17d36bd595..6d99fe407c 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -644,7 +644,8 @@ static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
PIIX4_CPU_HOTPLUG_IO_BASE);
if (s->acpi_memory_hotplug.is_enabled) {
- acpi_memory_hotplug_init(parent, OBJECT(s), &s->acpi_memory_hotplug);
+ acpi_memory_hotplug_init(parent, OBJECT(s), &s->acpi_memory_hotplug,
+ ACPI_MEMORY_HOTPLUG_BASE);
}
}
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
index c7206fda6d..a92c2f1c36 100644
--- a/hw/arm/aspeed.c
+++ b/hw/arm/aspeed.c
@@ -20,6 +20,8 @@
#include "qemu/log.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
+#include "hw/loader.h"
+#include "qemu/error-report.h"
static struct arm_boot_info aspeed_board_binfo = {
.board_id = -1, /* device-tree-only board */
@@ -34,13 +36,18 @@ typedef struct AspeedBoardState {
typedef struct AspeedBoardConfig {
const char *soc_name;
uint32_t hw_strap1;
+ const char *fmc_model;
+ const char *spi_model;
+ uint32_t num_cs;
} AspeedBoardConfig;
enum {
PALMETTO_BMC,
AST2500_EVB,
+ ROMULUS_BMC,
};
+/* Palmetto hardware value: 0x120CE416 */
#define PALMETTO_BMC_HW_STRAP1 ( \
SCU_AST2400_HW_STRAP_DRAM_SIZE(DRAM_SIZE_256MB) | \
SCU_AST2400_HW_STRAP_DRAM_CONFIG(2 /* DDR3 with CL=6, CWL=5 */) | \
@@ -54,6 +61,7 @@ enum {
SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \
SCU_AST2400_HW_STRAP_BOOT_MODE(AST2400_SPI_BOOT))
+/* AST2500 evb hardware value: 0xF100C2E6 */
#define AST2500_EVB_HW_STRAP1 (( \
AST2500_HW_STRAP1_DEFAULTS | \
SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \
@@ -64,11 +72,62 @@ enum {
SCU_HW_STRAP_MAC0_RGMII) & \
~SCU_HW_STRAP_2ND_BOOT_WDT)
+/* Romulus hardware value: 0xF10AD206 */
+#define ROMULUS_BMC_HW_STRAP1 ( \
+ AST2500_HW_STRAP1_DEFAULTS | \
+ SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \
+ SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \
+ SCU_AST2500_HW_STRAP_UART_DEBUG | \
+ SCU_AST2500_HW_STRAP_DDR4_ENABLE | \
+ SCU_AST2500_HW_STRAP_ACPI_ENABLE | \
+ SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER))
+
static const AspeedBoardConfig aspeed_boards[] = {
- [PALMETTO_BMC] = { "ast2400-a0", PALMETTO_BMC_HW_STRAP1 },
- [AST2500_EVB] = { "ast2500-a1", AST2500_EVB_HW_STRAP1 },
+ [PALMETTO_BMC] = {
+ .soc_name = "ast2400-a1",
+ .hw_strap1 = PALMETTO_BMC_HW_STRAP1,
+ .fmc_model = "n25q256a",
+ .spi_model = "mx25l25635e",
+ .num_cs = 1,
+ },
+ [AST2500_EVB] = {
+ .soc_name = "ast2500-a1",
+ .hw_strap1 = AST2500_EVB_HW_STRAP1,
+ .fmc_model = "n25q256a",
+ .spi_model = "mx25l25635e",
+ .num_cs = 1,
+ },
+ [ROMULUS_BMC] = {
+ .soc_name = "ast2500-a1",
+ .hw_strap1 = ROMULUS_BMC_HW_STRAP1,
+ .fmc_model = "n25q256a",
+ .spi_model = "mx66l1g45g",
+ .num_cs = 2,
+ },
};
+#define FIRMWARE_ADDR 0x0
+
+static void write_boot_rom(DriveInfo *dinfo, hwaddr addr, size_t rom_size,
+ Error **errp)
+{
+ BlockBackend *blk = blk_by_legacy_dinfo(dinfo);
+ uint8_t *storage;
+
+ if (rom_size > blk_getlength(blk)) {
+ rom_size = blk_getlength(blk);
+ }
+
+ storage = g_new0(uint8_t, rom_size);
+ if (blk_pread(blk, 0, storage, rom_size) < 0) {
+ error_setg(errp, "failed to read the initial flash content");
+ return;
+ }
+
+ rom_add_blob_fixed("aspeed.boot_rom", storage, rom_size, addr);
+ g_free(storage);
+}
+
static void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype,
Error **errp)
{
@@ -100,6 +159,7 @@ static void aspeed_board_init(MachineState *machine,
{
AspeedBoardState *bmc;
AspeedSoCClass *sc;
+ DriveInfo *drive0 = drive_get(IF_MTD, 0, 0);
bmc = g_new0(AspeedBoardState, 1);
object_initialize(&bmc->soc, (sizeof(bmc->soc)), cfg->soc_name);
@@ -112,6 +172,8 @@ static void aspeed_board_init(MachineState *machine,
&error_abort);
object_property_set_int(OBJECT(&bmc->soc), cfg->hw_strap1, "hw-strap1",
&error_abort);
+ object_property_set_int(OBJECT(&bmc->soc), cfg->num_cs, "num-cs",
+ &error_abort);
object_property_set_bool(OBJECT(&bmc->soc), true, "realized",
&error_abort);
@@ -128,8 +190,24 @@ static void aspeed_board_init(MachineState *machine,
object_property_add_const_link(OBJECT(&bmc->soc), "ram", OBJECT(&bmc->ram),
&error_abort);
- aspeed_board_init_flashes(&bmc->soc.fmc, "n25q256a", &error_abort);
- aspeed_board_init_flashes(&bmc->soc.spi[0], "mx25l25635e", &error_abort);
+ aspeed_board_init_flashes(&bmc->soc.fmc, cfg->fmc_model, &error_abort);
+ aspeed_board_init_flashes(&bmc->soc.spi[0], cfg->spi_model, &error_abort);
+
+ /* Install the first FMC flash content as a boot ROM. */
+ if (drive0) {
+ AspeedSMCFlash *fl = &bmc->soc.fmc.flashes[0];
+ MemoryRegion *boot_rom = g_new(MemoryRegion, 1);
+
+ /*
+ * create a ROM region using the default mapping window size of
+ * the flash module.
+ */
+ memory_region_init_rom(boot_rom, OBJECT(bmc), "aspeed.boot_rom",
+ fl->size, &error_abort);
+ memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR,
+ boot_rom);
+ write_boot_rom(drive0, FIRMWARE_ADDR, fl->size, &error_abort);
+ }
aspeed_board_binfo.kernel_filename = machine->kernel_filename;
aspeed_board_binfo.initrd_filename = machine->initrd_filename;
@@ -188,10 +266,35 @@ static const TypeInfo ast2500_evb_type = {
.class_init = ast2500_evb_class_init,
};
+static void romulus_bmc_init(MachineState *machine)
+{
+ aspeed_board_init(machine, &aspeed_boards[ROMULUS_BMC]);
+}
+
+static void romulus_bmc_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "OpenPOWER Romulus BMC (ARM1176)";
+ mc->init = romulus_bmc_init;
+ mc->max_cpus = 1;
+ mc->no_sdcard = 1;
+ mc->no_floppy = 1;
+ mc->no_cdrom = 1;
+ mc->no_parallel = 1;
+}
+
+static const TypeInfo romulus_bmc_type = {
+ .name = MACHINE_TYPE_NAME("romulus-bmc"),
+ .parent = TYPE_MACHINE,
+ .class_init = romulus_bmc_class_init,
+};
+
static void aspeed_machine_init(void)
{
type_register_static(&palmetto_bmc_type);
type_register_static(&ast2500_evb_type);
+ type_register_static(&romulus_bmc_type);
}
type_init(aspeed_machine_init)
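
write_boot_rom() above installs the first MTD drive as a boot ROM at address 0, copying at most the flash module's default mapping window (fl->size), clamping to the backing image length, and failing hard on a short read. The snippet below restates that clamp-and-load behaviour in plain stdio C purely for illustration; load_boot_rom() is a made-up name and has nothing to do with the QEMU block-layer calls used in the patch.

#include <stdio.h>
#include <string.h>

static int load_boot_rom(const char *path, unsigned char *rom,
                         size_t window_size)
{
    FILE *f = fopen(path, "rb");
    long len;
    size_t want, got;

    if (!f) {
        return -1;
    }
    fseek(f, 0, SEEK_END);
    len = ftell(f);
    rewind(f);
    if (len < 0) {
        fclose(f);
        return -1;
    }

    /* copy at most the mapping window; a smaller image leaves the tail zeroed */
    want = (size_t)len < window_size ? (size_t)len : window_size;
    memset(rom, 0, window_size);
    got = fread(rom, 1, want, f);
    fclose(f);

    return got == want ? 0 : -1;    /* short read: report the failure */
}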
diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c
index e14f5c217e..b3e7f07b61 100644
--- a/hw/arm/aspeed_soc.c
+++ b/hw/arm/aspeed_soc.c
@@ -29,6 +29,7 @@
#define ASPEED_SOC_VIC_BASE 0x1E6C0000
#define ASPEED_SOC_SDMC_BASE 0x1E6E0000
#define ASPEED_SOC_SCU_BASE 0x1E6E2000
+#define ASPEED_SOC_SRAM_BASE 0x1E720000
#define ASPEED_SOC_TIMER_BASE 0x1E782000
#define ASPEED_SOC_I2C_BASE 0x1E78A000
@@ -47,15 +48,47 @@ static const char *aspeed_soc_ast2500_typenames[] = {
"aspeed.smc.ast2500-spi1", "aspeed.smc.ast2500-spi2" };
static const AspeedSoCInfo aspeed_socs[] = {
- { "ast2400-a0", "arm926", AST2400_A0_SILICON_REV, AST2400_SDRAM_BASE,
- 1, aspeed_soc_ast2400_spi_bases,
- "aspeed.smc.fmc", aspeed_soc_ast2400_typenames },
- { "ast2400", "arm926", AST2400_A0_SILICON_REV, AST2400_SDRAM_BASE,
- 1, aspeed_soc_ast2400_spi_bases,
- "aspeed.smc.fmc", aspeed_soc_ast2400_typenames },
- { "ast2500-a1", "arm1176", AST2500_A1_SILICON_REV, AST2500_SDRAM_BASE,
- 2, aspeed_soc_ast2500_spi_bases,
- "aspeed.smc.ast2500-fmc", aspeed_soc_ast2500_typenames },
+ {
+ .name = "ast2400-a0",
+ .cpu_model = "arm926",
+ .silicon_rev = AST2400_A0_SILICON_REV,
+ .sdram_base = AST2400_SDRAM_BASE,
+ .sram_size = 0x8000,
+ .spis_num = 1,
+ .spi_bases = aspeed_soc_ast2400_spi_bases,
+ .fmc_typename = "aspeed.smc.fmc",
+ .spi_typename = aspeed_soc_ast2400_typenames,
+ }, {
+ .name = "ast2400-a1",
+ .cpu_model = "arm926",
+ .silicon_rev = AST2400_A1_SILICON_REV,
+ .sdram_base = AST2400_SDRAM_BASE,
+ .sram_size = 0x8000,
+ .spis_num = 1,
+ .spi_bases = aspeed_soc_ast2400_spi_bases,
+ .fmc_typename = "aspeed.smc.fmc",
+ .spi_typename = aspeed_soc_ast2400_typenames,
+ }, {
+ .name = "ast2400",
+ .cpu_model = "arm926",
+ .silicon_rev = AST2400_A0_SILICON_REV,
+ .sdram_base = AST2400_SDRAM_BASE,
+ .sram_size = 0x8000,
+ .spis_num = 1,
+ .spi_bases = aspeed_soc_ast2400_spi_bases,
+ .fmc_typename = "aspeed.smc.fmc",
+ .spi_typename = aspeed_soc_ast2400_typenames,
+ }, {
+ .name = "ast2500-a1",
+ .cpu_model = "arm1176",
+ .silicon_rev = AST2500_A1_SILICON_REV,
+ .sdram_base = AST2500_SDRAM_BASE,
+ .sram_size = 0x9000,
+ .spis_num = 2,
+ .spi_bases = aspeed_soc_ast2500_spi_bases,
+ .fmc_typename = "aspeed.smc.ast2500-fmc",
+ .spi_typename = aspeed_soc_ast2500_typenames,
+ },
};
/*
@@ -87,9 +120,13 @@ static void aspeed_soc_init(Object *obj)
{
AspeedSoCState *s = ASPEED_SOC(obj);
AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s);
+ char *cpu_typename;
int i;
- s->cpu = cpu_arm_init(sc->info->cpu_model);
+ cpu_typename = g_strdup_printf("%s-" TYPE_ARM_CPU, sc->info->cpu_model);
+ object_initialize(&s->cpu, sizeof(s->cpu), cpu_typename);
+ object_property_add_child(obj, "cpu", OBJECT(&s->cpu), NULL);
+ g_free(cpu_typename);
object_initialize(&s->vic, sizeof(s->vic), TYPE_ASPEED_VIC);
object_property_add_child(obj, "vic", OBJECT(&s->vic), NULL);
@@ -116,11 +153,13 @@ static void aspeed_soc_init(Object *obj)
object_initialize(&s->fmc, sizeof(s->fmc), sc->info->fmc_typename);
object_property_add_child(obj, "fmc", OBJECT(&s->fmc), NULL);
qdev_set_parent_bus(DEVICE(&s->fmc), sysbus_get_default());
+ object_property_add_alias(obj, "num-cs", OBJECT(&s->fmc), "num-cs",
+ &error_abort);
for (i = 0; i < sc->info->spis_num; i++) {
object_initialize(&s->spi[i], sizeof(s->spi[i]),
sc->info->spi_typename[i]);
- object_property_add_child(obj, "spi", OBJECT(&s->spi[i]), NULL);
+ object_property_add_child(obj, "spi[*]", OBJECT(&s->spi[i]), NULL);
qdev_set_parent_bus(DEVICE(&s->spi[i]), sysbus_get_default());
}
@@ -146,6 +185,24 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
memory_region_add_subregion_overlap(get_system_memory(),
ASPEED_SOC_IOMEM_BASE, &s->iomem, -1);
+ /* CPU */
+ object_property_set_bool(OBJECT(&s->cpu), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ /* SRAM */
+ memory_region_init_ram(&s->sram, OBJECT(dev), "aspeed.sram",
+ sc->info->sram_size, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ vmstate_register_ram_global(&s->sram);
+ memory_region_add_subregion(get_system_memory(), ASPEED_SOC_SRAM_BASE,
+ &s->sram);
+
/* VIC */
object_property_set_bool(OBJECT(&s->vic), true, "realized", &err);
if (err) {
@@ -154,9 +211,9 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
}
sysbus_mmio_map(SYS_BUS_DEVICE(&s->vic), 0, ASPEED_SOC_VIC_BASE);
sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 0,
- qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_IRQ));
+ qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_IRQ));
sysbus_connect_irq(SYS_BUS_DEVICE(&s->vic), 1,
- qdev_get_gpio_in(DEVICE(s->cpu), ARM_CPU_FIQ));
+ qdev_get_gpio_in(DEVICE(&s->cpu), ARM_CPU_FIQ));
/* Timer */
object_property_set_bool(OBJECT(&s->timerctrl), true, "realized", &err);
@@ -195,10 +252,8 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0,
qdev_get_gpio_in(DEVICE(&s->vic), 12));
- /* FMC */
- object_property_set_int(OBJECT(&s->fmc), 1, "num-cs", &err);
- object_property_set_bool(OBJECT(&s->fmc), true, "realized", &local_err);
- error_propagate(&err, local_err);
+ /* FMC. The number of CS is set at the board level */
+ object_property_set_bool(OBJECT(&s->fmc), true, "realized", &err);
if (err) {
error_propagate(errp, err);
return;
@@ -240,12 +295,6 @@ static void aspeed_soc_class_init(ObjectClass *oc, void *data)
sc->info = (AspeedSoCInfo *) data;
dc->realize = aspeed_soc_realize;
-
- /*
- * Reason: creates an ARM CPU, thus use after free(), see
- * arm_cpu_class_init()
- */
- dc->cannot_destroy_with_object_finalize_yet = true;
}
static const TypeInfo aspeed_soc_type_info = {
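
The aspeed_soc.c hunks apply the usual embedded-child QOM pattern to the CPU and SRAM, and expose the FMC's "num-cs" to the board through a property alias instead of hard-coding it in the SoC. A condensed sketch of that pattern follows; MY_SOC, MySoCState, MyChildState and TYPE_MY_CHILD are placeholders, while the object_*() and error-handling calls are the same QOM APIs used in the hunks above.

/* sketch only: assumes the usual QEMU/QOM headers and placeholder types */
typedef struct MySoCState {
    DeviceState parent_obj;
    MyChildState child;                 /* embedded, not pointer-allocated */
} MySoCState;

static void my_soc_instance_init(Object *obj)
{
    MySoCState *s = MY_SOC(obj);

    object_initialize(&s->child, sizeof(s->child), TYPE_MY_CHILD);
    object_property_add_child(obj, "child", OBJECT(&s->child), NULL);
    /* let the board set the child's property without reaching into the SoC */
    object_property_add_alias(obj, "num-cs", OBJECT(&s->child), "num-cs",
                              &error_abort);
}

static void my_soc_realize(DeviceState *dev, Error **errp)
{
    MySoCState *s = MY_SOC(dev);
    Error *err = NULL;

    object_property_set_bool(OBJECT(&s->child), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
}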
diff --git a/hw/arm/imx25_pdk.c b/hw/arm/imx25_pdk.c
index 025b60843e..44e741fde3 100644
--- a/hw/arm/imx25_pdk.c
+++ b/hw/arm/imx25_pdk.c
@@ -139,7 +139,7 @@ static void imx25_pdk_init(MachineState *machine)
* of simple qtest. See "make check" for details.
*/
i2c_create_slave((I2CBus *)qdev_get_child_bus(DEVICE(&s->soc.i2c[0]),
- "i2c"),
+ "i2c-bus.0"),
"ds1338", 0x68);
}
}
diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c
index 21ea1d6210..d31b4577f0 100644
--- a/hw/arm/pxa2xx.c
+++ b/hw/arm/pxa2xx.c
@@ -1258,7 +1258,7 @@ static void pxa2xx_i2c_update(PXA2xxI2CState *s)
}
/* These are only stubs now. */
-static void pxa2xx_i2c_event(I2CSlave *i2c, enum i2c_event event)
+static int pxa2xx_i2c_event(I2CSlave *i2c, enum i2c_event event)
{
PXA2xxI2CSlaveState *slave = PXA2XX_I2C_SLAVE(i2c);
PXA2xxI2CState *s = slave->host;
@@ -1280,6 +1280,8 @@ static void pxa2xx_i2c_event(I2CSlave *i2c, enum i2c_event event)
break;
}
pxa2xx_i2c_update(s);
+
+ return 0;
}
static int pxa2xx_i2c_rx(I2CSlave *i2c)
@@ -1449,17 +1451,10 @@ static const VMStateDescription vmstate_pxa2xx_i2c = {
}
};
-static int pxa2xx_i2c_slave_init(I2CSlave *i2c)
-{
- /* Nothing to do. */
- return 0;
-}
-
static void pxa2xx_i2c_slave_class_init(ObjectClass *klass, void *data)
{
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
- k->init = pxa2xx_i2c_slave_init;
k->event = pxa2xx_i2c_event;
k->recv = pxa2xx_i2c_rx;
k->send = pxa2xx_i2c_tx;
@@ -2070,7 +2065,7 @@ PXA2xxState *pxa270_init(MemoryRegion *address_space,
}
if (!revision)
revision = "pxa270";
-
+
s->cpu = cpu_arm_init(revision);
if (s->cpu == NULL) {
fprintf(stderr, "Unable to find CPU definition\n");
diff --git a/hw/arm/tosa.c b/hw/arm/tosa.c
index 1ee12f49b3..c3db996930 100644
--- a/hw/arm/tosa.c
+++ b/hw/arm/tosa.c
@@ -172,7 +172,7 @@ static int tosa_dac_send(I2CSlave *i2c, uint8_t data)
return 0;
}
-static void tosa_dac_event(I2CSlave *i2c, enum i2c_event event)
+static int tosa_dac_event(I2CSlave *i2c, enum i2c_event event)
{
TosaDACState *s = TOSA_DAC(i2c);
@@ -194,6 +194,8 @@ static void tosa_dac_event(I2CSlave *i2c, enum i2c_event event)
default:
break;
}
+
+ return 0;
}
static int tosa_dac_recv(I2CSlave *s)
@@ -202,12 +204,6 @@ static int tosa_dac_recv(I2CSlave *s)
return -1;
}
-static int tosa_dac_init(I2CSlave *i2c)
-{
- /* Nothing to do. */
- return 0;
-}
-
static void tosa_tg_init(PXA2xxState *cpu)
{
I2CBus *bus = pxa2xx_i2c_bus(cpu->i2c[0]);
@@ -275,7 +271,6 @@ static void tosa_dac_class_init(ObjectClass *klass, void *data)
{
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
- k->init = tosa_dac_init;
k->event = tosa_dac_event;
k->recv = tosa_dac_recv;
k->send = tosa_dac_send;
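
The pxa2xx and tosa conversions above are two instances of the same I2CSlave API change in this series: the .init hook is dropped and .event now returns int. A minimal slave written against the new signature could look like the sketch below (function names are placeholders); it assumes the enum i2c_event values from QEMU's hw/i2c/i2c.h, and treating a zero return as "event accepted" is this sketch's reading of the new contract rather than something stated in the hunks.

#include "qemu/osdep.h"
#include "hw/i2c/i2c.h"

static int my_slave_event(I2CSlave *i2c, enum i2c_event event)
{
    switch (event) {
    case I2C_START_SEND:
    case I2C_START_RECV:
    case I2C_FINISH:
    case I2C_NACK:
    default:
        break;                      /* nothing to latch in this stub */
    }
    return 0;                       /* 0: event accepted */
}

static void my_slave_class_init(ObjectClass *klass, void *data)
{
    I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);

    /* no k->init assignment any more; event/recv/send are sufficient */
    k->event = my_slave_event;
}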
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 7102686882..07a10aca40 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -29,7 +29,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
-#include "hw/arm/virt-acpi-build.h"
#include "qemu/bitmap.h"
#include "trace.h"
#include "qom/cpu.h"
@@ -43,6 +42,7 @@
#include "hw/acpi/aml-build.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
+#include "hw/arm/virt.h"
#include "sysemu/numa.h"
#include "kvm_arm.h"
@@ -310,6 +310,13 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
Aml *dev_rp0 = aml_device("%s", "RP0");
aml_append(dev_rp0, aml_name_decl("_ADR", aml_int(0)));
aml_append(dev, dev_rp0);
+
+ Aml *dev_res0 = aml_device("%s", "RES0");
+ aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
+ crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(base_ecam, size_ecam, AML_READ_WRITE));
+ aml_append(dev_res0, aml_name_decl("_CRS", crs));
+ aml_append(dev, dev_res0);
aml_append(scope, dev);
}
@@ -384,7 +391,7 @@ build_rsdp(GArray *rsdp_table, BIOSLinker *linker, unsigned rsdt_tbl_offset)
}
static void
-build_iort(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
+build_iort(GArray *table_data, BIOSLinker *linker)
{
int iort_start = table_data->len;
AcpiIortIdMapping *idmap;
@@ -439,11 +446,11 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
}
static void
-build_spcr(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
+build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
AcpiSerialPortConsoleRedirection *spcr;
- const MemMapEntry *uart_memmap = &guest_info->memmap[VIRT_UART];
- int irq = guest_info->irqmap[VIRT_UART] + ARM_SPI_BASE;
+ const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART];
+ int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE;
spcr = acpi_data_push(table_data, sizeof(*spcr));
@@ -472,16 +479,16 @@ build_spcr(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
}
static void
-build_srat(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
+build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
AcpiSystemResourceAffinityTable *srat;
AcpiSratProcessorGiccAffinity *core;
AcpiSratMemoryAffinity *numamem;
int i, j, srat_start;
uint64_t mem_base;
- uint32_t *cpu_node = g_malloc0(guest_info->smp_cpus * sizeof(uint32_t));
+ uint32_t *cpu_node = g_malloc0(vms->smp_cpus * sizeof(uint32_t));
- for (i = 0; i < guest_info->smp_cpus; i++) {
+ for (i = 0; i < vms->smp_cpus; i++) {
j = numa_get_node_for_cpu(i);
if (j < nb_numa_nodes) {
cpu_node[i] = j;
@@ -492,7 +499,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
srat = acpi_data_push(table_data, sizeof(*srat));
srat->reserved1 = cpu_to_le32(1);
- for (i = 0; i < guest_info->smp_cpus; ++i) {
+ for (i = 0; i < vms->smp_cpus; ++i) {
core = acpi_data_push(table_data, sizeof(*core));
core->type = ACPI_SRAT_PROCESSOR_GICC;
core->length = sizeof(*core);
@@ -502,7 +509,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
}
g_free(cpu_node);
- mem_base = guest_info->memmap[VIRT_MEM].base;
+ mem_base = vms->memmap[VIRT_MEM].base;
for (i = 0; i < nb_numa_nodes; ++i) {
numamem = acpi_data_push(table_data, sizeof(*numamem));
build_srat_memory(numamem, mem_base, numa_info[i].node_mem, i,
@@ -515,10 +522,10 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
}
static void
-build_mcfg(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
+build_mcfg(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
AcpiTableMcfg *mcfg;
- const MemMapEntry *memmap = guest_info->memmap;
+ const MemMapEntry *memmap = vms->memmap;
int len = sizeof(*mcfg) + sizeof(mcfg->allocation[0]);
mcfg = acpi_data_push(table_data, len);
@@ -535,24 +542,33 @@ build_mcfg(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
/* GTDT */
static void
-build_gtdt(GArray *table_data, BIOSLinker *linker)
+build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
+ VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
int gtdt_start = table_data->len;
AcpiGenericTimerTable *gtdt;
+ uint32_t irqflags;
+
+ if (vmc->claim_edge_triggered_timers) {
+ irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE;
+ } else {
+ irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL;
+ }
gtdt = acpi_data_push(table_data, sizeof *gtdt);
 /* The interrupt values match the device tree values plus 16 */
- gtdt->secure_el1_interrupt = ARCH_TIMER_S_EL1_IRQ + 16;
- gtdt->secure_el1_flags = ACPI_EDGE_SENSITIVE;
+ gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16);
+ gtdt->secure_el1_flags = cpu_to_le32(irqflags);
- gtdt->non_secure_el1_interrupt = ARCH_TIMER_NS_EL1_IRQ + 16;
- gtdt->non_secure_el1_flags = ACPI_EDGE_SENSITIVE | ACPI_GTDT_ALWAYS_ON;
+ gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16);
+ gtdt->non_secure_el1_flags = cpu_to_le32(irqflags |
+ ACPI_GTDT_CAP_ALWAYS_ON);
- gtdt->virtual_timer_interrupt = ARCH_TIMER_VIRT_IRQ + 16;
- gtdt->virtual_timer_flags = ACPI_EDGE_SENSITIVE;
+ gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16);
+ gtdt->virtual_timer_flags = cpu_to_le32(irqflags);
- gtdt->non_secure_el2_interrupt = ARCH_TIMER_NS_EL2_IRQ + 16;
- gtdt->non_secure_el2_flags = ACPI_EDGE_SENSITIVE;
+ gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16);
+ gtdt->non_secure_el2_flags = cpu_to_le32(irqflags);
build_header(linker, table_data,
(void *)(table_data->data + gtdt_start), "GTDT",
@@ -561,11 +577,12 @@ build_gtdt(GArray *table_data, BIOSLinker *linker)
/* MADT */
static void
-build_madt(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
+build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
+ VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
int madt_start = table_data->len;
- const MemMapEntry *memmap = guest_info->memmap;
- const int *irqmap = guest_info->irqmap;
+ const MemMapEntry *memmap = vms->memmap;
+ const int *irqmap = vms->irqmap;
AcpiMultipleApicTable *madt;
AcpiMadtGenericDistributor *gicd;
AcpiMadtGenericMsiFrame *gic_msi;
@@ -576,30 +593,33 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
gicd = acpi_data_push(table_data, sizeof *gicd);
gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR;
gicd->length = sizeof(*gicd);
- gicd->base_address = memmap[VIRT_GIC_DIST].base;
- gicd->version = guest_info->gic_version;
+ gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base);
+ gicd->version = vms->gic_version;
- for (i = 0; i < guest_info->smp_cpus; i++) {
- AcpiMadtGenericInterrupt *gicc = acpi_data_push(table_data,
- sizeof *gicc);
+ for (i = 0; i < vms->smp_cpus; i++) {
+ AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data,
+ sizeof(*gicc));
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));
- gicc->type = ACPI_APIC_GENERIC_INTERRUPT;
+ gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE;
gicc->length = sizeof(*gicc);
- if (guest_info->gic_version == 2) {
- gicc->base_address = memmap[VIRT_GIC_CPU].base;
+ if (vms->gic_version == 2) {
+ gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
}
- gicc->cpu_interface_number = i;
- gicc->arm_mpidr = armcpu->mp_affinity;
- gicc->uid = i;
- gicc->flags = cpu_to_le32(ACPI_GICC_ENABLED);
+ gicc->cpu_interface_number = cpu_to_le32(i);
+ gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
+ gicc->uid = cpu_to_le32(i);
+ gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED);
if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
}
+ if (vms->virt && vms->gic_version == 3) {
+ gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GICV3_MAINT_IRQ));
+ }
}
- if (guest_info->gic_version == 3) {
+ if (vms->gic_version == 3) {
AcpiMadtGenericTranslator *gic_its;
AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data,
sizeof *gicr);
@@ -609,7 +629,7 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base);
gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size);
- if (its_class_name() && !guest_info->no_its) {
+ if (its_class_name() && !vmc->no_its) {
gic_its = acpi_data_push(table_data, sizeof *gic_its);
gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR;
gic_its->length = sizeof(*gic_its);
@@ -633,16 +653,30 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
}
/* FADT */
-static void
-build_fadt(GArray *table_data, BIOSLinker *linker, unsigned dsdt_tbl_offset)
+static void build_fadt(GArray *table_data, BIOSLinker *linker,
+ VirtMachineState *vms, unsigned dsdt_tbl_offset)
{
AcpiFadtDescriptorRev5_1 *fadt = acpi_data_push(table_data, sizeof(*fadt));
unsigned dsdt_entry_offset = (char *)&fadt->dsdt - table_data->data;
+ uint16_t bootflags;
+
+ switch (vms->psci_conduit) {
+ case QEMU_PSCI_CONDUIT_DISABLED:
+ bootflags = 0;
+ break;
+ case QEMU_PSCI_CONDUIT_HVC:
+ bootflags = ACPI_FADT_ARM_PSCI_COMPLIANT | ACPI_FADT_ARM_PSCI_USE_HVC;
+ break;
+ case QEMU_PSCI_CONDUIT_SMC:
+ bootflags = ACPI_FADT_ARM_PSCI_COMPLIANT;
+ break;
+ default:
+ g_assert_not_reached();
+ }
- /* Hardware Reduced = 1 and use PSCI 0.2+ and with HVC */
+ /* Hardware Reduced = 1 and use PSCI 0.2+ */
fadt->flags = cpu_to_le32(1 << ACPI_FADT_F_HW_REDUCED_ACPI);
- fadt->arm_boot_flags = cpu_to_le16((1 << ACPI_FADT_ARM_USE_PSCI_G_0_2) |
- (1 << ACPI_FADT_ARM_PSCI_USE_HVC));
+ fadt->arm_boot_flags = cpu_to_le16(bootflags);
/* ACPI v5.1 (fadt->revision.fadt->minor_revision) */
fadt->minor_revision = 0x1;
@@ -658,11 +692,11 @@ build_fadt(GArray *table_data, BIOSLinker *linker, unsigned dsdt_tbl_offset)
/* DSDT */
static void
-build_dsdt(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
+build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
Aml *scope, *dsdt;
- const MemMapEntry *memmap = guest_info->memmap;
- const int *irqmap = guest_info->irqmap;
+ const MemMapEntry *memmap = vms->memmap;
+ const int *irqmap = vms->irqmap;
dsdt = init_aml_allocator();
/* Reserve space for header */
@@ -674,7 +708,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
* the RTC ACPI device at all when using UEFI.
*/
scope = aml_scope("\\_SB");
- acpi_dsdt_add_cpus(scope, guest_info->smp_cpus);
+ acpi_dsdt_add_cpus(scope, vms->smp_cpus);
acpi_dsdt_add_uart(scope, &memmap[VIRT_UART],
(irqmap[VIRT_UART] + ARM_SPI_BASE));
acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]);
@@ -682,7 +716,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtGuestInfo *guest_info)
acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO],
(irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS);
acpi_dsdt_add_pci(scope, memmap, (irqmap[VIRT_PCIE] + ARM_SPI_BASE),
- guest_info->use_highmem);
+ vms->highmem);
acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO],
(irqmap[VIRT_GPIO] + ARM_SPI_BASE));
acpi_dsdt_add_power_button(scope);
@@ -705,12 +739,12 @@ struct AcpiBuildState {
MemoryRegion *linker_mr;
/* Is table patched? */
bool patched;
- VirtGuestInfo *guest_info;
} AcpiBuildState;
static
-void virt_acpi_build(VirtGuestInfo *guest_info, AcpiBuildTables *tables)
+void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
{
+ VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
GArray *table_offsets;
unsigned dsdt, rsdt;
GArray *tables_blob = tables->table_data;
@@ -724,32 +758,32 @@ void virt_acpi_build(VirtGuestInfo *guest_info, AcpiBuildTables *tables)
/* DSDT is pointed to by FADT */
dsdt = tables_blob->len;
- build_dsdt(tables_blob, tables->linker, guest_info);
+ build_dsdt(tables_blob, tables->linker, vms);
/* FADT MADT GTDT MCFG SPCR pointed to by RSDT */
acpi_add_table(table_offsets, tables_blob);
- build_fadt(tables_blob, tables->linker, dsdt);
+ build_fadt(tables_blob, tables->linker, vms, dsdt);
acpi_add_table(table_offsets, tables_blob);
- build_madt(tables_blob, tables->linker, guest_info);
+ build_madt(tables_blob, tables->linker, vms);
acpi_add_table(table_offsets, tables_blob);
- build_gtdt(tables_blob, tables->linker);
+ build_gtdt(tables_blob, tables->linker, vms);
acpi_add_table(table_offsets, tables_blob);
- build_mcfg(tables_blob, tables->linker, guest_info);
+ build_mcfg(tables_blob, tables->linker, vms);
acpi_add_table(table_offsets, tables_blob);
- build_spcr(tables_blob, tables->linker, guest_info);
+ build_spcr(tables_blob, tables->linker, vms);
if (nb_numa_nodes > 0) {
acpi_add_table(table_offsets, tables_blob);
- build_srat(tables_blob, tables->linker, guest_info);
+ build_srat(tables_blob, tables->linker, vms);
}
- if (its_class_name() && !guest_info->no_its) {
+ if (its_class_name() && !vmc->no_its) {
acpi_add_table(table_offsets, tables_blob);
- build_iort(tables_blob, tables->linker, guest_info);
+ build_iort(tables_blob, tables->linker);
}
/* RSDT is pointed to by RSDP */
@@ -788,13 +822,12 @@ static void virt_acpi_build_update(void *build_opaque)
acpi_build_tables_init(&tables);
- virt_acpi_build(build_state->guest_info, &tables);
+ virt_acpi_build(VIRT_MACHINE(qdev_get_machine()), &tables);
acpi_ram_update(build_state->table_mr, tables.table_data);
acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);
-
acpi_build_tables_cleanup(&tables, true);
}
@@ -809,7 +842,7 @@ static MemoryRegion *acpi_add_rom_blob(AcpiBuildState *build_state,
uint64_t max_size)
{
return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
- name, virt_acpi_build_update, build_state, NULL);
+ name, virt_acpi_build_update, build_state, NULL, true);
}
static const VMStateDescription vmstate_virt_acpi_build = {
@@ -822,12 +855,12 @@ static const VMStateDescription vmstate_virt_acpi_build = {
},
};
-void virt_acpi_setup(VirtGuestInfo *guest_info)
+void virt_acpi_setup(VirtMachineState *vms)
{
AcpiBuildTables tables;
AcpiBuildState *build_state;
- if (!guest_info->fw_cfg) {
+ if (!vms->fw_cfg) {
trace_virt_acpi_setup();
return;
}
@@ -838,10 +871,9 @@ void virt_acpi_setup(VirtGuestInfo *guest_info)
}
build_state = g_malloc0(sizeof *build_state);
- build_state->guest_info = guest_info;
acpi_build_tables_init(&tables);
- virt_acpi_build(build_state->guest_info, &tables);
+ virt_acpi_build(vms, &tables);
/* Now expose it all to Guest */
build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data,
@@ -853,8 +885,8 @@ void virt_acpi_setup(VirtGuestInfo *guest_info)
acpi_add_rom_blob(build_state, tables.linker->cmd_blob,
"etc/table-loader", 0);
- fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_TPMLOG_FILE,
- tables.tcpalog->data, acpi_data_len(tables.tcpalog));
+ fw_cfg_add_file(vms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data,
+ acpi_data_len(tables.tcpalog));
build_state->rsdp_mr = acpi_add_rom_blob(build_state, tables.rsdp,
ACPI_BUILD_RSDP_FILE, 0);
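
The build_fadt() hunk above and the fdt_add_psci_node() hunk in hw/arm/virt.c further down both key off the same vms->psci_conduit value: HVC and SMC map to a DT "method" string on one side and to FADT ARM boot flags on the other, while a disabled conduit produces neither. The sketch below collects the two mappings in one place; the QEMU_PSCI_CONDUIT_* and ACPI_FADT_ARM_* constants are the ones used in the hunks, but psci_dt_method() and psci_fadt_bootflags() are illustrative helpers, not functions in the tree.

/* sketch only: assumes the QEMU headers that define the constants below */
static const char *psci_dt_method(int conduit)
{
    switch (conduit) {
    case QEMU_PSCI_CONDUIT_HVC:
        return "hvc";
    case QEMU_PSCI_CONDUIT_SMC:
        return "smc";
    default:
        return NULL;            /* conduit disabled: no /psci node at all */
    }
}

static uint16_t psci_fadt_bootflags(int conduit)
{
    switch (conduit) {
    case QEMU_PSCI_CONDUIT_HVC:
        return ACPI_FADT_ARM_PSCI_COMPLIANT | ACPI_FADT_ARM_PSCI_USE_HVC;
    case QEMU_PSCI_CONDUIT_SMC:
        return ACPI_FADT_ARM_PSCI_COMPLIANT;
    default:
        return 0;               /* conduit disabled: no PSCI boot flags */
    }
}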
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index d04e4acbd9..6c9e8985bf 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -41,14 +41,12 @@
#include "sysemu/numa.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
-#include "hw/boards.h"
#include "hw/compat.h"
#include "hw/loader.h"
#include "exec/address-spaces.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "hw/pci-host/gpex.h"
-#include "hw/arm/virt-acpi-build.h"
#include "hw/arm/sysbus-fdt.h"
#include "hw/platform-bus.h"
#include "hw/arm/fdt.h"
@@ -59,51 +57,6 @@
#include "qapi/visitor.h"
#include "standard-headers/linux/input.h"
-/* Number of external interrupt lines to configure the GIC with */
-#define NUM_IRQS 256
-
-#define PLATFORM_BUS_NUM_IRQS 64
-
-static ARMPlatformBusSystemParams platform_bus_params;
-
-typedef struct VirtBoardInfo {
- struct arm_boot_info bootinfo;
- const char *cpu_model;
- const MemMapEntry *memmap;
- const int *irqmap;
- int smp_cpus;
- void *fdt;
- int fdt_size;
- uint32_t clock_phandle;
- uint32_t gic_phandle;
- uint32_t msi_phandle;
- bool using_psci;
-} VirtBoardInfo;
-
-typedef struct {
- MachineClass parent;
- VirtBoardInfo *daughterboard;
- bool disallow_affinity_adjustment;
- bool no_its;
- bool no_pmu;
-} VirtMachineClass;
-
-typedef struct {
- MachineState parent;
- bool secure;
- bool highmem;
- int32_t gic_version;
-} VirtMachineState;
-
-#define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
-#define VIRT_MACHINE(obj) \
- OBJECT_CHECK(VirtMachineState, (obj), TYPE_VIRT_MACHINE)
-#define VIRT_MACHINE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtMachineClass, obj, TYPE_VIRT_MACHINE)
-#define VIRT_MACHINE_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtMachineClass, klass, TYPE_VIRT_MACHINE)
-
-
#define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \
static void virt_##major##_##minor##_class_init(ObjectClass *oc, \
void *data) \
@@ -133,6 +86,13 @@ typedef struct {
DEFINE_VIRT_MACHINE_LATEST(major, minor, false)
+/* Number of external interrupt lines to configure the GIC with */
+#define NUM_IRQS 256
+
+#define PLATFORM_BUS_NUM_IRQS 64
+
+static ARMPlatformBusSystemParams platform_bus_params;
+
/* RAM limit in GB. Since VIRT_MEM starts at the 1GB mark, this means
* RAM can go up to the 256GB mark, leaving 256GB of the physical
* address space unallocated and free for future use between 256G and 512G.
@@ -202,51 +162,35 @@ static const int a15irqmap[] = {
[VIRT_PLATFORM_BUS] = 112, /* ...to 112 + PLATFORM_BUS_NUM_IRQS -1 */
};
-static VirtBoardInfo machines[] = {
- {
- .cpu_model = "cortex-a15",
- .memmap = a15memmap,
- .irqmap = a15irqmap,
- },
- {
- .cpu_model = "cortex-a53",
- .memmap = a15memmap,
- .irqmap = a15irqmap,
- },
- {
- .cpu_model = "cortex-a57",
- .memmap = a15memmap,
- .irqmap = a15irqmap,
- },
- {
- .cpu_model = "host",
- .memmap = a15memmap,
- .irqmap = a15irqmap,
- },
+static const char *valid_cpus[] = {
+ "cortex-a15",
+ "cortex-a53",
+ "cortex-a57",
+ "host",
};
-static VirtBoardInfo *find_machine_info(const char *cpu)
+static bool cpuname_valid(const char *cpu)
{
int i;
- for (i = 0; i < ARRAY_SIZE(machines); i++) {
- if (strcmp(cpu, machines[i].cpu_model) == 0) {
- return &machines[i];
+ for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) {
+ if (strcmp(cpu, valid_cpus[i]) == 0) {
+ return true;
}
}
- return NULL;
+ return false;
}
-static void create_fdt(VirtBoardInfo *vbi)
+static void create_fdt(VirtMachineState *vms)
{
- void *fdt = create_device_tree(&vbi->fdt_size);
+ void *fdt = create_device_tree(&vms->fdt_size);
if (!fdt) {
error_report("create_device_tree() failed");
exit(1);
}
- vbi->fdt = fdt;
+ vms->fdt = fdt;
/* Header */
qemu_fdt_setprop_string(fdt, "/", "compatible", "linux,dummy-virt");
@@ -266,28 +210,38 @@ static void create_fdt(VirtBoardInfo *vbi)
* optional but in practice if you omit them the kernel refuses to
* probe for the device.
*/
- vbi->clock_phandle = qemu_fdt_alloc_phandle(fdt);
+ vms->clock_phandle = qemu_fdt_alloc_phandle(fdt);
qemu_fdt_add_subnode(fdt, "/apb-pclk");
qemu_fdt_setprop_string(fdt, "/apb-pclk", "compatible", "fixed-clock");
qemu_fdt_setprop_cell(fdt, "/apb-pclk", "#clock-cells", 0x0);
qemu_fdt_setprop_cell(fdt, "/apb-pclk", "clock-frequency", 24000000);
qemu_fdt_setprop_string(fdt, "/apb-pclk", "clock-output-names",
"clk24mhz");
- qemu_fdt_setprop_cell(fdt, "/apb-pclk", "phandle", vbi->clock_phandle);
+ qemu_fdt_setprop_cell(fdt, "/apb-pclk", "phandle", vms->clock_phandle);
}
-static void fdt_add_psci_node(const VirtBoardInfo *vbi)
+static void fdt_add_psci_node(const VirtMachineState *vms)
{
uint32_t cpu_suspend_fn;
uint32_t cpu_off_fn;
uint32_t cpu_on_fn;
uint32_t migrate_fn;
- void *fdt = vbi->fdt;
+ void *fdt = vms->fdt;
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(0));
+ const char *psci_method;
- if (!vbi->using_psci) {
+ switch (vms->psci_conduit) {
+ case QEMU_PSCI_CONDUIT_DISABLED:
return;
+ case QEMU_PSCI_CONDUIT_HVC:
+ psci_method = "hvc";
+ break;
+ case QEMU_PSCI_CONDUIT_SMC:
+ psci_method = "smc";
+ break;
+ default:
+ g_assert_not_reached();
}
qemu_fdt_add_subnode(fdt, "/psci");
@@ -319,7 +273,7 @@ static void fdt_add_psci_node(const VirtBoardInfo *vbi)
* However, the device tree binding uses 'method' instead, so that is
* what we should use here.
*/
- qemu_fdt_setprop_string(fdt, "/psci", "method", "hvc");
+ qemu_fdt_setprop_string(fdt, "/psci", "method", psci_method);
qemu_fdt_setprop_cell(fdt, "/psci", "cpu_suspend", cpu_suspend_fn);
qemu_fdt_setprop_cell(fdt, "/psci", "cpu_off", cpu_off_fn);
@@ -327,41 +281,60 @@ static void fdt_add_psci_node(const VirtBoardInfo *vbi)
qemu_fdt_setprop_cell(fdt, "/psci", "migrate", migrate_fn);
}
-static void fdt_add_timer_nodes(const VirtBoardInfo *vbi, int gictype)
+static void fdt_add_timer_nodes(const VirtMachineState *vms)
{
- /* Note that on A15 h/w these interrupts are level-triggered,
- * but for the GIC implementation provided by both QEMU and KVM
- * they are edge-triggered.
+ /* On real hardware these interrupts are level-triggered.
+ * On KVM they were edge-triggered before host kernel version 4.4,
+ * and level-triggered afterwards.
+ * On emulated QEMU they are level-triggered.
+ *
+ * Getting the DTB info about them wrong is awkward for some
+ * guest kernels:
+ * pre-4.8 ignore the DT and leave the interrupt configured
+ * with whatever the GIC reset value (or the bootloader) left it at
+ * 4.8 before rc6 honour the incorrect data by programming it back
+ * into the GIC, causing problems
+ * 4.8rc6 and later ignore the DT and always write "level triggered"
+ * into the GIC
+ *
+ * For backwards-compatibility, virt-2.8 and earlier will continue
+ * to say these are edge-triggered, but later machines will report
+ * the correct information.
*/
ARMCPU *armcpu;
- uint32_t irqflags = GIC_FDT_IRQ_FLAGS_EDGE_LO_HI;
+ VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
+ uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI;
+
+ if (vmc->claim_edge_triggered_timers) {
+ irqflags = GIC_FDT_IRQ_FLAGS_EDGE_LO_HI;
+ }
- if (gictype == 2) {
+ if (vms->gic_version == 2) {
irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
GIC_FDT_IRQ_PPI_CPU_WIDTH,
- (1 << vbi->smp_cpus) - 1);
+ (1 << vms->smp_cpus) - 1);
}
- qemu_fdt_add_subnode(vbi->fdt, "/timer");
+ qemu_fdt_add_subnode(vms->fdt, "/timer");
armcpu = ARM_CPU(qemu_get_cpu(0));
if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) {
const char compat[] = "arm,armv8-timer\0arm,armv7-timer";
- qemu_fdt_setprop(vbi->fdt, "/timer", "compatible",
+ qemu_fdt_setprop(vms->fdt, "/timer", "compatible",
compat, sizeof(compat));
} else {
- qemu_fdt_setprop_string(vbi->fdt, "/timer", "compatible",
+ qemu_fdt_setprop_string(vms->fdt, "/timer", "compatible",
"arm,armv7-timer");
}
- qemu_fdt_setprop(vbi->fdt, "/timer", "always-on", NULL, 0);
- qemu_fdt_setprop_cells(vbi->fdt, "/timer", "interrupts",
+ qemu_fdt_setprop(vms->fdt, "/timer", "always-on", NULL, 0);
+ qemu_fdt_setprop_cells(vms->fdt, "/timer", "interrupts",
GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_S_EL1_IRQ, irqflags,
GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL1_IRQ, irqflags,
GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_VIRT_IRQ, irqflags,
GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL2_IRQ, irqflags);
}
-static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
+static void fdt_add_cpu_nodes(const VirtMachineState *vms)
{
int cpu;
int addr_cells = 1;
@@ -380,7 +353,7 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
* The simplest way to go is to examine affinity IDs of all our CPUs. If
* at least one of them has Aff3 populated, we set #address-cells to 2.
*/
- for (cpu = 0; cpu < vbi->smp_cpus; cpu++) {
+ for (cpu = 0; cpu < vms->smp_cpus; cpu++) {
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
if (armcpu->mp_affinity & ARM_AFF3_MASK) {
@@ -389,101 +362,107 @@ static void fdt_add_cpu_nodes(const VirtBoardInfo *vbi)
}
}
- qemu_fdt_add_subnode(vbi->fdt, "/cpus");
- qemu_fdt_setprop_cell(vbi->fdt, "/cpus", "#address-cells", addr_cells);
- qemu_fdt_setprop_cell(vbi->fdt, "/cpus", "#size-cells", 0x0);
+ qemu_fdt_add_subnode(vms->fdt, "/cpus");
+ qemu_fdt_setprop_cell(vms->fdt, "/cpus", "#address-cells", addr_cells);
+ qemu_fdt_setprop_cell(vms->fdt, "/cpus", "#size-cells", 0x0);
- for (cpu = vbi->smp_cpus - 1; cpu >= 0; cpu--) {
+ for (cpu = vms->smp_cpus - 1; cpu >= 0; cpu--) {
char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
- qemu_fdt_add_subnode(vbi->fdt, nodename);
- qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "cpu");
- qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible",
+ qemu_fdt_add_subnode(vms->fdt, nodename);
+ qemu_fdt_setprop_string(vms->fdt, nodename, "device_type", "cpu");
+ qemu_fdt_setprop_string(vms->fdt, nodename, "compatible",
armcpu->dtb_compatible);
- if (vbi->using_psci && vbi->smp_cpus > 1) {
- qemu_fdt_setprop_string(vbi->fdt, nodename,
+ if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED
+ && vms->smp_cpus > 1) {
+ qemu_fdt_setprop_string(vms->fdt, nodename,
"enable-method", "psci");
}
if (addr_cells == 2) {
- qemu_fdt_setprop_u64(vbi->fdt, nodename, "reg",
+ qemu_fdt_setprop_u64(vms->fdt, nodename, "reg",
armcpu->mp_affinity);
} else {
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "reg",
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "reg",
armcpu->mp_affinity);
}
i = numa_get_node_for_cpu(cpu);
if (i < nb_numa_nodes) {
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "numa-node-id", i);
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id", i);
}
g_free(nodename);
}
}
-static void fdt_add_its_gic_node(VirtBoardInfo *vbi)
+static void fdt_add_its_gic_node(VirtMachineState *vms)
{
- vbi->msi_phandle = qemu_fdt_alloc_phandle(vbi->fdt);
- qemu_fdt_add_subnode(vbi->fdt, "/intc/its");
- qemu_fdt_setprop_string(vbi->fdt, "/intc/its", "compatible",
+ vms->msi_phandle = qemu_fdt_alloc_phandle(vms->fdt);
+ qemu_fdt_add_subnode(vms->fdt, "/intc/its");
+ qemu_fdt_setprop_string(vms->fdt, "/intc/its", "compatible",
"arm,gic-v3-its");
- qemu_fdt_setprop(vbi->fdt, "/intc/its", "msi-controller", NULL, 0);
- qemu_fdt_setprop_sized_cells(vbi->fdt, "/intc/its", "reg",
- 2, vbi->memmap[VIRT_GIC_ITS].base,
- 2, vbi->memmap[VIRT_GIC_ITS].size);
- qemu_fdt_setprop_cell(vbi->fdt, "/intc/its", "phandle", vbi->msi_phandle);
+ qemu_fdt_setprop(vms->fdt, "/intc/its", "msi-controller", NULL, 0);
+ qemu_fdt_setprop_sized_cells(vms->fdt, "/intc/its", "reg",
+ 2, vms->memmap[VIRT_GIC_ITS].base,
+ 2, vms->memmap[VIRT_GIC_ITS].size);
+ qemu_fdt_setprop_cell(vms->fdt, "/intc/its", "phandle", vms->msi_phandle);
}
-static void fdt_add_v2m_gic_node(VirtBoardInfo *vbi)
+static void fdt_add_v2m_gic_node(VirtMachineState *vms)
{
- vbi->msi_phandle = qemu_fdt_alloc_phandle(vbi->fdt);
- qemu_fdt_add_subnode(vbi->fdt, "/intc/v2m");
- qemu_fdt_setprop_string(vbi->fdt, "/intc/v2m", "compatible",
+ vms->msi_phandle = qemu_fdt_alloc_phandle(vms->fdt);
+ qemu_fdt_add_subnode(vms->fdt, "/intc/v2m");
+ qemu_fdt_setprop_string(vms->fdt, "/intc/v2m", "compatible",
"arm,gic-v2m-frame");
- qemu_fdt_setprop(vbi->fdt, "/intc/v2m", "msi-controller", NULL, 0);
- qemu_fdt_setprop_sized_cells(vbi->fdt, "/intc/v2m", "reg",
- 2, vbi->memmap[VIRT_GIC_V2M].base,
- 2, vbi->memmap[VIRT_GIC_V2M].size);
- qemu_fdt_setprop_cell(vbi->fdt, "/intc/v2m", "phandle", vbi->msi_phandle);
+ qemu_fdt_setprop(vms->fdt, "/intc/v2m", "msi-controller", NULL, 0);
+ qemu_fdt_setprop_sized_cells(vms->fdt, "/intc/v2m", "reg",
+ 2, vms->memmap[VIRT_GIC_V2M].base,
+ 2, vms->memmap[VIRT_GIC_V2M].size);
+ qemu_fdt_setprop_cell(vms->fdt, "/intc/v2m", "phandle", vms->msi_phandle);
}
-static void fdt_add_gic_node(VirtBoardInfo *vbi, int type)
+static void fdt_add_gic_node(VirtMachineState *vms)
{
- vbi->gic_phandle = qemu_fdt_alloc_phandle(vbi->fdt);
- qemu_fdt_setprop_cell(vbi->fdt, "/", "interrupt-parent", vbi->gic_phandle);
-
- qemu_fdt_add_subnode(vbi->fdt, "/intc");
- qemu_fdt_setprop_cell(vbi->fdt, "/intc", "#interrupt-cells", 3);
- qemu_fdt_setprop(vbi->fdt, "/intc", "interrupt-controller", NULL, 0);
- qemu_fdt_setprop_cell(vbi->fdt, "/intc", "#address-cells", 0x2);
- qemu_fdt_setprop_cell(vbi->fdt, "/intc", "#size-cells", 0x2);
- qemu_fdt_setprop(vbi->fdt, "/intc", "ranges", NULL, 0);
- if (type == 3) {
- qemu_fdt_setprop_string(vbi->fdt, "/intc", "compatible",
+ vms->gic_phandle = qemu_fdt_alloc_phandle(vms->fdt);
+ qemu_fdt_setprop_cell(vms->fdt, "/", "interrupt-parent", vms->gic_phandle);
+
+ qemu_fdt_add_subnode(vms->fdt, "/intc");
+ qemu_fdt_setprop_cell(vms->fdt, "/intc", "#interrupt-cells", 3);
+ qemu_fdt_setprop(vms->fdt, "/intc", "interrupt-controller", NULL, 0);
+ qemu_fdt_setprop_cell(vms->fdt, "/intc", "#address-cells", 0x2);
+ qemu_fdt_setprop_cell(vms->fdt, "/intc", "#size-cells", 0x2);
+ qemu_fdt_setprop(vms->fdt, "/intc", "ranges", NULL, 0);
+ if (vms->gic_version == 3) {
+ qemu_fdt_setprop_string(vms->fdt, "/intc", "compatible",
"arm,gic-v3");
- qemu_fdt_setprop_sized_cells(vbi->fdt, "/intc", "reg",
- 2, vbi->memmap[VIRT_GIC_DIST].base,
- 2, vbi->memmap[VIRT_GIC_DIST].size,
- 2, vbi->memmap[VIRT_GIC_REDIST].base,
- 2, vbi->memmap[VIRT_GIC_REDIST].size);
+ qemu_fdt_setprop_sized_cells(vms->fdt, "/intc", "reg",
+ 2, vms->memmap[VIRT_GIC_DIST].base,
+ 2, vms->memmap[VIRT_GIC_DIST].size,
+ 2, vms->memmap[VIRT_GIC_REDIST].base,
+ 2, vms->memmap[VIRT_GIC_REDIST].size);
+ if (vms->virt) {
+ qemu_fdt_setprop_cells(vms->fdt, "/intc", "interrupts",
+ GIC_FDT_IRQ_TYPE_PPI, ARCH_GICV3_MAINT_IRQ,
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
+ }
} else {
/* 'cortex-a15-gic' means 'GIC v2' */
- qemu_fdt_setprop_string(vbi->fdt, "/intc", "compatible",
+ qemu_fdt_setprop_string(vms->fdt, "/intc", "compatible",
"arm,cortex-a15-gic");
- qemu_fdt_setprop_sized_cells(vbi->fdt, "/intc", "reg",
- 2, vbi->memmap[VIRT_GIC_DIST].base,
- 2, vbi->memmap[VIRT_GIC_DIST].size,
- 2, vbi->memmap[VIRT_GIC_CPU].base,
- 2, vbi->memmap[VIRT_GIC_CPU].size);
+ qemu_fdt_setprop_sized_cells(vms->fdt, "/intc", "reg",
+ 2, vms->memmap[VIRT_GIC_DIST].base,
+ 2, vms->memmap[VIRT_GIC_DIST].size,
+ 2, vms->memmap[VIRT_GIC_CPU].base,
+ 2, vms->memmap[VIRT_GIC_CPU].size);
}
- qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", vbi->gic_phandle);
+ qemu_fdt_setprop_cell(vms->fdt, "/intc", "phandle", vms->gic_phandle);
}
-static void fdt_add_pmu_nodes(const VirtBoardInfo *vbi, int gictype)
+static void fdt_add_pmu_nodes(const VirtMachineState *vms)
{
CPUState *cpu;
ARMCPU *armcpu;
@@ -497,24 +476,24 @@ static void fdt_add_pmu_nodes(const VirtBoardInfo *vbi, int gictype)
}
}
- if (gictype == 2) {
+ if (vms->gic_version == 2) {
irqflags = deposit32(irqflags, GIC_FDT_IRQ_PPI_CPU_START,
GIC_FDT_IRQ_PPI_CPU_WIDTH,
- (1 << vbi->smp_cpus) - 1);
+ (1 << vms->smp_cpus) - 1);
}
armcpu = ARM_CPU(qemu_get_cpu(0));
- qemu_fdt_add_subnode(vbi->fdt, "/pmu");
+ qemu_fdt_add_subnode(vms->fdt, "/pmu");
if (arm_feature(&armcpu->env, ARM_FEATURE_V8)) {
const char compat[] = "arm,armv8-pmuv3";
- qemu_fdt_setprop(vbi->fdt, "/pmu", "compatible",
+ qemu_fdt_setprop(vms->fdt, "/pmu", "compatible",
compat, sizeof(compat));
- qemu_fdt_setprop_cells(vbi->fdt, "/pmu", "interrupts",
+ qemu_fdt_setprop_cells(vms->fdt, "/pmu", "interrupts",
GIC_FDT_IRQ_TYPE_PPI, VIRTUAL_PMU_IRQ, irqflags);
}
}
-static void create_its(VirtBoardInfo *vbi, DeviceState *gicdev)
+static void create_its(VirtMachineState *vms, DeviceState *gicdev)
{
const char *itsclass = its_class_name();
DeviceState *dev;
@@ -529,19 +508,19 @@ static void create_its(VirtBoardInfo *vbi, DeviceState *gicdev)
object_property_set_link(OBJECT(dev), OBJECT(gicdev), "parent-gicv3",
&error_abort);
qdev_init_nofail(dev);
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vbi->memmap[VIRT_GIC_ITS].base);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_GIC_ITS].base);
- fdt_add_its_gic_node(vbi);
+ fdt_add_its_gic_node(vms);
}
-static void create_v2m(VirtBoardInfo *vbi, qemu_irq *pic)
+static void create_v2m(VirtMachineState *vms, qemu_irq *pic)
{
int i;
- int irq = vbi->irqmap[VIRT_GIC_V2M];
+ int irq = vms->irqmap[VIRT_GIC_V2M];
DeviceState *dev;
dev = qdev_create(NULL, "arm-gicv2m");
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vbi->memmap[VIRT_GIC_V2M].base);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_GIC_V2M].base);
qdev_prop_set_uint32(dev, "base-spi", irq);
qdev_prop_set_uint32(dev, "num-spi", NUM_GICV2M_SPIS);
qdev_init_nofail(dev);
@@ -550,17 +529,17 @@ static void create_v2m(VirtBoardInfo *vbi, qemu_irq *pic)
sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]);
}
- fdt_add_v2m_gic_node(vbi);
+ fdt_add_v2m_gic_node(vms);
}
-static void create_gic(VirtBoardInfo *vbi, qemu_irq *pic, int type,
- bool secure, bool no_its)
+static void create_gic(VirtMachineState *vms, qemu_irq *pic)
{
/* We create a standalone GIC */
+ VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
DeviceState *gicdev;
SysBusDevice *gicbusdev;
const char *gictype;
- int i;
+ int type = vms->gic_version, i;
gictype = (type == 3) ? gicv3_class_name() : gic_class_name();
@@ -572,20 +551,20 @@ static void create_gic(VirtBoardInfo *vbi, qemu_irq *pic, int type,
*/
qdev_prop_set_uint32(gicdev, "num-irq", NUM_IRQS + 32);
if (!kvm_irqchip_in_kernel()) {
- qdev_prop_set_bit(gicdev, "has-security-extensions", secure);
+ qdev_prop_set_bit(gicdev, "has-security-extensions", vms->secure);
}
qdev_init_nofail(gicdev);
gicbusdev = SYS_BUS_DEVICE(gicdev);
- sysbus_mmio_map(gicbusdev, 0, vbi->memmap[VIRT_GIC_DIST].base);
+ sysbus_mmio_map(gicbusdev, 0, vms->memmap[VIRT_GIC_DIST].base);
if (type == 3) {
- sysbus_mmio_map(gicbusdev, 1, vbi->memmap[VIRT_GIC_REDIST].base);
+ sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_REDIST].base);
} else {
- sysbus_mmio_map(gicbusdev, 1, vbi->memmap[VIRT_GIC_CPU].base);
+ sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
}
- /* Wire the outputs from each CPU's generic timer to the
- * appropriate GIC PPI inputs, and the GIC's IRQ output to
- * the CPU's IRQ input.
+ /* Wire the outputs from each CPU's generic timer and the GICv3
+ * maintenance interrupt signal to the appropriate GIC PPI inputs,
+ * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs.
*/
for (i = 0; i < smp_cpus; i++) {
DeviceState *cpudev = DEVICE(qemu_get_cpu(i));
@@ -607,31 +586,39 @@ static void create_gic(VirtBoardInfo *vbi, qemu_irq *pic, int type,
ppibase + timer_irq[irq]));
}
+ qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0,
+ qdev_get_gpio_in(gicdev, ppibase
+ + ARCH_GICV3_MAINT_IRQ));
+
sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
sysbus_connect_irq(gicbusdev, i + smp_cpus,
qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
+ sysbus_connect_irq(gicbusdev, i + 2 * smp_cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ));
+ sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus,
+ qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
}
for (i = 0; i < NUM_IRQS; i++) {
pic[i] = qdev_get_gpio_in(gicdev, i);
}
- fdt_add_gic_node(vbi, type);
+ fdt_add_gic_node(vms);
- if (type == 3 && !no_its) {
- create_its(vbi, gicdev);
+ if (type == 3 && !vmc->no_its) {
+ create_its(vms, gicdev);
} else if (type == 2) {
- create_v2m(vbi, pic);
+ create_v2m(vms, pic);
}
}
-static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic, int uart,
+static void create_uart(const VirtMachineState *vms, qemu_irq *pic, int uart,
MemoryRegion *mem, CharDriverState *chr)
{
char *nodename;
- hwaddr base = vbi->memmap[uart].base;
- hwaddr size = vbi->memmap[uart].size;
- int irq = vbi->irqmap[uart];
+ hwaddr base = vms->memmap[uart].base;
+ hwaddr size = vms->memmap[uart].size;
+ int irq = vms->irqmap[uart];
const char compat[] = "arm,pl011\0arm,primecell";
const char clocknames[] = "uartclk\0apb_pclk";
DeviceState *dev = qdev_create(NULL, "pl011");
@@ -644,51 +631,51 @@ static void create_uart(const VirtBoardInfo *vbi, qemu_irq *pic, int uart,
sysbus_connect_irq(s, 0, pic[irq]);
nodename = g_strdup_printf("/pl011@%" PRIx64, base);
- qemu_fdt_add_subnode(vbi->fdt, nodename);
+ qemu_fdt_add_subnode(vms->fdt, nodename);
/* Note that we can't use setprop_string because of the embedded NUL */
- qemu_fdt_setprop(vbi->fdt, nodename, "compatible",
+ qemu_fdt_setprop(vms->fdt, nodename, "compatible",
compat, sizeof(compat));
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
2, base, 2, size);
- qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupts",
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts",
GIC_FDT_IRQ_TYPE_SPI, irq,
GIC_FDT_IRQ_FLAGS_LEVEL_HI);
- qemu_fdt_setprop_cells(vbi->fdt, nodename, "clocks",
- vbi->clock_phandle, vbi->clock_phandle);
- qemu_fdt_setprop(vbi->fdt, nodename, "clock-names",
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "clocks",
+ vms->clock_phandle, vms->clock_phandle);
+ qemu_fdt_setprop(vms->fdt, nodename, "clock-names",
clocknames, sizeof(clocknames));
if (uart == VIRT_UART) {
- qemu_fdt_setprop_string(vbi->fdt, "/chosen", "stdout-path", nodename);
+ qemu_fdt_setprop_string(vms->fdt, "/chosen", "stdout-path", nodename);
} else {
/* Mark as not usable by the normal world */
- qemu_fdt_setprop_string(vbi->fdt, nodename, "status", "disabled");
- qemu_fdt_setprop_string(vbi->fdt, nodename, "secure-status", "okay");
+ qemu_fdt_setprop_string(vms->fdt, nodename, "status", "disabled");
+ qemu_fdt_setprop_string(vms->fdt, nodename, "secure-status", "okay");
}
g_free(nodename);
}
-static void create_rtc(const VirtBoardInfo *vbi, qemu_irq *pic)
+static void create_rtc(const VirtMachineState *vms, qemu_irq *pic)
{
char *nodename;
- hwaddr base = vbi->memmap[VIRT_RTC].base;
- hwaddr size = vbi->memmap[VIRT_RTC].size;
- int irq = vbi->irqmap[VIRT_RTC];
+ hwaddr base = vms->memmap[VIRT_RTC].base;
+ hwaddr size = vms->memmap[VIRT_RTC].size;
+ int irq = vms->irqmap[VIRT_RTC];
const char compat[] = "arm,pl031\0arm,primecell";
sysbus_create_simple("pl031", base, pic[irq]);
nodename = g_strdup_printf("/pl031@%" PRIx64, base);
- qemu_fdt_add_subnode(vbi->fdt, nodename);
- qemu_fdt_setprop(vbi->fdt, nodename, "compatible", compat, sizeof(compat));
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ qemu_fdt_add_subnode(vms->fdt, nodename);
+ qemu_fdt_setprop(vms->fdt, nodename, "compatible", compat, sizeof(compat));
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
2, base, 2, size);
- qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupts",
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts",
GIC_FDT_IRQ_TYPE_SPI, irq,
GIC_FDT_IRQ_FLAGS_LEVEL_HI);
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "clocks", vbi->clock_phandle);
- qemu_fdt_setprop_string(vbi->fdt, nodename, "clock-names", "apb_pclk");
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "clocks", vms->clock_phandle);
+ qemu_fdt_setprop_string(vms->fdt, nodename, "clock-names", "apb_pclk");
g_free(nodename);
}
@@ -703,45 +690,45 @@ static Notifier virt_system_powerdown_notifier = {
.notify = virt_powerdown_req
};
-static void create_gpio(const VirtBoardInfo *vbi, qemu_irq *pic)
+static void create_gpio(const VirtMachineState *vms, qemu_irq *pic)
{
char *nodename;
DeviceState *pl061_dev;
- hwaddr base = vbi->memmap[VIRT_GPIO].base;
- hwaddr size = vbi->memmap[VIRT_GPIO].size;
- int irq = vbi->irqmap[VIRT_GPIO];
+ hwaddr base = vms->memmap[VIRT_GPIO].base;
+ hwaddr size = vms->memmap[VIRT_GPIO].size;
+ int irq = vms->irqmap[VIRT_GPIO];
const char compat[] = "arm,pl061\0arm,primecell";
pl061_dev = sysbus_create_simple("pl061", base, pic[irq]);
- uint32_t phandle = qemu_fdt_alloc_phandle(vbi->fdt);
+ uint32_t phandle = qemu_fdt_alloc_phandle(vms->fdt);
nodename = g_strdup_printf("/pl061@%" PRIx64, base);
- qemu_fdt_add_subnode(vbi->fdt, nodename);
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ qemu_fdt_add_subnode(vms->fdt, nodename);
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
2, base, 2, size);
- qemu_fdt_setprop(vbi->fdt, nodename, "compatible", compat, sizeof(compat));
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "#gpio-cells", 2);
- qemu_fdt_setprop(vbi->fdt, nodename, "gpio-controller", NULL, 0);
- qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupts",
+ qemu_fdt_setprop(vms->fdt, nodename, "compatible", compat, sizeof(compat));
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "#gpio-cells", 2);
+ qemu_fdt_setprop(vms->fdt, nodename, "gpio-controller", NULL, 0);
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts",
GIC_FDT_IRQ_TYPE_SPI, irq,
GIC_FDT_IRQ_FLAGS_LEVEL_HI);
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "clocks", vbi->clock_phandle);
- qemu_fdt_setprop_string(vbi->fdt, nodename, "clock-names", "apb_pclk");
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "phandle", phandle);
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "clocks", vms->clock_phandle);
+ qemu_fdt_setprop_string(vms->fdt, nodename, "clock-names", "apb_pclk");
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "phandle", phandle);
gpio_key_dev = sysbus_create_simple("gpio-key", -1,
qdev_get_gpio_in(pl061_dev, 3));
- qemu_fdt_add_subnode(vbi->fdt, "/gpio-keys");
- qemu_fdt_setprop_string(vbi->fdt, "/gpio-keys", "compatible", "gpio-keys");
- qemu_fdt_setprop_cell(vbi->fdt, "/gpio-keys", "#size-cells", 0);
- qemu_fdt_setprop_cell(vbi->fdt, "/gpio-keys", "#address-cells", 1);
+ qemu_fdt_add_subnode(vms->fdt, "/gpio-keys");
+ qemu_fdt_setprop_string(vms->fdt, "/gpio-keys", "compatible", "gpio-keys");
+ qemu_fdt_setprop_cell(vms->fdt, "/gpio-keys", "#size-cells", 0);
+ qemu_fdt_setprop_cell(vms->fdt, "/gpio-keys", "#address-cells", 1);
- qemu_fdt_add_subnode(vbi->fdt, "/gpio-keys/poweroff");
- qemu_fdt_setprop_string(vbi->fdt, "/gpio-keys/poweroff",
+ qemu_fdt_add_subnode(vms->fdt, "/gpio-keys/poweroff");
+ qemu_fdt_setprop_string(vms->fdt, "/gpio-keys/poweroff",
"label", "GPIO Key Poweroff");
- qemu_fdt_setprop_cell(vbi->fdt, "/gpio-keys/poweroff", "linux,code",
+ qemu_fdt_setprop_cell(vms->fdt, "/gpio-keys/poweroff", "linux,code",
KEY_POWER);
- qemu_fdt_setprop_cells(vbi->fdt, "/gpio-keys/poweroff",
+ qemu_fdt_setprop_cells(vms->fdt, "/gpio-keys/poweroff",
"gpios", phandle, 3, 0);
/* connect powerdown request */
@@ -750,10 +737,10 @@ static void create_gpio(const VirtBoardInfo *vbi, qemu_irq *pic)
g_free(nodename);
}
-static void create_virtio_devices(const VirtBoardInfo *vbi, qemu_irq *pic)
+static void create_virtio_devices(const VirtMachineState *vms, qemu_irq *pic)
{
int i;
- hwaddr size = vbi->memmap[VIRT_MMIO].size;
+ hwaddr size = vms->memmap[VIRT_MMIO].size;
/* We create the transports in forwards order. Since qbus_realize()
* prepends (not appends) new child buses, the incrementing loop below will
@@ -783,8 +770,8 @@ static void create_virtio_devices(const VirtBoardInfo *vbi, qemu_irq *pic)
* of disks users must use UUIDs or similar mechanisms.
*/
for (i = 0; i < NUM_VIRTIO_TRANSPORTS; i++) {
- int irq = vbi->irqmap[VIRT_MMIO] + i;
- hwaddr base = vbi->memmap[VIRT_MMIO].base + i * size;
+ int irq = vms->irqmap[VIRT_MMIO] + i;
+ hwaddr base = vms->memmap[VIRT_MMIO].base + i * size;
sysbus_create_simple("virtio-mmio", base, pic[irq]);
}
@@ -798,16 +785,16 @@ static void create_virtio_devices(const VirtBoardInfo *vbi, qemu_irq *pic)
*/
for (i = NUM_VIRTIO_TRANSPORTS - 1; i >= 0; i--) {
char *nodename;
- int irq = vbi->irqmap[VIRT_MMIO] + i;
- hwaddr base = vbi->memmap[VIRT_MMIO].base + i * size;
+ int irq = vms->irqmap[VIRT_MMIO] + i;
+ hwaddr base = vms->memmap[VIRT_MMIO].base + i * size;
nodename = g_strdup_printf("/virtio_mmio@%" PRIx64, base);
- qemu_fdt_add_subnode(vbi->fdt, nodename);
- qemu_fdt_setprop_string(vbi->fdt, nodename,
+ qemu_fdt_add_subnode(vms->fdt, nodename);
+ qemu_fdt_setprop_string(vms->fdt, nodename,
"compatible", "virtio,mmio");
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
2, base, 2, size);
- qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupts",
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts",
GIC_FDT_IRQ_TYPE_SPI, irq,
GIC_FDT_IRQ_FLAGS_EDGE_LO_HI);
g_free(nodename);
@@ -870,7 +857,7 @@ static void create_one_flash(const char *name, hwaddr flashbase,
}
}
-static void create_flash(const VirtBoardInfo *vbi,
+static void create_flash(const VirtMachineState *vms,
MemoryRegion *sysmem,
MemoryRegion *secure_sysmem)
{
@@ -882,8 +869,8 @@ static void create_flash(const VirtBoardInfo *vbi,
* If sysmem == secure_sysmem this means there is no separate Secure
* address space and both flash devices are generally visible.
*/
- hwaddr flashsize = vbi->memmap[VIRT_FLASH].size / 2;
- hwaddr flashbase = vbi->memmap[VIRT_FLASH].base;
+ hwaddr flashsize = vms->memmap[VIRT_FLASH].size / 2;
+ hwaddr flashbase = vms->memmap[VIRT_FLASH].base;
char *nodename;
create_one_flash("virt.flash0", flashbase, flashsize,
@@ -894,41 +881,41 @@ static void create_flash(const VirtBoardInfo *vbi,
if (sysmem == secure_sysmem) {
/* Report both flash devices as a single node in the DT */
nodename = g_strdup_printf("/flash@%" PRIx64, flashbase);
- qemu_fdt_add_subnode(vbi->fdt, nodename);
- qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible", "cfi-flash");
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ qemu_fdt_add_subnode(vms->fdt, nodename);
+ qemu_fdt_setprop_string(vms->fdt, nodename, "compatible", "cfi-flash");
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
2, flashbase, 2, flashsize,
2, flashbase + flashsize, 2, flashsize);
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "bank-width", 4);
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "bank-width", 4);
g_free(nodename);
} else {
/* Report the devices as separate nodes so we can mark one as
* only visible to the secure world.
*/
nodename = g_strdup_printf("/secflash@%" PRIx64, flashbase);
- qemu_fdt_add_subnode(vbi->fdt, nodename);
- qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible", "cfi-flash");
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ qemu_fdt_add_subnode(vms->fdt, nodename);
+ qemu_fdt_setprop_string(vms->fdt, nodename, "compatible", "cfi-flash");
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
2, flashbase, 2, flashsize);
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "bank-width", 4);
- qemu_fdt_setprop_string(vbi->fdt, nodename, "status", "disabled");
- qemu_fdt_setprop_string(vbi->fdt, nodename, "secure-status", "okay");
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "bank-width", 4);
+ qemu_fdt_setprop_string(vms->fdt, nodename, "status", "disabled");
+ qemu_fdt_setprop_string(vms->fdt, nodename, "secure-status", "okay");
g_free(nodename);
nodename = g_strdup_printf("/flash@%" PRIx64, flashbase);
- qemu_fdt_add_subnode(vbi->fdt, nodename);
- qemu_fdt_setprop_string(vbi->fdt, nodename, "compatible", "cfi-flash");
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ qemu_fdt_add_subnode(vms->fdt, nodename);
+ qemu_fdt_setprop_string(vms->fdt, nodename, "compatible", "cfi-flash");
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
2, flashbase + flashsize, 2, flashsize);
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "bank-width", 4);
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "bank-width", 4);
g_free(nodename);
}
}
-static void create_fw_cfg(const VirtBoardInfo *vbi, AddressSpace *as)
+static FWCfgState *create_fw_cfg(const VirtMachineState *vms, AddressSpace *as)
{
- hwaddr base = vbi->memmap[VIRT_FW_CFG].base;
- hwaddr size = vbi->memmap[VIRT_FW_CFG].size;
+ hwaddr base = vms->memmap[VIRT_FW_CFG].base;
+ hwaddr size = vms->memmap[VIRT_FW_CFG].size;
FWCfgState *fw_cfg;
char *nodename;
@@ -936,15 +923,17 @@ static void create_fw_cfg(const VirtBoardInfo *vbi, AddressSpace *as)
fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
nodename = g_strdup_printf("/fw-cfg@%" PRIx64, base);
- qemu_fdt_add_subnode(vbi->fdt, nodename);
- qemu_fdt_setprop_string(vbi->fdt, nodename,
+ qemu_fdt_add_subnode(vms->fdt, nodename);
+ qemu_fdt_setprop_string(vms->fdt, nodename,
"compatible", "qemu,fw-cfg-mmio");
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
2, base, 2, size);
g_free(nodename);
+ return fw_cfg;
}
-static void create_pcie_irq_map(const VirtBoardInfo *vbi, uint32_t gic_phandle,
+static void create_pcie_irq_map(const VirtMachineState *vms,
+ uint32_t gic_phandle,
int first_irq, const char *nodename)
{
int devfn, pin;
@@ -971,28 +960,27 @@ static void create_pcie_irq_map(const VirtBoardInfo *vbi, uint32_t gic_phandle,
}
}
- qemu_fdt_setprop(vbi->fdt, nodename, "interrupt-map",
+ qemu_fdt_setprop(vms->fdt, nodename, "interrupt-map",
full_irq_map, sizeof(full_irq_map));
- qemu_fdt_setprop_cells(vbi->fdt, nodename, "interrupt-map-mask",
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupt-map-mask",
0x1800, 0, 0, /* devfn (PCI_SLOT(3)) */
0x7 /* PCI irq */);
}
-static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
- bool use_highmem)
+static void create_pcie(const VirtMachineState *vms, qemu_irq *pic)
{
- hwaddr base_mmio = vbi->memmap[VIRT_PCIE_MMIO].base;
- hwaddr size_mmio = vbi->memmap[VIRT_PCIE_MMIO].size;
- hwaddr base_mmio_high = vbi->memmap[VIRT_PCIE_MMIO_HIGH].base;
- hwaddr size_mmio_high = vbi->memmap[VIRT_PCIE_MMIO_HIGH].size;
- hwaddr base_pio = vbi->memmap[VIRT_PCIE_PIO].base;
- hwaddr size_pio = vbi->memmap[VIRT_PCIE_PIO].size;
- hwaddr base_ecam = vbi->memmap[VIRT_PCIE_ECAM].base;
- hwaddr size_ecam = vbi->memmap[VIRT_PCIE_ECAM].size;
+ hwaddr base_mmio = vms->memmap[VIRT_PCIE_MMIO].base;
+ hwaddr size_mmio = vms->memmap[VIRT_PCIE_MMIO].size;
+ hwaddr base_mmio_high = vms->memmap[VIRT_PCIE_MMIO_HIGH].base;
+ hwaddr size_mmio_high = vms->memmap[VIRT_PCIE_MMIO_HIGH].size;
+ hwaddr base_pio = vms->memmap[VIRT_PCIE_PIO].base;
+ hwaddr size_pio = vms->memmap[VIRT_PCIE_PIO].size;
+ hwaddr base_ecam = vms->memmap[VIRT_PCIE_ECAM].base;
+ hwaddr size_ecam = vms->memmap[VIRT_PCIE_ECAM].size;
hwaddr base = base_mmio;
int nr_pcie_buses = size_ecam / PCIE_MMCFG_SIZE_MIN;
- int irq = vbi->irqmap[VIRT_PCIE];
+ int irq = vms->irqmap[VIRT_PCIE];
MemoryRegion *mmio_alias;
MemoryRegion *mmio_reg;
MemoryRegion *ecam_alias;
@@ -1023,7 +1011,7 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
mmio_reg, base_mmio, size_mmio);
memory_region_add_subregion(get_system_memory(), base_mmio, mmio_alias);
- if (use_highmem) {
+ if (vms->highmem) {
/* Map high MMIO space */
MemoryRegion *high_mmio_alias = g_new0(MemoryRegion, 1);
@@ -1054,26 +1042,26 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
}
nodename = g_strdup_printf("/pcie@%" PRIx64, base);
- qemu_fdt_add_subnode(vbi->fdt, nodename);
- qemu_fdt_setprop_string(vbi->fdt, nodename,
+ qemu_fdt_add_subnode(vms->fdt, nodename);
+ qemu_fdt_setprop_string(vms->fdt, nodename,
"compatible", "pci-host-ecam-generic");
- qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "pci");
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "#address-cells", 3);
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "#size-cells", 2);
- qemu_fdt_setprop_cells(vbi->fdt, nodename, "bus-range", 0,
+ qemu_fdt_setprop_string(vms->fdt, nodename, "device_type", "pci");
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "#address-cells", 3);
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "#size-cells", 2);
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "bus-range", 0,
nr_pcie_buses - 1);
- qemu_fdt_setprop(vbi->fdt, nodename, "dma-coherent", NULL, 0);
+ qemu_fdt_setprop(vms->fdt, nodename, "dma-coherent", NULL, 0);
- if (vbi->msi_phandle) {
- qemu_fdt_setprop_cells(vbi->fdt, nodename, "msi-parent",
- vbi->msi_phandle);
+ if (vms->msi_phandle) {
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "msi-parent",
+ vms->msi_phandle);
}
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg",
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
2, base_ecam, 2, size_ecam);
- if (use_highmem) {
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
+ if (vms->highmem) {
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "ranges",
1, FDT_PCI_RANGE_IOPORT, 2, 0,
2, base_pio, 2, size_pio,
1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
@@ -1082,20 +1070,20 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
2, base_mmio_high,
2, base_mmio_high, 2, size_mmio_high);
} else {
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "ranges",
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "ranges",
1, FDT_PCI_RANGE_IOPORT, 2, 0,
2, base_pio, 2, size_pio,
1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
2, base_mmio, 2, size_mmio);
}
- qemu_fdt_setprop_cell(vbi->fdt, nodename, "#interrupt-cells", 1);
- create_pcie_irq_map(vbi, vbi->gic_phandle, irq, nodename);
+ qemu_fdt_setprop_cell(vms->fdt, nodename, "#interrupt-cells", 1);
+ create_pcie_irq_map(vms, vms->gic_phandle, irq, nodename);
g_free(nodename);
}
-static void create_platform_bus(VirtBoardInfo *vbi, qemu_irq *pic)
+static void create_platform_bus(VirtMachineState *vms, qemu_irq *pic)
{
DeviceState *dev;
SysBusDevice *s;
@@ -1103,13 +1091,13 @@ static void create_platform_bus(VirtBoardInfo *vbi, qemu_irq *pic)
ARMPlatformBusFDTParams *fdt_params = g_new(ARMPlatformBusFDTParams, 1);
MemoryRegion *sysmem = get_system_memory();
- platform_bus_params.platform_bus_base = vbi->memmap[VIRT_PLATFORM_BUS].base;
- platform_bus_params.platform_bus_size = vbi->memmap[VIRT_PLATFORM_BUS].size;
- platform_bus_params.platform_bus_first_irq = vbi->irqmap[VIRT_PLATFORM_BUS];
+ platform_bus_params.platform_bus_base = vms->memmap[VIRT_PLATFORM_BUS].base;
+ platform_bus_params.platform_bus_size = vms->memmap[VIRT_PLATFORM_BUS].size;
+ platform_bus_params.platform_bus_first_irq = vms->irqmap[VIRT_PLATFORM_BUS];
platform_bus_params.platform_bus_num_irqs = PLATFORM_BUS_NUM_IRQS;
fdt_params->system_params = &platform_bus_params;
- fdt_params->binfo = &vbi->bootinfo;
+ fdt_params->binfo = &vms->bootinfo;
fdt_params->intc = "/intc";
/*
* register a machine init done notifier that creates the device tree
@@ -1136,43 +1124,44 @@ static void create_platform_bus(VirtBoardInfo *vbi, qemu_irq *pic)
sysbus_mmio_get_region(s, 0));
}
-static void create_secure_ram(VirtBoardInfo *vbi, MemoryRegion *secure_sysmem)
+static void create_secure_ram(VirtMachineState *vms,
+ MemoryRegion *secure_sysmem)
{
MemoryRegion *secram = g_new(MemoryRegion, 1);
char *nodename;
- hwaddr base = vbi->memmap[VIRT_SECURE_MEM].base;
- hwaddr size = vbi->memmap[VIRT_SECURE_MEM].size;
+ hwaddr base = vms->memmap[VIRT_SECURE_MEM].base;
+ hwaddr size = vms->memmap[VIRT_SECURE_MEM].size;
memory_region_init_ram(secram, NULL, "virt.secure-ram", size, &error_fatal);
vmstate_register_ram_global(secram);
memory_region_add_subregion(secure_sysmem, base, secram);
nodename = g_strdup_printf("/secram@%" PRIx64, base);
- qemu_fdt_add_subnode(vbi->fdt, nodename);
- qemu_fdt_setprop_string(vbi->fdt, nodename, "device_type", "memory");
- qemu_fdt_setprop_sized_cells(vbi->fdt, nodename, "reg", 2, base, 2, size);
- qemu_fdt_setprop_string(vbi->fdt, nodename, "status", "disabled");
- qemu_fdt_setprop_string(vbi->fdt, nodename, "secure-status", "okay");
+ qemu_fdt_add_subnode(vms->fdt, nodename);
+ qemu_fdt_setprop_string(vms->fdt, nodename, "device_type", "memory");
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg", 2, base, 2, size);
+ qemu_fdt_setprop_string(vms->fdt, nodename, "status", "disabled");
+ qemu_fdt_setprop_string(vms->fdt, nodename, "secure-status", "okay");
g_free(nodename);
}
static void *machvirt_dtb(const struct arm_boot_info *binfo, int *fdt_size)
{
- const VirtBoardInfo *board = (const VirtBoardInfo *)binfo;
+ const VirtMachineState *board = container_of(binfo, VirtMachineState,
+ bootinfo);
*fdt_size = board->fdt_size;
return board->fdt;
}
-static void virt_build_smbios(VirtGuestInfo *guest_info)
+static void virt_build_smbios(VirtMachineState *vms)
{
- FWCfgState *fw_cfg = guest_info->fw_cfg;
uint8_t *smbios_tables, *smbios_anchor;
size_t smbios_tables_len, smbios_anchor_len;
const char *product = "QEMU Virtual Machine";
- if (!fw_cfg) {
+ if (!vms->fw_cfg) {
return;
}
@@ -1187,20 +1176,21 @@ static void virt_build_smbios(VirtGuestInfo *guest_info)
&smbios_anchor, &smbios_anchor_len);
if (smbios_anchor) {
- fw_cfg_add_file(fw_cfg, "etc/smbios/smbios-tables",
+ fw_cfg_add_file(vms->fw_cfg, "etc/smbios/smbios-tables",
smbios_tables, smbios_tables_len);
- fw_cfg_add_file(fw_cfg, "etc/smbios/smbios-anchor",
+ fw_cfg_add_file(vms->fw_cfg, "etc/smbios/smbios-anchor",
smbios_anchor, smbios_anchor_len);
}
}
static
-void virt_guest_info_machine_done(Notifier *notifier, void *data)
+void virt_machine_done(Notifier *notifier, void *data)
{
- VirtGuestInfoState *guest_info_state = container_of(notifier,
- VirtGuestInfoState, machine_done);
- virt_acpi_setup(&guest_info_state->info);
- virt_build_smbios(&guest_info_state->info);
+ VirtMachineState *vms = container_of(notifier, VirtMachineState,
+ machine_done);
+
+ virt_acpi_setup(vms);
+ virt_build_smbios(vms);
}
static void machvirt_init(MachineState *machine)
@@ -1210,13 +1200,9 @@ static void machvirt_init(MachineState *machine)
qemu_irq pic[NUM_IRQS];
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *secure_sysmem = NULL;
- int gic_version = vms->gic_version;
int n, virt_max_cpus;
MemoryRegion *ram = g_new(MemoryRegion, 1);
const char *cpu_model = machine->cpu_model;
- VirtBoardInfo *vbi;
- VirtGuestInfoState *guest_info_state = g_malloc0(sizeof *guest_info_state);
- VirtGuestInfo *guest_info = &guest_info_state->info;
char **cpustr;
ObjectClass *oc;
const char *typename;
@@ -1232,14 +1218,14 @@ static void machvirt_init(MachineState *machine)
/* We can probe only here because during property set
* KVM is not available yet
*/
- if (!gic_version) {
+ if (!vms->gic_version) {
if (!kvm_enabled()) {
error_report("gic-version=host requires KVM");
exit(1);
}
- gic_version = kvm_arm_vgic_probe();
- if (!gic_version) {
+ vms->gic_version = kvm_arm_vgic_probe();
+ if (!vms->gic_version) {
error_report("Unable to determine GIC version supported by host");
exit(1);
}
@@ -1248,9 +1234,7 @@ static void machvirt_init(MachineState *machine)
/* Separate the actual CPU model name from any appended features */
cpustr = g_strsplit(cpu_model, ",", 2);
- vbi = find_machine_info(cpustr[0]);
-
- if (!vbi) {
+ if (!cpuname_valid(cpustr[0])) {
error_report("mach-virt: CPU %s not supported", cpustr[0]);
exit(1);
}
@@ -1260,15 +1244,24 @@ static void machvirt_init(MachineState *machine)
* so it doesn't get in the way. Instead of starting secondary
* CPUs in PSCI powerdown state we will start them all running and
* let the boot ROM sort them out.
- * The usual case is that we do use QEMU's PSCI implementation.
+ * The usual case is that we do use QEMU's PSCI implementation;
+ * if the guest has EL2 then we will use SMC as the conduit,
+ * and otherwise we will use HVC (for backwards compatibility and
+ * because if we're using KVM then we must use HVC).
*/
- vbi->using_psci = !(vms->secure && firmware_loaded);
+ if (vms->secure && firmware_loaded) {
+ vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED;
+ } else if (vms->virt) {
+ vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC;
+ } else {
+ vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC;
+ }
/* The maximum number of CPUs depends on the GIC version, or on how
* many redistributors we can fit into the memory map.
*/
- if (gic_version == 3) {
- virt_max_cpus = vbi->memmap[VIRT_GIC_REDIST].size / 0x20000;
+ if (vms->gic_version == 3) {
+ virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000;
clustersz = GICV3_TARGETLIST_BITS;
} else {
virt_max_cpus = GIC_NCPU;
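
The redistributor-based limit above works because, in this memory map, each GICv3 redistributor occupies a fixed 0x20000-byte stride (two 64 KiB register frames), so the supportable CPU count is simply the redistributor region size divided by that stride. A minimal standalone sketch of the arithmetic; the region size here is a made-up example, not the board's actual value:

    #include <stdio.h>

    #define EXAMPLE_REDIST_REGION_SIZE 0x00F60000ULL /* hypothetical size */
    #define REDIST_STRIDE              0x20000ULL    /* 2 x 64 KiB frames */

    int main(void)
    {
        /* 0xF60000 / 0x20000 == 123 redistributors, hence 123 CPUs */
        printf("max CPUs: %llu\n",
               EXAMPLE_REDIST_REGION_SIZE / REDIST_STRIDE);
        return 0;
    }
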
@@ -1282,13 +1275,19 @@ static void machvirt_init(MachineState *machine)
exit(1);
}
- vbi->smp_cpus = smp_cpus;
+ vms->smp_cpus = smp_cpus;
- if (machine->ram_size > vbi->memmap[VIRT_MEM].size) {
+ if (machine->ram_size > vms->memmap[VIRT_MEM].size) {
error_report("mach-virt: cannot model more than %dGB RAM", RAMLIMIT_GB);
exit(1);
}
+ if (vms->virt && kvm_enabled()) {
+ error_report("mach-virt: KVM does not support providing "
+ "Virtualization extensions to the guest CPU");
+ exit(1);
+ }
+
if (vms->secure) {
if (kvm_enabled()) {
error_report("mach-virt: KVM does not support Security extensions");
@@ -1306,7 +1305,7 @@ static void machvirt_init(MachineState *machine)
memory_region_add_subregion_overlap(secure_sysmem, 0, sysmem, -1);
}
- create_fdt(vbi);
+ create_fdt(vms);
oc = cpu_class_by_name(TYPE_ARM_CPU, cpustr[0]);
if (!oc) {
@@ -1345,8 +1344,12 @@ static void machvirt_init(MachineState *machine)
object_property_set_bool(cpuobj, false, "has_el3", NULL);
}
- if (vbi->using_psci) {
- object_property_set_int(cpuobj, QEMU_PSCI_CONDUIT_HVC,
+ if (!vms->virt && object_property_find(cpuobj, "has_el2", NULL)) {
+ object_property_set_bool(cpuobj, false, "has_el2", NULL);
+ }
+
+ if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) {
+ object_property_set_int(cpuobj, vms->psci_conduit,
"psci-conduit", NULL);
/* Secondary CPUs start in PSCI powered-down state */
@@ -1361,7 +1364,7 @@ static void machvirt_init(MachineState *machine)
}
if (object_property_find(cpuobj, "reset-cbar", NULL)) {
- object_property_set_int(cpuobj, vbi->memmap[VIRT_CPUPERIPHS].base,
+ object_property_set_int(cpuobj, vms->memmap[VIRT_CPUPERIPHS].base,
"reset-cbar", &error_abort);
}
@@ -1374,62 +1377,55 @@ static void machvirt_init(MachineState *machine)
object_property_set_bool(cpuobj, true, "realized", NULL);
}
- fdt_add_timer_nodes(vbi, gic_version);
- fdt_add_cpu_nodes(vbi);
- fdt_add_psci_node(vbi);
+ fdt_add_timer_nodes(vms);
+ fdt_add_cpu_nodes(vms);
+ fdt_add_psci_node(vms);
memory_region_allocate_system_memory(ram, NULL, "mach-virt.ram",
machine->ram_size);
- memory_region_add_subregion(sysmem, vbi->memmap[VIRT_MEM].base, ram);
+ memory_region_add_subregion(sysmem, vms->memmap[VIRT_MEM].base, ram);
- create_flash(vbi, sysmem, secure_sysmem ? secure_sysmem : sysmem);
+ create_flash(vms, sysmem, secure_sysmem ? secure_sysmem : sysmem);
- create_gic(vbi, pic, gic_version, vms->secure, vmc->no_its);
+ create_gic(vms, pic);
- fdt_add_pmu_nodes(vbi, gic_version);
+ fdt_add_pmu_nodes(vms);
- create_uart(vbi, pic, VIRT_UART, sysmem, serial_hds[0]);
+ create_uart(vms, pic, VIRT_UART, sysmem, serial_hds[0]);
if (vms->secure) {
- create_secure_ram(vbi, secure_sysmem);
- create_uart(vbi, pic, VIRT_SECURE_UART, secure_sysmem, serial_hds[1]);
+ create_secure_ram(vms, secure_sysmem);
+ create_uart(vms, pic, VIRT_SECURE_UART, secure_sysmem, serial_hds[1]);
}
- create_rtc(vbi, pic);
+ create_rtc(vms, pic);
- create_pcie(vbi, pic, vms->highmem);
+ create_pcie(vms, pic);
- create_gpio(vbi, pic);
+ create_gpio(vms, pic);
/* Create mmio transports, so the user can create virtio backends
* (which will be automatically plugged in to the transports). If
* no backend is created the transport will just sit harmlessly idle.
*/
- create_virtio_devices(vbi, pic);
-
- create_fw_cfg(vbi, &address_space_memory);
- rom_set_fw(fw_cfg_find());
-
- guest_info->smp_cpus = smp_cpus;
- guest_info->fw_cfg = fw_cfg_find();
- guest_info->memmap = vbi->memmap;
- guest_info->irqmap = vbi->irqmap;
- guest_info->use_highmem = vms->highmem;
- guest_info->gic_version = gic_version;
- guest_info->no_its = vmc->no_its;
- guest_info_state->machine_done.notify = virt_guest_info_machine_done;
- qemu_add_machine_init_done_notifier(&guest_info_state->machine_done);
-
- vbi->bootinfo.ram_size = machine->ram_size;
- vbi->bootinfo.kernel_filename = machine->kernel_filename;
- vbi->bootinfo.kernel_cmdline = machine->kernel_cmdline;
- vbi->bootinfo.initrd_filename = machine->initrd_filename;
- vbi->bootinfo.nb_cpus = smp_cpus;
- vbi->bootinfo.board_id = -1;
- vbi->bootinfo.loader_start = vbi->memmap[VIRT_MEM].base;
- vbi->bootinfo.get_dtb = machvirt_dtb;
- vbi->bootinfo.firmware_loaded = firmware_loaded;
- arm_load_kernel(ARM_CPU(first_cpu), &vbi->bootinfo);
+ create_virtio_devices(vms, pic);
+
+ vms->fw_cfg = create_fw_cfg(vms, &address_space_memory);
+ rom_set_fw(vms->fw_cfg);
+
+ vms->machine_done.notify = virt_machine_done;
+ qemu_add_machine_init_done_notifier(&vms->machine_done);
+
+ vms->bootinfo.ram_size = machine->ram_size;
+ vms->bootinfo.kernel_filename = machine->kernel_filename;
+ vms->bootinfo.kernel_cmdline = machine->kernel_cmdline;
+ vms->bootinfo.initrd_filename = machine->initrd_filename;
+ vms->bootinfo.nb_cpus = smp_cpus;
+ vms->bootinfo.board_id = -1;
+ vms->bootinfo.loader_start = vms->memmap[VIRT_MEM].base;
+ vms->bootinfo.get_dtb = machvirt_dtb;
+ vms->bootinfo.firmware_loaded = firmware_loaded;
+ arm_load_kernel(ARM_CPU(first_cpu), &vms->bootinfo);
/*
* arm_load_kernel machine init done notifier registration must
@@ -1437,7 +1433,7 @@ static void machvirt_init(MachineState *machine)
* another notifier is registered which adds platform bus nodes.
* Notifiers are executed in registration reverse order.
*/
- create_platform_bus(vbi, pic);
+ create_platform_bus(vms, pic);
}
static bool virt_get_secure(Object *obj, Error **errp)
@@ -1454,6 +1450,20 @@ static void virt_set_secure(Object *obj, bool value, Error **errp)
vms->secure = value;
}
+static bool virt_get_virt(Object *obj, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ return vms->virt;
+}
+
+static void virt_set_virt(Object *obj, bool value, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ vms->virt = value;
+}
+
static bool virt_get_highmem(Object *obj, Error **errp)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
@@ -1525,7 +1535,7 @@ static void machvirt_machine_init(void)
}
type_init(machvirt_machine_init);
-static void virt_2_8_instance_init(Object *obj)
+static void virt_2_9_instance_init(Object *obj)
{
VirtMachineState *vms = VIRT_MACHINE(obj);
@@ -1541,6 +1551,16 @@ static void virt_2_8_instance_init(Object *obj)
"Security Extensions (TrustZone)",
NULL);
+ /* EL2 is also disabled by default, for similar reasons */
+ vms->virt = false;
+ object_property_add_bool(obj, "virtualization", virt_get_virt,
+ virt_set_virt, NULL);
+ object_property_set_description(obj, "virtualization",
+ "Set on/off to enable/disable emulating a "
+ "guest CPU which implements the ARM "
+ "Virtualization Extensions",
+ NULL);
+
/* High memory is enabled by default */
vms->highmem = true;
object_property_add_bool(obj, "highmem", virt_get_highmem,
@@ -1556,12 +1576,36 @@ static void virt_2_8_instance_init(Object *obj)
object_property_set_description(obj, "gic-version",
"Set GIC version. "
"Valid values are 2, 3 and host", NULL);
+
+ vms->memmap = a15memmap;
+ vms->irqmap = a15irqmap;
+}
+
+static void virt_machine_2_9_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(2, 9)
+
+#define VIRT_COMPAT_2_8 \
+ HW_COMPAT_2_8
+
+static void virt_2_8_instance_init(Object *obj)
+{
+ virt_2_9_instance_init(obj);
}
static void virt_machine_2_8_options(MachineClass *mc)
{
+ VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
+
+ virt_machine_2_9_options(mc);
+ SET_MACHINE_COMPAT(mc, VIRT_COMPAT_2_8);
+ /* For 2.8 and earlier we falsely claimed in the DT that
+ * our timers were edge-triggered, not level-triggered.
+ */
+ vmc->claim_edge_triggered_timers = true;
}
-DEFINE_VIRT_MACHINE_AS_LATEST(2, 8)
+DEFINE_VIRT_MACHINE(2, 8)
#define VIRT_COMPAT_2_7 \
HW_COMPAT_2_7
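
With the new "virtualization" machine property registered above, EL2 can be requested as a machine option on the command line. A hedged usage example (the CPU model and RAM size are illustrative choices, assuming a model that implements EL2 under TCG; the hunk above rejects the combination with KVM):

    qemu-system-aarch64 -machine virt,virtualization=on -cpu cortex-a57 -m 1G
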
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index 0d86ba35ae..bc4e66b862 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -258,6 +258,8 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
object_property_set_bool(OBJECT(&s->apu_cpu[i]),
s->secure, "has_el3", NULL);
+ object_property_set_bool(OBJECT(&s->apu_cpu[i]),
+ false, "has_el2", NULL);
object_property_set_int(OBJECT(&s->apu_cpu[i]), GIC_BASE_ADDR,
"reset-cbar", &error_abort);
object_property_set_bool(OBJECT(&s->apu_cpu[i]), true, "realized",
diff --git a/hw/arm/z2.c b/hw/arm/z2.c
index 68a92f3184..1607cbdb03 100644
--- a/hw/arm/z2.c
+++ b/hw/arm/z2.c
@@ -220,7 +220,7 @@ static int aer915_send(I2CSlave *i2c, uint8_t data)
return 0;
}
-static void aer915_event(I2CSlave *i2c, enum i2c_event event)
+static int aer915_event(I2CSlave *i2c, enum i2c_event event)
{
AER915State *s = AER915(i2c);
@@ -238,6 +238,8 @@ static void aer915_event(I2CSlave *i2c, enum i2c_event event)
default:
break;
}
+
+ return 0;
}
static int aer915_recv(I2CSlave *slave)
@@ -263,12 +265,6 @@ static int aer915_recv(I2CSlave *slave)
return retval;
}
-static int aer915_init(I2CSlave *i2c)
-{
- /* Nothing to do. */
- return 0;
-}
-
static VMStateDescription vmstate_aer915_state = {
.name = "aer915",
.version_id = 1,
@@ -285,7 +281,6 @@ static void aer915_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
- k->init = aer915_init;
k->event = aer915_event;
k->recv = aer915_recv;
k->send = aer915_send;
diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c
index cbd959e0bd..c30657501c 100644
--- a/hw/audio/ac97.c
+++ b/hw/audio/ac97.c
@@ -1387,6 +1387,16 @@ static void ac97_realize(PCIDevice *dev, Error **errp)
ac97_on_reset (&s->dev.qdev);
}
+static void ac97_exit(PCIDevice *dev)
+{
+ AC97LinkState *s = DO_UPCAST(AC97LinkState, dev, dev);
+
+ AUD_close_in(&s->card, s->voice_pi);
+ AUD_close_out(&s->card, s->voice_po);
+ AUD_close_in(&s->card, s->voice_mc);
+ AUD_remove_card(&s->card);
+}
+
static int ac97_init (PCIBus *bus)
{
pci_create_simple (bus, -1, "AC97");
@@ -1404,6 +1414,7 @@ static void ac97_class_init (ObjectClass *klass, void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS (klass);
k->realize = ac97_realize;
+ k->exit = ac97_exit;
k->vendor_id = PCI_VENDOR_ID_INTEL;
k->device_id = PCI_DEVICE_ID_INTEL_82801AA_5;
k->revision = 0x01;
diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c
index 8449b5f436..fe64c1ac37 100644
--- a/hw/audio/es1370.c
+++ b/hw/audio/es1370.c
@@ -1010,9 +1010,9 @@ static const VMStateDescription vmstate_es1370 = {
}
};
-static void es1370_on_reset (void *opaque)
+static void es1370_on_reset(DeviceState *dev)
{
- ES1370State *s = opaque;
+ ES1370State *s = container_of(dev, ES1370State, dev.qdev);
es1370_reset (s);
}
@@ -1035,12 +1035,24 @@ static void es1370_realize(PCIDevice *dev, Error **errp)
memory_region_init_io (&s->io, OBJECT(s), &es1370_io_ops, s, "es1370", 256);
pci_register_bar (&s->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io);
- qemu_register_reset (es1370_on_reset, s);
AUD_register_card ("es1370", &s->card);
es1370_reset (s);
}
+static void es1370_exit(PCIDevice *dev)
+{
+ ES1370State *s = ES1370(dev);
+ int i;
+
+ for (i = 0; i < 2; ++i) {
+ AUD_close_out(&s->card, s->dac_voice[i]);
+ }
+
+ AUD_close_in(&s->card, s->adc_voice);
+ AUD_remove_card(&s->card);
+}
+
static int es1370_init (PCIBus *bus)
{
pci_create_simple (bus, -1, TYPE_ES1370);
@@ -1053,6 +1065,7 @@ static void es1370_class_init (ObjectClass *klass, void *data)
PCIDeviceClass *k = PCI_DEVICE_CLASS (klass);
k->realize = es1370_realize;
+ k->exit = es1370_exit;
k->vendor_id = PCI_VENDOR_ID_ENSONIQ;
k->device_id = PCI_DEVICE_ID_ENSONIQ_ES1370;
k->class_id = PCI_CLASS_MULTIMEDIA_AUDIO;
@@ -1061,6 +1074,7 @@ static void es1370_class_init (ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
dc->desc = "ENSONIQ AudioPCI ES1370";
dc->vmsd = &vmstate_es1370;
+ dc->reset = es1370_on_reset;
}
static const TypeInfo es1370_info = {
diff --git a/hw/audio/marvell_88w8618.c b/hw/audio/marvell_88w8618.c
index a6ca1806be..511b004287 100644
--- a/hw/audio/marvell_88w8618.c
+++ b/hw/audio/marvell_88w8618.c
@@ -241,19 +241,23 @@ static const MemoryRegionOps mv88w8618_audio_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int mv88w8618_audio_init(SysBusDevice *dev)
+static void mv88w8618_audio_init(Object *obj)
{
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
mv88w8618_audio_state *s = MV88W8618_AUDIO(dev);
sysbus_init_irq(dev, &s->irq);
- wm8750_data_req_set(s->wm, mv88w8618_audio_callback, s);
-
- memory_region_init_io(&s->iomem, OBJECT(s), &mv88w8618_audio_ops, s,
+ memory_region_init_io(&s->iomem, obj, &mv88w8618_audio_ops, s,
"audio", MP_AUDIO_SIZE);
sysbus_init_mmio(dev, &s->iomem);
+}
- return 0;
+static void mv88w8618_audio_realize(DeviceState *dev, Error **errp)
+{
+ mv88w8618_audio_state *s = MV88W8618_AUDIO(dev);
+
+ wm8750_data_req_set(s->wm, mv88w8618_audio_callback, s);
}
static const VMStateDescription mv88w8618_audio_vmsd = {
@@ -282,9 +286,8 @@ static Property mv88w8618_audio_properties[] = {
static void mv88w8618_audio_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = mv88w8618_audio_init;
+ dc->realize = mv88w8618_audio_realize;
dc->reset = mv88w8618_audio_reset;
dc->vmsd = &mv88w8618_audio_vmsd;
dc->props = mv88w8618_audio_properties;
@@ -296,6 +299,7 @@ static const TypeInfo mv88w8618_audio_info = {
.name = TYPE_MV88W8618_AUDIO,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(mv88w8618_audio_state),
+ .instance_init = mv88w8618_audio_init,
.class_init = mv88w8618_audio_class_init,
};
diff --git a/hw/audio/pl041.c b/hw/audio/pl041.c
index 6e9c104011..c8cc503236 100644
--- a/hw/audio/pl041.c
+++ b/hw/audio/pl041.c
@@ -521,12 +521,23 @@ static const MemoryRegionOps pl041_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int pl041_init(SysBusDevice *dev)
+static void pl041_init(Object *obj)
{
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
PL041State *s = PL041(dev);
DBG_L1("pl041_init 0x%08x\n", (uint32_t)s);
+ /* Connect the device to the sysbus */
+ memory_region_init_io(&s->iomem, obj, &pl041_ops, s, "pl041", 0x1000);
+ sysbus_init_mmio(dev, &s->iomem);
+ sysbus_init_irq(dev, &s->irq);
+}
+
+static void pl041_realize(DeviceState *dev, Error **errp)
+{
+ PL041State *s = PL041(dev);
+
/* Check the device properties */
switch (s->fifo_depth) {
case 8:
@@ -545,18 +556,10 @@ static int pl041_init(SysBusDevice *dev)
qemu_log_mask(LOG_UNIMP,
"pl041: unsupported non-compact fifo depth [%i]\n",
s->fifo_depth);
- return -1;
}
- /* Connect the device to the sysbus */
- memory_region_init_io(&s->iomem, OBJECT(s), &pl041_ops, s, "pl041", 0x1000);
- sysbus_init_mmio(dev, &s->iomem);
- sysbus_init_irq(dev, &s->irq);
-
/* Init the codec */
lm4549_init(&s->codec, &pl041_request_data, (void *)s);
-
- return 0;
}
static const VMStateDescription vmstate_pl041_regfile = {
@@ -627,9 +630,8 @@ static Property pl041_device_properties[] = {
static void pl041_device_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = pl041_init;
+ dc->realize = pl041_realize;
set_bit(DEVICE_CATEGORY_SOUND, dc->categories);
dc->reset = pl041_device_reset;
dc->vmsd = &vmstate_pl041;
@@ -640,6 +642,7 @@ static const TypeInfo pl041_device_info = {
.name = TYPE_PL041,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(PL041State),
+ .instance_init = pl041_init,
.class_init = pl041_device_class_init,
};
diff --git a/hw/audio/wm8750.c b/hw/audio/wm8750.c
index 0c6500e96a..f8b5bebfc2 100644
--- a/hw/audio/wm8750.c
+++ b/hw/audio/wm8750.c
@@ -303,7 +303,7 @@ static void wm8750_reset(I2CSlave *i2c)
s->i2c_len = 0;
}
-static void wm8750_event(I2CSlave *i2c, enum i2c_event event)
+static int wm8750_event(I2CSlave *i2c, enum i2c_event event)
{
WM8750State *s = WM8750(i2c);
@@ -321,6 +321,8 @@ static void wm8750_event(I2CSlave *i2c, enum i2c_event event)
default:
break;
}
+
+ return 0;
}
#define WM8750_LINVOL 0x00
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index d29ff4cb4f..2d6eb46a04 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -28,6 +28,7 @@
#include "hw/ssi/ssi.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#ifndef M25P80_ERR_DEBUG
@@ -73,6 +74,12 @@ typedef struct FlashPartInfo {
uint32_t n_sectors;
uint32_t page_size;
uint16_t flags;
+ /*
+ * Large SPI NOR flash parts are often stacked (multi-die) devices and
+ * sometimes replace the chip erase command with a die erase command.
+ * This field indicates how many dies are in the chip.
+ */
+ uint8_t die_cnt;
} FlashPartInfo;
/* adapted from linux */
@@ -90,7 +97,8 @@ typedef struct FlashPartInfo {
.sector_size = (_sector_size),\
.n_sectors = (_n_sectors),\
.page_size = 256,\
- .flags = (_flags),
+ .flags = (_flags),\
+ .die_cnt = 0
#define INFO6(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags)\
.part_name = _part_name,\
@@ -107,6 +115,24 @@ typedef struct FlashPartInfo {
.n_sectors = (_n_sectors),\
.page_size = 256,\
.flags = (_flags),\
+ .die_cnt = 0
+
+#define INFO_STACKED(_part_name, _jedec_id, _ext_id, _sector_size, _n_sectors,\
+ _flags, _die_cnt)\
+ .part_name = _part_name,\
+ .id = {\
+ ((_jedec_id) >> 16) & 0xff,\
+ ((_jedec_id) >> 8) & 0xff,\
+ (_jedec_id) & 0xff,\
+ ((_ext_id) >> 8) & 0xff,\
+ (_ext_id) & 0xff,\
+ },\
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),\
+ .sector_size = (_sector_size),\
+ .n_sectors = (_n_sectors),\
+ .page_size = 256,\
+ .flags = (_flags),\
+ .die_cnt = _die_cnt
#define JEDEC_NUMONYX 0x20
#define JEDEC_WINBOND 0xEF
@@ -121,7 +147,7 @@ typedef struct FlashPartInfo {
#define CFG_DUMMY_CLK_LEN 4
#define NVCFG_DUMMY_CLK_POS 12
#define VCFG_DUMMY_CLK_POS 4
-#define EVCFG_OUT_DRIVER_STRENGHT_DEF 7
+#define EVCFG_OUT_DRIVER_STRENGTH_DEF 7
#define EVCFG_VPP_ACCELERATOR (1 << 3)
#define EVCFG_RESET_HOLD_ENABLED (1 << 4)
#define NVCFG_DUAL_IO_MASK (1 << 2)
@@ -203,6 +229,7 @@ static const FlashPartInfo known_devices[] = {
{ INFO("mx25l25655e", 0xc22619, 0, 64 << 10, 512, 0) },
{ INFO("mx66u51235f", 0xc2253a, 0, 64 << 10, 1024, ER_4K | ER_32K) },
{ INFO("mx66u1g45g", 0xc2253b, 0, 64 << 10, 2048, ER_4K | ER_32K) },
+ { INFO("mx66l1g45g", 0xc2201b, 0, 64 << 10, 2048, ER_4K | ER_32K) },
/* Micron */
{ INFO("n25q032a11", 0x20bb16, 0, 64 << 10, 64, ER_4K) },
@@ -216,8 +243,10 @@ static const FlashPartInfo known_devices[] = {
{ INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) },
{ INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, ER_4K) },
{ INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) },
- { INFO("mt25ql01g", 0x20ba21, 0, 64 << 10, 2048, ER_4K) },
- { INFO("mt25qu01g", 0x20bb21, 0, 64 << 10, 2048, ER_4K) },
+ { INFO_STACKED("n25q00", 0x20ba21, 0x1000, 64 << 10, 2048, ER_4K, 4) },
+ { INFO_STACKED("n25q00a", 0x20bb21, 0x1000, 64 << 10, 2048, ER_4K, 4) },
+ { INFO_STACKED("mt25ql01g", 0x20ba21, 0x1040, 64 << 10, 2048, ER_4K, 2) },
+ { INFO_STACKED("mt25qu01g", 0x20bb21, 0x1040, 64 << 10, 2048, ER_4K, 2) },
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
@@ -325,6 +354,7 @@ typedef enum {
PP4_4 = 0x3e,
DPP = 0xa2,
QPP = 0x32,
+ QPP_4 = 0x34,
ERASE_4K = 0x20,
ERASE4_4K = 0x21,
@@ -357,6 +387,8 @@ typedef enum {
REVCR = 0x65,
WEVCR = 0x61,
+
+ DIE_ERASE = 0xC4,
} FlashCMD;
typedef enum {
@@ -376,6 +408,8 @@ typedef enum {
MAN_GENERIC,
} Manufacturer;
+#define M25P80_INTERNAL_DATA_BUFFER_SZ 16
+
typedef struct Flash {
SSISlave parent_obj;
@@ -386,7 +420,7 @@ typedef struct Flash {
int page_size;
uint8_t state;
- uint8_t data[16];
+ uint8_t data[M25P80_INTERNAL_DATA_BUFFER_SZ];
uint32_t len;
uint32_t pos;
uint8_t needed_bytes;
@@ -512,6 +546,16 @@ static void flash_erase(Flash *s, int offset, FlashCMD cmd)
case BULK_ERASE:
len = s->size;
break;
+ case DIE_ERASE:
+ if (s->pi->die_cnt) {
+ len = s->size / s->pi->die_cnt;
+ offset = offset & (~(len - 1));
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "M25P80: die erase is not supported"
+ " by device\n");
+ return;
+ }
+ break;
default:
abort();
}
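
The DIE_ERASE branch above derives the per-die length as the total size divided by the die count and then masks the requested offset down to the start of the containing die, which works because the per-die sizes of the parts defined with INFO_STACKED are powers of two. A standalone sketch of that arithmetic using the n25q00 geometry from the table (the offset value is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t size    = 2048u * (64u << 10); /* 128 MiB, as for n25q00 */
        uint32_t die_cnt = 4;                   /* four stacked dies */
        uint32_t len     = size / die_cnt;      /* 32 MiB per die */
        uint32_t offset  = 0x0300beef;          /* arbitrary address in die 1 */

        offset &= ~(len - 1);                   /* round down to die start */
        printf("erase 0x%08x..0x%08x\n",
               (unsigned)offset, (unsigned)(offset + len - 1));
        return 0;
    }
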
@@ -573,6 +617,7 @@ static inline int get_addr_length(Flash *s)
switch (s->cmd_in_progress) {
case PP4:
case PP4_4:
+ case QPP_4:
case READ4:
case QIOR4:
case ERASE4_4K:
@@ -606,6 +651,7 @@ static void complete_collecting_data(Flash *s)
switch (s->cmd_in_progress) {
case DPP:
case QPP:
+ case QPP_4:
case PP:
case PP4:
case PP4_4:
@@ -631,6 +677,7 @@ static void complete_collecting_data(Flash *s)
case ERASE4_32K:
case ERASE_SECTOR:
case ERASE4_SECTOR:
+ case DIE_ERASE:
flash_erase(s, s->cur_addr, s->cmd_in_progress);
break;
case WRSR:
@@ -700,7 +747,7 @@ static void reset_memory(Flash *s)
);
s->enh_volatile_cfg = 0;
- s->enh_volatile_cfg |= EVCFG_OUT_DRIVER_STRENGHT_DEF;
+ s->enh_volatile_cfg |= EVCFG_OUT_DRIVER_STRENGTH_DEF;
s->enh_volatile_cfg |= EVCFG_VPP_ACCELERATOR;
s->enh_volatile_cfg |= EVCFG_RESET_HOLD_ENABLED;
if (s->nonvolatile_cfg & NVCFG_DUAL_IO_MASK) {
@@ -873,9 +920,11 @@ static void decode_new_cmd(Flash *s, uint32_t value)
case READ4:
case DPP:
case QPP:
+ case QPP_4:
case PP:
case PP4:
case PP4_4:
+ case DIE_ERASE:
s->needed_bytes = get_addr_length(s);
s->pos = 0;
s->len = 0;
@@ -1114,6 +1163,17 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx)
case STATE_COLLECTING_DATA:
case STATE_COLLECTING_VAR_LEN_DATA:
+
+ if (s->len >= M25P80_INTERNAL_DATA_BUFFER_SZ) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M25P80: Write overrun internal data buffer. "
+ "SPI controller (QEMU emulator or guest driver) "
+ "is misbehaving\n");
+ s->len = s->pos = 0;
+ s->state = STATE_IDLE;
+ break;
+ }
+
s->data[s->len] = (uint8_t)tx;
s->len++;
@@ -1123,6 +1183,17 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx)
break;
case STATE_READING_DATA:
+
+ if (s->pos >= M25P80_INTERNAL_DATA_BUFFER_SZ) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M25P80: Read overrun internal data buffer. "
+ "SPI controller (QEMU emulator or guest driver) "
+ "is misbehaving\n");
+ s->len = s->pos = 0;
+ s->state = STATE_IDLE;
+ break;
+ }
+
r = s->data[s->pos];
s->pos++;
if (s->pos == s->len) {
@@ -1195,7 +1266,7 @@ static const VMStateDescription vmstate_m25p80 = {
.pre_save = m25p80_pre_save,
.fields = (VMStateField[]) {
VMSTATE_UINT8(state, Flash),
- VMSTATE_UINT8_ARRAY(data, Flash, 16),
+ VMSTATE_UINT8_ARRAY(data, Flash, M25P80_INTERNAL_DATA_BUFFER_SZ),
VMSTATE_UINT32(len, Flash),
VMSTATE_UINT32(pos, Flash),
VMSTATE_UINT8(needed_bytes, Flash),
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 0c5fd27593..702eda863e 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -588,13 +588,19 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq)
blk_io_plug(s->blk);
- while ((req = virtio_blk_get_request(s, vq))) {
- if (virtio_blk_handle_request(req, &mrb)) {
- virtqueue_detach_element(req->vq, &req->elem, 0);
- virtio_blk_free_request(req);
- break;
+ do {
+ virtio_queue_set_notification(vq, 0);
+
+ while ((req = virtio_blk_get_request(s, vq))) {
+ if (virtio_blk_handle_request(req, &mrb)) {
+ virtqueue_detach_element(req->vq, &req->elem, 0);
+ virtio_blk_free_request(req);
+ break;
+ }
}
- }
+
+ virtio_queue_set_notification(vq, 1);
+ } while (!virtio_queue_empty(vq));
if (mrb.num_reqs) {
virtio_blk_submit_multireq(s->blk, &mrb);
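
The reworked loop above disables virtqueue notifications while it drains requests, re-enables them, and then re-checks the queue; repeating until the queue is observed empty with notifications on closes the window in which a request could arrive without triggering a notification. A generic, self-contained sketch of that pattern with a toy queue standing in for the virtqueue (none of these names are QEMU APIs):

    #include <stdbool.h>
    #include <stdio.h>

    static int  pending = 3;      /* toy "virtqueue" depth */
    static bool notify  = true;   /* toy notification flag */

    static bool queue_empty(void) { return pending == 0; }
    static bool queue_pop(void)   { return pending ? (pending--, true) : false; }

    static void handle_queue(void)
    {
        do {
            notify = false;       /* suppress notifications while draining */
            while (queue_pop()) {
                printf("handled one request\n");
            }
            notify = true;        /* re-enable, then re-check for stragglers */
        } while (!queue_empty());
    }

    int main(void) { handle_queue(); return 0; }
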
@@ -857,7 +863,7 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
}
}
- req = qemu_get_virtqueue_element(f, sizeof(VirtIOBlockReq));
+ req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq));
virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req);
req->next = s->rq;
s->rq = req;
diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c
index 0215d6518d..4dcee571c0 100644
--- a/hw/char/cadence_uart.c
+++ b/hw/char/cadence_uart.c
@@ -138,9 +138,10 @@ static void fifo_trigger_update(void *opaque)
{
CadenceUARTState *s = opaque;
- s->r[R_CISR] |= UART_INTR_TIMEOUT;
-
- uart_update_status(s);
+ if (s->r[R_RTOR]) {
+ s->r[R_CISR] |= UART_INTR_TIMEOUT;
+ uart_update_status(s);
+ }
}
static void uart_rx_reset(CadenceUARTState *s)
@@ -502,6 +503,13 @@ static int cadence_uart_post_load(void *opaque, int version_id)
{
CadenceUARTState *s = opaque;
+ /* Ensure these two aren't invalid numbers */
+ if (s->r[R_BRGR] < 1 || s->r[R_BRGR] & ~0xFFFF ||
+ s->r[R_BDIV] <= 3 || s->r[R_BDIV] & ~0xFF) {
+ /* Value is invalid, abort */
+ return 1;
+ }
+
uart_parameters_setup(s);
uart_update_status(s);
return 0;
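
The post_load hook added above refuses to apply migrated baud-rate registers that are out of range, signalling the bad incoming state with a non-zero return instead of programming the UART with garbage. A standalone sketch of the same validate-before-apply idea, reusing the bounds from the patch on a toy register file:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_uart { uint32_t brgr; uint32_t bdiv; };   /* toy migrated state */

    static int toy_post_load(const struct toy_uart *s)
    {
        if (s->brgr < 1 || (s->brgr & ~0xFFFFu) ||
            s->bdiv <= 3 || (s->bdiv & ~0xFFu)) {
            return 1;             /* reject: invalid incoming state */
        }
        return 0;                 /* accept: safe to apply */
    }

    int main(void)
    {
        struct toy_uart bad = { 0x10000, 2 };
        printf("%s\n", toy_post_load(&bad) ? "rejected" : "accepted");
        return 0;
    }
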
diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c
index 571c324004..820d1abeb9 100644
--- a/hw/char/exynos4210_uart.c
+++ b/hw/char/exynos4210_uart.c
@@ -629,22 +629,26 @@ DeviceState *exynos4210_uart_create(hwaddr addr,
return dev;
}
-static int exynos4210_uart_init(SysBusDevice *dev)
+static void exynos4210_uart_init(Object *obj)
{
+ SysBusDevice *dev = SYS_BUS_DEVICE(obj);
Exynos4210UartState *s = EXYNOS4210_UART(dev);
/* memory mapping */
- memory_region_init_io(&s->iomem, OBJECT(s), &exynos4210_uart_ops, s,
+ memory_region_init_io(&s->iomem, obj, &exynos4210_uart_ops, s,
"exynos4210.uart", EXYNOS4210_UART_REGS_MEM_SIZE);
sysbus_init_mmio(dev, &s->iomem);
sysbus_init_irq(dev, &s->irq);
+}
+
+static void exynos4210_uart_realize(DeviceState *dev, Error **errp)
+{
+ Exynos4210UartState *s = EXYNOS4210_UART(dev);
qemu_chr_fe_set_handlers(&s->chr, exynos4210_uart_can_receive,
exynos4210_uart_receive, exynos4210_uart_event,
s, NULL, true);
-
- return 0;
}
static Property exynos4210_uart_properties[] = {
@@ -658,9 +662,8 @@ static Property exynos4210_uart_properties[] = {
static void exynos4210_uart_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
- SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = exynos4210_uart_init;
+ dc->realize = exynos4210_uart_realize;
dc->reset = exynos4210_uart_reset;
dc->props = exynos4210_uart_properties;
dc->vmsd = &vmstate_exynos4210_uart;
@@ -670,6 +673,7 @@ static const TypeInfo exynos4210_uart_info = {
.name = TYPE_EXYNOS4210_UART,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(Exynos4210UartState),
+ .instance_init = exynos4210_uart_init,
.class_init = exynos4210_uart_class_init,
};
diff --git a/hw/char/serial.c b/hw/char/serial.c
index ffbacd8227..67b18eda12 100644
--- a/hw/char/serial.c
+++ b/hw/char/serial.c
@@ -906,6 +906,16 @@ void serial_realize_core(SerialState *s, Error **errp)
void serial_exit_core(SerialState *s)
{
qemu_chr_fe_deinit(&s->chr);
+
+ timer_del(s->modem_status_poll);
+ timer_free(s->modem_status_poll);
+
+ timer_del(s->fifo_timeout_timer);
+ timer_free(s->fifo_timeout_timer);
+
+ fifo8_destroy(&s->recv_fifo);
+ fifo8_destroy(&s->xmit_fifo);
+
qemu_unregister_reset(serial_reset, s);
}
diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c
index 7975c2cda1..d544cd91c0 100644
--- a/hw/char/virtio-serial-bus.c
+++ b/hw/char/virtio-serial-bus.c
@@ -732,6 +732,7 @@ static void virtio_serial_post_load_timer_cb(void *opaque)
static int fetch_active_ports_list(QEMUFile *f,
VirtIOSerial *s, uint32_t nr_active_ports)
{
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
uint32_t i;
s->post_load = g_malloc0(sizeof(*s->post_load));
@@ -765,7 +766,7 @@ static int fetch_active_ports_list(QEMUFile *f,
qemu_get_be64s(f, &port->iov_offset);
port->elem =
- qemu_get_virtqueue_element(f, sizeof(VirtQueueElement));
+ qemu_get_virtqueue_element(vdev, f, sizeof(VirtQueueElement));
/*
* Port was throttled on source machine. Let's
diff --git a/hw/core/Makefile.objs b/hw/core/Makefile.objs
index a4c94e522d..7f8c9dc659 100644
--- a/hw/core/Makefile.objs
+++ b/hw/core/Makefile.objs
@@ -1,6 +1,6 @@
# core qdev-related obj files, also used by *-user:
common-obj-y += qdev.o qdev-properties.o
-common-obj-y += bus.o
+common-obj-y += bus.o reset.o
common-obj-y += fw-path-provider.o
# irq.o needed for qdev GPIO handling:
common-obj-y += irq.o
@@ -12,7 +12,6 @@ common-obj-$(CONFIG_XILINX_AXI) += stream.o
common-obj-$(CONFIG_PTIMER) += ptimer.o
common-obj-$(CONFIG_SOFTMMU) += sysbus.o
common-obj-$(CONFIG_SOFTMMU) += machine.o
-common-obj-$(CONFIG_SOFTMMU) += null-machine.o
common-obj-$(CONFIG_SOFTMMU) += loader.o
common-obj-$(CONFIG_SOFTMMU) += qdev-properties-system.o
common-obj-$(CONFIG_SOFTMMU) += register.o
@@ -20,3 +19,4 @@ common-obj-$(CONFIG_SOFTMMU) += or-irq.o
common-obj-$(CONFIG_PLATFORM_BUS) += platform-bus.o
obj-$(CONFIG_SOFTMMU) += generic-loader.o
+obj-$(CONFIG_SOFTMMU) += null-machine.o
diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c
index 208f549dff..58f1f02902 100644
--- a/hw/core/generic-loader.c
+++ b/hw/core/generic-loader.c
@@ -27,7 +27,7 @@
* this it needs a backend to manage the datas, the same as other
* memory-related devices. In this case as the backend is so trivial we
* have merged it with the frontend instead of creating and maintaining a
- * seperate backend.
+ * separate backend.
*/
#include "qemu/osdep.h"
@@ -79,7 +79,7 @@ static void generic_loader_realize(DeviceState *dev, Error **errp)
"loading memory values");
return;
} else if (!s->data_len) {
- /* We cant' check for !data here as a value of 0 is still valid. */
+ /* We can't check for !data here as a value of 0 is still valid. */
error_setg(errp, "Both data and data-len must be specified");
return;
} else if (s->data_len > 8) {
diff --git a/hw/core/loader.c b/hw/core/loader.c
index 45742494e6..ee5abd6eb7 100644
--- a/hw/core/loader.c
+++ b/hw/core/loader.c
@@ -853,7 +853,7 @@ static void fw_cfg_resized(const char *id, uint64_t length, void *host)
}
}
-static void *rom_set_mr(Rom *rom, Object *owner, const char *name)
+static void *rom_set_mr(Rom *rom, Object *owner, const char *name, bool ro)
{
void *data;
@@ -862,7 +862,7 @@ static void *rom_set_mr(Rom *rom, Object *owner, const char *name)
rom->datasize, rom->romsize,
fw_cfg_resized,
&error_fatal);
- memory_region_set_readonly(rom->mr, true);
+ memory_region_set_readonly(rom->mr, ro);
vmstate_register_ram_global(rom->mr);
data = memory_region_get_ram_ptr(rom->mr);
@@ -942,7 +942,7 @@ int rom_add_file(const char *file, const char *fw_dir,
snprintf(devpath, sizeof(devpath), "/rom@%s", fw_file_name);
if ((!option_rom || mc->option_rom_has_mr) && mc->rom_file_has_mr) {
- data = rom_set_mr(rom, OBJECT(fw_cfg), devpath);
+ data = rom_set_mr(rom, OBJECT(fw_cfg), devpath, true);
} else {
data = rom->data;
}
@@ -979,7 +979,7 @@ err:
MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
size_t max_len, hwaddr addr, const char *fw_file_name,
FWCfgReadCallback fw_callback, void *callback_opaque,
- AddressSpace *as)
+ AddressSpace *as, bool read_only)
{
MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
Rom *rom;
@@ -998,10 +998,14 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
char devpath[100];
void *data;
- snprintf(devpath, sizeof(devpath), "/rom@%s", fw_file_name);
+ if (read_only) {
+ snprintf(devpath, sizeof(devpath), "/rom@%s", fw_file_name);
+ } else {
+ snprintf(devpath, sizeof(devpath), "/ram@%s", fw_file_name);
+ }
if (mc->rom_file_has_mr) {
- data = rom_set_mr(rom, OBJECT(fw_cfg), devpath);
+ data = rom_set_mr(rom, OBJECT(fw_cfg), devpath, read_only);
mr = rom->mr;
} else {
data = rom->data;
@@ -1009,7 +1013,7 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
fw_cfg_add_file_callback(fw_cfg, fw_file_name,
fw_callback, callback_opaque,
- data, rom->datasize);
+ data, rom->datasize, read_only);
}
return mr;
}
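
The loader hunks above thread a new read_only flag through rom_set_mr() and rom_add_blob(), so a caller can expose a guest-writable blob, which is then registered as /ram@<name> rather than /rom@<name>. A minimal caller sketch using the new signature exactly as declared above; the blob, lengths, callback and state object are hypothetical:

    /* Hypothetical caller of the extended rom_add_blob(); only the last
     * argument is new.  read_only=false keeps the region guest-writable
     * and names its device path /ram@etc/my-table. */
    MemoryRegion *mr = rom_add_blob("etc/my-table", table_data, table_len,
                                    table_max_len, -1 /* addr, as in
                                    acpi_add_rom_blob() below */,
                                    "etc/my-table", my_update_cb, my_state,
                                    NULL /* default address space */,
                                    false /* read_only */);
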
diff --git a/hw/core/null-machine.c b/hw/core/null-machine.c
index 0351ba7828..27c8369b57 100644
--- a/hw/core/null-machine.c
+++ b/hw/core/null-machine.c
@@ -13,18 +13,41 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
+#include "qemu/error-report.h"
#include "hw/hw.h"
#include "hw/boards.h"
+#include "sysemu/sysemu.h"
+#include "exec/address-spaces.h"
+#include "cpu.h"
-static void machine_none_init(MachineState *machine)
+static void machine_none_init(MachineState *mch)
{
+ CPUState *cpu = NULL;
+
+ /* Initialize CPU (if a model has been specified) */
+ if (mch->cpu_model) {
+ cpu = cpu_init(mch->cpu_model);
+ if (!cpu) {
+ error_report("Unable to initialize CPU");
+ exit(1);
+ }
+ }
+
+ /* RAM at address zero */
+ if (mch->ram_size) {
+ MemoryRegion *ram = g_new(MemoryRegion, 1);
+
+ memory_region_allocate_system_memory(ram, NULL, "ram", mch->ram_size);
+ memory_region_add_subregion(get_system_memory(), 0, ram);
+ }
}
static void machine_none_machine_init(MachineClass *mc)
{
mc->desc = "empty machine";
mc->init = machine_none_init;
- mc->max_cpus = 0;
+ mc->max_cpus = 1;
+ mc->default_ram_size = 0;
}
DEFINE_MACHINE("none", machine_none_machine_init)
diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c
index 2a82768067..6ab4265eb4 100644
--- a/hw/core/qdev-properties.c
+++ b/hw/core/qdev-properties.c
@@ -711,7 +711,7 @@ static void get_pci_host_devaddr(Object *obj, Visitor *v, const char *name,
/*
* Catch "invalid" device reference from vfio-pci and allow the
- * default buffer representing the non-existant device to be used.
+ * default buffer representing the non-existent device to be used.
*/
if (~addr->domain || ~addr->bus || ~addr->slot || ~addr->function) {
rc = snprintf(buffer, sizeof(buffer), "%04x:%02x:%02x.%0d",
diff --git a/hw/core/reset.c b/hw/core/reset.c
new file mode 100644
index 0000000000..84c8869371
--- /dev/null
+++ b/hw/core/reset.c
@@ -0,0 +1,72 @@
+/*
+ * Reset handlers.
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/queue.h"
+#include "sysemu/reset.h"
+
+/* reset/shutdown handler */
+
+typedef struct QEMUResetEntry {
+ QTAILQ_ENTRY(QEMUResetEntry) entry;
+ QEMUResetHandler *func;
+ void *opaque;
+} QEMUResetEntry;
+
+static QTAILQ_HEAD(reset_handlers, QEMUResetEntry) reset_handlers =
+ QTAILQ_HEAD_INITIALIZER(reset_handlers);
+
+void qemu_register_reset(QEMUResetHandler *func, void *opaque)
+{
+ QEMUResetEntry *re = g_malloc0(sizeof(QEMUResetEntry));
+
+ re->func = func;
+ re->opaque = opaque;
+ QTAILQ_INSERT_TAIL(&reset_handlers, re, entry);
+}
+
+void qemu_unregister_reset(QEMUResetHandler *func, void *opaque)
+{
+ QEMUResetEntry *re;
+
+ QTAILQ_FOREACH(re, &reset_handlers, entry) {
+ if (re->func == func && re->opaque == opaque) {
+ QTAILQ_REMOVE(&reset_handlers, re, entry);
+ g_free(re);
+ return;
+ }
+ }
+}
+
+void qemu_devices_reset(void)
+{
+ QEMUResetEntry *re, *nre;
+
+ /* reset all devices */
+ QTAILQ_FOREACH_SAFE(re, &reset_handlers, entry, nre) {
+ re->func(re->opaque);
+ }
+}
+
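
Since the whole registration API is visible in this new file, here is a minimal usage sketch; the device state and callbacks are hypothetical and not part of the patch:

    #include "qemu/osdep.h"
    #include "sysemu/reset.h"

    typedef struct MyDeviceState {
        int counter;                    /* hypothetical device register */
    } MyDeviceState;

    static void my_device_reset(void *opaque)
    {
        MyDeviceState *s = opaque;
        s->counter = 0;                 /* return to power-on state */
    }

    /* realize: have my_device_reset() run on every qemu_devices_reset() */
    static void my_device_realize(MyDeviceState *s)
    {
        qemu_register_reset(my_device_reset, s);
    }

    /* unrealize: the (func, opaque) pair must match the registration */
    static void my_device_exit(MyDeviceState *s)
    {
        qemu_unregister_reset(my_device_reset, s);
    }
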
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index bdb092ee9d..379910db2d 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -294,7 +294,7 @@ static bool blit_region_is_unsafe(struct CirrusVGAState *s,
return false;
}
-static bool blit_is_unsafe(struct CirrusVGAState *s)
+static bool blit_is_unsafe(struct CirrusVGAState *s, bool dst_only)
{
/* should be the case, see cirrus_bitblt_start */
assert(s->cirrus_blt_width > 0);
@@ -308,6 +308,9 @@ static bool blit_is_unsafe(struct CirrusVGAState *s)
s->cirrus_blt_dstaddr & s->cirrus_addr_mask)) {
return true;
}
+ if (dst_only) {
+ return false;
+ }
if (blit_region_is_unsafe(s, s->cirrus_blt_srcpitch,
s->cirrus_blt_srcaddr & s->cirrus_addr_mask)) {
return true;
@@ -673,7 +676,7 @@ static int cirrus_bitblt_common_patterncopy(CirrusVGAState * s,
dst = s->vga.vram_ptr + (s->cirrus_blt_dstaddr & s->cirrus_addr_mask);
- if (blit_is_unsafe(s))
+ if (blit_is_unsafe(s, false))
return 0;
(*s->cirrus_rop) (s, dst, src,
@@ -691,7 +694,7 @@ static int cirrus_bitblt_solidfill(CirrusVGAState *s, int blt_rop)
{
cirrus_fill_t rop_func;
- if (blit_is_unsafe(s)) {
+ if (blit_is_unsafe(s, true)) {
return 0;
}
rop_func = cirrus_fill[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1];
@@ -795,7 +798,7 @@ static int cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s)
{
- if (blit_is_unsafe(s))
+ if (blit_is_unsafe(s, false))
return 0;
return cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->vga.start_addr,
diff --git a/hw/display/framebuffer.c b/hw/display/framebuffer.c
index df51358e72..25aa46c8c7 100644
--- a/hw/display/framebuffer.c
+++ b/hw/display/framebuffer.c
@@ -78,7 +78,7 @@ void framebuffer_update_display(
i = *first_row;
*first_row = -1;
- src_len = src_width * rows;
+ src_len = (hwaddr)src_width * rows;
mem = mem_section->mr;
if (!mem) {
diff --git a/hw/display/milkymist-tmu2.c b/hw/display/milkymist-tmu2.c
index 5c666f9b24..920374b985 100644
--- a/hw/display/milkymist-tmu2.c
+++ b/hw/display/milkymist-tmu2.c
@@ -257,7 +257,7 @@ static void tmu2_start(MilkymistTMU2State *s)
glColor4f(m, m, m, (float)(s->regs[R_ALPHA] + 1) / 64.0f);
/* Read the QEMU dest. framebuffer into the OpenGL framebuffer */
- fb_len = 2 * s->regs[R_DSTHRES] * s->regs[R_DSTVRES];
+ fb_len = 2ULL * s->regs[R_DSTHRES] * s->regs[R_DSTVRES];
fb = cpu_physical_memory_map(s->regs[R_DSTFBUF], &fb_len, 0);
if (fb == NULL) {
glDeleteTextures(1, &texture);
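
The framebuffer.c and milkymist-tmu2.c hunks fix the same class of bug: the size is multiplied in 32-bit arithmetic and only widened afterwards, so guest-controlled register values can make the product wrap. Widening one operand first (the (hwaddr) cast, the 2ULL constant) makes the whole multiplication 64-bit. A standalone illustration with made-up register values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hres = 70000, vres = 70000;    /* hypothetical guest-set values */

        uint32_t wrapped = 2 * hres * vres;     /* product computed modulo 2^32 */
        uint64_t widened = 2ULL * hres * vres;  /* 2ULL widens the whole product */

        printf("wrapped: %" PRIu32 "\n", wrapped);
        printf("widened: %" PRIu64 "\n", widened);
        return 0;
    }
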
diff --git a/hw/display/ssd0303.c b/hw/display/ssd0303.c
index d3017563f3..68a80b9d64 100644
--- a/hw/display/ssd0303.c
+++ b/hw/display/ssd0303.c
@@ -179,7 +179,7 @@ static int ssd0303_send(I2CSlave *i2c, uint8_t data)
return 0;
}
-static void ssd0303_event(I2CSlave *i2c, enum i2c_event event)
+static int ssd0303_event(I2CSlave *i2c, enum i2c_event event)
{
ssd0303_state *s = SSD0303(i2c);
@@ -193,6 +193,8 @@ static void ssd0303_event(I2CSlave *i2c, enum i2c_event event)
/* Nothing to do. */
break;
}
+
+ return 0;
}
static void ssd0303_update_display(void *opaque)
diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c
index 23f39de94d..f96a0c2e59 100644
--- a/hw/display/virtio-gpu-3d.c
+++ b/hw/display/virtio-gpu-3d.c
@@ -291,8 +291,11 @@ static void virgl_resource_attach_backing(VirtIOGPU *g,
return;
}
- virgl_renderer_resource_attach_iov(att_rb.resource_id,
- res_iovs, att_rb.nr_entries);
+ ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
+ res_iovs, att_rb.nr_entries);
+
+ if (ret != 0)
+ virtio_gpu_cleanup_mapping_iov(res_iovs, att_rb.nr_entries);
}
static void virgl_resource_detach_backing(VirtIOGPU *g,
@@ -371,8 +374,12 @@ static void virgl_cmd_get_capset(VirtIOGPU *g,
virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
&max_size);
- resp = g_malloc(sizeof(*resp) + max_size);
+ if (!max_size) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+ resp = g_malloc0(sizeof(*resp) + max_size);
resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
virgl_renderer_fill_caps(gc.capset_id,
gc.capset_version,
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 5f32e1aae9..444ca064c1 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -28,6 +28,8 @@
static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
+static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);
+
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...) \
@@ -338,10 +340,14 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
- res->image = pixman_image_create_bits(pformat,
- c2d.width,
- c2d.height,
- NULL, 0);
+
+ res->hostmem = PIXMAN_FORMAT_BPP(pformat) * c2d.width * c2d.height;
+ if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
+ res->image = pixman_image_create_bits(pformat,
+ c2d.width,
+ c2d.height,
+ NULL, 0);
+ }
if (!res->image) {
qemu_log_mask(LOG_GUEST_ERROR,
@@ -353,13 +359,16 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
}
QTAILQ_INSERT_HEAD(&g->reslist, res, next);
+ g->hostmem += res->hostmem;
}
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
struct virtio_gpu_simple_resource *res)
{
pixman_image_unref(res->image);
+ virtio_gpu_cleanup_mapping(res);
QTAILQ_REMOVE(&g->reslist, res, next);
+ g->hostmem -= res->hostmem;
g_free(res);
}
@@ -705,6 +714,11 @@ virtio_gpu_resource_attach_backing(VirtIOGPU *g,
return;
}
+ if (res->iov) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+
ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
if (ret != 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
@@ -989,7 +1003,8 @@ static const VMStateDescription vmstate_virtio_gpu_scanouts = {
},
};
-static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
+static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
VirtIOGPU *g = opaque;
struct virtio_gpu_simple_resource *res;
@@ -1014,9 +1029,12 @@ static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
qemu_put_be32(f, 0); /* end of list */
vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
+
+ return 0;
}
-static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
+static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field)
{
VirtIOGPU *g = opaque;
struct virtio_gpu_simple_resource *res;
@@ -1024,6 +1042,8 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
uint32_t resource_id, pformat;
int i;
+ g->hostmem = 0;
+
resource_id = qemu_get_be32(f);
while (resource_id != 0) {
res = g_new0(struct virtio_gpu_simple_resource, 1);
@@ -1036,15 +1056,19 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
/* allocate */
pformat = get_pixman_format(res->format);
if (!pformat) {
+ g_free(res);
return -EINVAL;
}
res->image = pixman_image_create_bits(pformat,
res->width, res->height,
NULL, 0);
if (!res->image) {
+ g_free(res);
return -EINVAL;
}
+ res->hostmem = PIXMAN_FORMAT_BPP(pformat) * res->width * res->height;
+
res->addrs = g_new(uint64_t, res->iov_cnt);
res->iov = g_new(struct iovec, res->iov_cnt);
@@ -1062,11 +1086,22 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
res->iov[i].iov_base =
cpu_physical_memory_map(res->addrs[i], &len, 1);
if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
+ /* Clean up the half-a-mapping we just created... */
+ if (res->iov[i].iov_base) {
+ cpu_physical_memory_unmap(res->iov[i].iov_base,
+ len, 0, 0);
+ }
+ /* ...and the mappings for previous loop iterations */
+ res->iov_cnt = i;
+ virtio_gpu_cleanup_mapping(res);
+ pixman_image_unref(res->image);
+ g_free(res);
return -EINVAL;
}
}
QTAILQ_INSERT_HEAD(&g->reslist, res, next);
+ g->hostmem += res->hostmem;
resource_id = qemu_get_be32(f);
}
@@ -1101,6 +1136,7 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
VirtIOGPU *g = VIRTIO_GPU(qdev);
bool have_virgl;
+ Error *local_err = NULL;
int i;
if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
@@ -1108,14 +1144,6 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
return;
}
- g->config_size = sizeof(struct virtio_gpu_config);
- g->virtio_config.num_scanouts = g->conf.max_outputs;
- virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
- g->config_size);
-
- g->req_state[0].width = 1024;
- g->req_state[0].height = 768;
-
g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
have_virgl = false;
@@ -1127,6 +1155,24 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
}
if (virtio_gpu_virgl_enabled(g->conf)) {
+ error_setg(&g->migration_blocker, "virgl is not yet migratable");
+ migrate_add_blocker(g->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(g->migration_blocker);
+ return;
+ }
+ }
+
+ g->config_size = sizeof(struct virtio_gpu_config);
+ g->virtio_config.num_scanouts = g->conf.max_outputs;
+ virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
+ g->config_size);
+
+ g->req_state[0].width = 1024;
+ g->req_state[0].height = 768;
+
+ if (virtio_gpu_virgl_enabled(g->conf)) {
/* use larger control queue in 3d mode */
g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
@@ -1152,11 +1198,6 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
dpy_gfx_replace_surface(g->scanout[i].con, NULL);
}
}
-
- if (virtio_gpu_virgl_enabled(g->conf)) {
- error_setg(&g->migration_blocker, "virgl is not yet migratable");
- migrate_add_blocker(g->migration_blocker);
- }
}
static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
@@ -1241,6 +1282,8 @@ static const VMStateDescription vmstate_virtio_gpu = {
static Property virtio_gpu_properties[] = {
DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
+ DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem,
+ 256 * 1024 * 1024),
#ifdef CONFIG_VIRGL
DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
@@ -1266,6 +1309,7 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data)
dc->props = virtio_gpu_properties;
dc->vmsd = &vmstate_virtio_gpu;
+ dc->hotpluggable = false;
}
static const TypeInfo virtio_gpu_info = {
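
The realize change above (and the arm_gic_kvm.c change later in this series) follows the new migrate_add_blocker() contract: the call now takes an Error ** and may fail, for instance when QEMU is started with --only-migratable, so the blocker is registered before any other state is set up and realize can bail out cleanly. A generic sketch of that shape; MyState, MY_STATE and the message are hypothetical, and the usual includes and QOM boilerplate are omitted:

    static void my_realize(DeviceState *dev, Error **errp)
    {
        MyState *s = MY_STATE(dev);          /* hypothetical device */
        Error *local_err = NULL;

        error_setg(&s->migration_blocker, "my-device is not yet migratable");
        migrate_add_blocker(s->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(s->migration_blocker);
            return;                          /* nothing else allocated yet */
        }

        /* ... only now create queues, MMIO regions, timers, ... */
    }
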
diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c
index f43eb09304..f7b7b80c68 100644
--- a/hw/display/xlnx_dp.c
+++ b/hw/display/xlnx_dp.c
@@ -555,7 +555,7 @@ static void xlnx_dp_recreate_surface(XlnxDPState *s)
if ((width != 0) && (height != 0)) {
/*
* As dpy_gfx_replace_surface calls qemu_free_displaysurface on the
- * surface we need to be carefull and don't free the surface associated
+ * surface we need to be careful and don't free the surface associated
* to the console or double free will happen.
*/
if (s->bout_plane.surface != current_console_surface) {
@@ -1160,7 +1160,7 @@ static void xlnx_dp_update_display(void *opaque)
*/
if (!xlnx_dpdma_start_operation(s->dpdma, 3, false)) {
/*
- * An error occured don't do anything with the data..
+ * An error occurred don't do anything with the data..
* Trigger an underflow interrupt.
*/
s->core_registers[DP_INT_STATUS] |= (1 << 21);
diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c
index 1bd5eaf911..f82e3e6555 100644
--- a/hw/gpio/max7310.c
+++ b/hw/gpio/max7310.c
@@ -129,7 +129,7 @@ static int max7310_tx(I2CSlave *i2c, uint8_t data)
return 0;
}
-static void max7310_event(I2CSlave *i2c, enum i2c_event event)
+static int max7310_event(I2CSlave *i2c, enum i2c_event event)
{
MAX7310State *s = MAX7310(i2c);
s->len = 0;
@@ -147,6 +147,8 @@ static void max7310_event(I2CSlave *i2c, enum i2c_event event)
default:
break;
}
+
+ return 0;
}
static const VMStateDescription vmstate_max7310 = {
diff --git a/hw/i2c/core.c b/hw/i2c/core.c
index abd4c4cddb..2c1234cdff 100644
--- a/hw/i2c/core.c
+++ b/hw/i2c/core.c
@@ -88,18 +88,26 @@ int i2c_bus_busy(I2CBus *bus)
return !QLIST_EMPTY(&bus->current_devs);
}
+/* TODO: Make this handle multiple masters. */
/*
- * Returns non-zero if the address is not valid. If this is called
- * again without an intervening i2c_end_transfer(), like in the SMBus
- * case where the operation is switched from write to read, this
- * function will not rescan the bus and thus cannot fail.
+ * Start or continue an i2c transaction. When this is called for the
+ * first time or after an i2c_end_transfer(), if it returns an error
+ * the bus transaction is terminated (or really never started). If
+ * this is called after another i2c_start_transfer() without an
+ * intervening i2c_end_transfer(), and it returns an error, the
+ * transaction will not be terminated. The caller must do it.
+ *
+ * This corresponds with the way real hardware works. The SMBus
+ * protocol uses a start transfer to switch from write to read mode
+ * without releasing the bus. If that fails, the bus is still
+ * in a transaction.
*/
-/* TODO: Make this handle multiple masters. */
int i2c_start_transfer(I2CBus *bus, uint8_t address, int recv)
{
BusChild *kid;
I2CSlaveClass *sc;
I2CNode *node;
+ bool bus_scanned = false;
if (address == I2C_BROADCAST) {
/*
@@ -130,6 +138,7 @@ int i2c_start_transfer(I2CBus *bus, uint8_t address, int recv)
}
}
}
+ bus_scanned = true;
}
if (QLIST_EMPTY(&bus->current_devs)) {
@@ -137,11 +146,21 @@ int i2c_start_transfer(I2CBus *bus, uint8_t address, int recv)
}
QLIST_FOREACH(node, &bus->current_devs, next) {
+ int rv;
+
sc = I2C_SLAVE_GET_CLASS(node->elt);
/* If the bus is already busy, assume this is a repeated
start condition. */
+
if (sc->event) {
- sc->event(node->elt, recv ? I2C_START_RECV : I2C_START_SEND);
+ rv = sc->event(node->elt, recv ? I2C_START_RECV : I2C_START_SEND);
+ if (rv && !bus->broadcast) {
+ if (bus_scanned) {
+ /* First call, terminate the transfer. */
+ i2c_end_transfer(bus);
+ }
+ return rv;
+ }
}
}
return 0;
@@ -260,7 +279,11 @@ static int i2c_slave_qdev_init(DeviceState *dev)
I2CSlave *s = I2C_SLAVE(dev);
I2CSlaveClass *sc = I2C_SLAVE_GET_CLASS(s);
- return sc->init(s);
+ if (sc->init) {
+ return sc->init(s);
+ }
+
+ return 0;
}
DeviceState *i2c_create_slave(I2CBus *bus, const char *name, uint8_t addr)
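
With the event hook now returning int, a slave model can refuse a start condition and the master sees the failure from i2c_start_transfer() as described in the comment above. A minimal sketch of a slave written against the new contract; MySlaveState and its fields are hypothetical:

    #include "qemu/osdep.h"
    #include "hw/i2c/i2c.h"

    typedef struct MySlaveState {
        I2CSlave parent_obj;
        bool readable;                    /* hypothetical: device has data ready */
        int offset;
    } MySlaveState;

    static int my_slave_event(I2CSlave *i2c, enum i2c_event event)
    {
        MySlaveState *s = container_of(i2c, MySlaveState, parent_obj);

        switch (event) {
        case I2C_START_SEND:
            s->offset = 0;
            return 0;                     /* 0 = ACK the (repeated) start */
        case I2C_START_RECV:
            /* non-zero = NACK; i2c_start_transfer() now propagates this */
            return s->readable ? 0 : -1;
        default:
            return 0;
        }
    }
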
diff --git a/hw/i2c/i2c-ddc.c b/hw/i2c/i2c-ddc.c
index 1227212934..66899d7233 100644
--- a/hw/i2c/i2c-ddc.c
+++ b/hw/i2c/i2c-ddc.c
@@ -230,13 +230,15 @@ static void i2c_ddc_reset(DeviceState *ds)
s->reg = 0;
}
-static void i2c_ddc_event(I2CSlave *i2c, enum i2c_event event)
+static int i2c_ddc_event(I2CSlave *i2c, enum i2c_event event)
{
I2CDDCState *s = I2CDDC(i2c);
if (event == I2C_START_SEND) {
s->firstbyte = true;
}
+
+ return 0;
}
static int i2c_ddc_rx(I2CSlave *i2c)
diff --git a/hw/i2c/imx_i2c.c b/hw/i2c/imx_i2c.c
index 37e5a62ce7..6c81b98ebd 100644
--- a/hw/i2c/imx_i2c.c
+++ b/hw/i2c/imx_i2c.c
@@ -310,7 +310,7 @@ static void imx_i2c_realize(DeviceState *dev, Error **errp)
IMX_I2C_MEM_SIZE);
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
- s->bus = i2c_init_bus(DEVICE(dev), "i2c");
+ s->bus = i2c_init_bus(DEVICE(dev), NULL);
}
static void imx_i2c_class_init(ObjectClass *klass, void *data)
diff --git a/hw/i2c/smbus.c b/hw/i2c/smbus.c
index 5b4dd3eba4..2d1b79a689 100644
--- a/hw/i2c/smbus.c
+++ b/hw/i2c/smbus.c
@@ -67,7 +67,7 @@ static void smbus_do_write(SMBusDevice *dev)
}
}
-static void smbus_i2c_event(I2CSlave *s, enum i2c_event event)
+static int smbus_i2c_event(I2CSlave *s, enum i2c_event event)
{
SMBusDevice *dev = SMBUS_DEVICE(s);
@@ -148,6 +148,8 @@ static void smbus_i2c_event(I2CSlave *s, enum i2c_event event)
break;
}
}
+
+ return 0;
}
static int smbus_i2c_recv(I2CSlave *s)
@@ -249,7 +251,8 @@ int smbus_read_byte(I2CBus *bus, uint8_t addr, uint8_t command)
}
i2c_send(bus, command);
if (i2c_start_transfer(bus, addr, 1)) {
- assert(0);
+ i2c_end_transfer(bus);
+ return -1;
}
data = i2c_recv(bus);
i2c_nack(bus);
@@ -276,7 +279,8 @@ int smbus_read_word(I2CBus *bus, uint8_t addr, uint8_t command)
}
i2c_send(bus, command);
if (i2c_start_transfer(bus, addr, 1)) {
- assert(0);
+ i2c_end_transfer(bus);
+ return -1;
}
data = i2c_recv(bus);
data |= i2c_recv(bus) << 8;
@@ -307,7 +311,8 @@ int smbus_read_block(I2CBus *bus, uint8_t addr, uint8_t command, uint8_t *data)
}
i2c_send(bus, command);
if (i2c_start_transfer(bus, addr, 1)) {
- assert(0);
+ i2c_end_transfer(bus);
+ return -1;
}
len = i2c_recv(bus);
if (len > 32) {
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 42ecf619d5..1c928abb28 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -101,8 +101,6 @@ typedef struct AcpiPmInfo {
uint32_t gpe0_blk_len;
uint32_t io_base;
uint16_t cpu_hp_io_base;
- uint16_t mem_hp_io_base;
- uint16_t mem_hp_io_len;
uint16_t pcihp_io_base;
uint16_t pcihp_io_len;
} AcpiPmInfo;
@@ -148,9 +146,6 @@ static void acpi_get_pm_info(AcpiPmInfo *pm)
}
assert(obj);
- pm->mem_hp_io_base = ACPI_MEMORY_HOTPLUG_BASE;
- pm->mem_hp_io_len = ACPI_MEMORY_HOTPLUG_IO_LEN;
-
/* Fill in optional s3/s4 related properties */
o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL);
if (o) {
@@ -337,7 +332,7 @@ build_fadt(GArray *table_data, BIOSLinker *linker, AcpiPmInfo *pm,
}
void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
- CPUArchIdList *apic_ids, GArray *entry)
+ const CPUArchIdList *apic_ids, GArray *entry)
{
uint32_t apic_id = apic_ids->cpus[uid].arch_id;
@@ -378,7 +373,7 @@ static void
build_madt(GArray *table_data, BIOSLinker *linker, PCMachineState *pcms)
{
MachineClass *mc = MACHINE_GET_CLASS(pcms);
- CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(pcms));
+ const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(pcms));
int madt_start = table_data->len;
AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(pcms->acpi_dev);
AcpiDeviceIf *adev = ACPI_DEVICE_IF(pcms->acpi_dev);
@@ -399,7 +394,6 @@ build_madt(GArray *table_data, BIOSLinker *linker, PCMachineState *pcms)
x2apic_mode = true;
}
}
- g_free(apic_ids);
io_apic = acpi_data_push(table_data, sizeof *io_apic);
io_apic->type = ACPI_APIC_IO;
@@ -1038,130 +1032,6 @@ static Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set)
return crs;
}
-static void build_memory_devices(Aml *sb_scope, int nr_mem,
- uint16_t io_base, uint16_t io_len)
-{
- int i;
- Aml *scope;
- Aml *crs;
- Aml *field;
- Aml *dev;
- Aml *method;
- Aml *ifctx;
-
- /* build memory devices */
- assert(nr_mem <= ACPI_MAX_RAM_SLOTS);
- scope = aml_scope("\\_SB.PCI0." MEMORY_HOTPLUG_DEVICE);
- aml_append(scope,
- aml_name_decl(MEMORY_SLOTS_NUMBER, aml_int(nr_mem))
- );
-
- crs = aml_resource_template();
- aml_append(crs,
- aml_io(AML_DECODE16, io_base, io_base, 0, io_len)
- );
- aml_append(scope, aml_name_decl("_CRS", crs));
-
- aml_append(scope, aml_operation_region(
- MEMORY_HOTPLUG_IO_REGION, AML_SYSTEM_IO,
- aml_int(io_base), io_len)
- );
-
- field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_DWORD_ACC,
- AML_NOLOCK, AML_PRESERVE);
- aml_append(field, /* read only */
- aml_named_field(MEMORY_SLOT_ADDR_LOW, 32));
- aml_append(field, /* read only */
- aml_named_field(MEMORY_SLOT_ADDR_HIGH, 32));
- aml_append(field, /* read only */
- aml_named_field(MEMORY_SLOT_SIZE_LOW, 32));
- aml_append(field, /* read only */
- aml_named_field(MEMORY_SLOT_SIZE_HIGH, 32));
- aml_append(field, /* read only */
- aml_named_field(MEMORY_SLOT_PROXIMITY, 32));
- aml_append(scope, field);
-
- field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_BYTE_ACC,
- AML_NOLOCK, AML_WRITE_AS_ZEROS);
- aml_append(field, aml_reserved_field(160 /* bits, Offset(20) */));
- aml_append(field, /* 1 if enabled, read only */
- aml_named_field(MEMORY_SLOT_ENABLED, 1));
- aml_append(field,
- /*(read) 1 if has a insert event. (write) 1 to clear event */
- aml_named_field(MEMORY_SLOT_INSERT_EVENT, 1));
- aml_append(field,
- /* (read) 1 if has a remove event. (write) 1 to clear event */
- aml_named_field(MEMORY_SLOT_REMOVE_EVENT, 1));
- aml_append(field,
- /* initiates device eject, write only */
- aml_named_field(MEMORY_SLOT_EJECT, 1));
- aml_append(scope, field);
-
- field = aml_field(MEMORY_HOTPLUG_IO_REGION, AML_DWORD_ACC,
- AML_NOLOCK, AML_PRESERVE);
- aml_append(field, /* DIMM selector, write only */
- aml_named_field(MEMORY_SLOT_SLECTOR, 32));
- aml_append(field, /* _OST event code, write only */
- aml_named_field(MEMORY_SLOT_OST_EVENT, 32));
- aml_append(field, /* _OST status code, write only */
- aml_named_field(MEMORY_SLOT_OST_STATUS, 32));
- aml_append(scope, field);
- aml_append(sb_scope, scope);
-
- for (i = 0; i < nr_mem; i++) {
- #define BASEPATH "\\_SB.PCI0." MEMORY_HOTPLUG_DEVICE "."
- const char *s;
-
- dev = aml_device("MP%02X", i);
- aml_append(dev, aml_name_decl("_UID", aml_string("0x%02X", i)));
- aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C80")));
-
- method = aml_method("_CRS", 0, AML_NOTSERIALIZED);
- s = BASEPATH MEMORY_SLOT_CRS_METHOD;
- aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
- aml_append(dev, method);
-
- method = aml_method("_STA", 0, AML_NOTSERIALIZED);
- s = BASEPATH MEMORY_SLOT_STATUS_METHOD;
- aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
- aml_append(dev, method);
-
- method = aml_method("_PXM", 0, AML_NOTSERIALIZED);
- s = BASEPATH MEMORY_SLOT_PROXIMITY_METHOD;
- aml_append(method, aml_return(aml_call1(s, aml_name("_UID"))));
- aml_append(dev, method);
-
- method = aml_method("_OST", 3, AML_NOTSERIALIZED);
- s = BASEPATH MEMORY_SLOT_OST_METHOD;
-
- aml_append(method, aml_return(aml_call4(
- s, aml_name("_UID"), aml_arg(0), aml_arg(1), aml_arg(2)
- )));
- aml_append(dev, method);
-
- method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
- s = BASEPATH MEMORY_SLOT_EJECT_METHOD;
- aml_append(method, aml_return(aml_call2(
- s, aml_name("_UID"), aml_arg(0))));
- aml_append(dev, method);
-
- aml_append(sb_scope, dev);
- }
-
- /* build Method(MEMORY_SLOT_NOTIFY_METHOD, 2) {
- * If (LEqual(Arg0, 0x00)) {Notify(MP00, Arg1)} ... }
- */
- method = aml_method(MEMORY_SLOT_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
- for (i = 0; i < nr_mem; i++) {
- ifctx = aml_if(aml_equal(aml_arg(0), aml_int(i)));
- aml_append(ifctx,
- aml_notify(aml_name("MP%.02X", i), aml_arg(1))
- );
- aml_append(method, ifctx);
- }
- aml_append(sb_scope, method);
-}
-
static void build_hpet_aml(Aml *table)
{
Aml *crs;
@@ -2049,8 +1919,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
build_cpus_aml(dsdt, machine, opts, pm->cpu_hp_io_base,
"\\_SB.PCI0", "\\_GPE._E02");
}
- build_memory_hotplug_aml(dsdt, nr_mem, pm->mem_hp_io_base,
- pm->mem_hp_io_len);
+ build_memory_hotplug_aml(dsdt, nr_mem, "\\_SB.PCI0", "\\_GPE._E03");
scope = aml_scope("_GPE");
{
@@ -2065,10 +1934,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
aml_append(scope, method);
}
- method = aml_method("_E03", 0, AML_NOTSERIALIZED);
- aml_append(method, aml_call0(MEMORY_HOTPLUG_HANDLER_PATH));
- aml_append(scope, method);
-
if (pcms->acpi_nvdimm_state.is_enabled) {
method = aml_method("_E04", 0, AML_NOTSERIALIZED);
aml_append(method, aml_notify(aml_name("\\_SB.NVDR"),
@@ -2321,45 +2186,40 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
sb_scope = aml_scope("\\_SB");
{
- build_memory_devices(sb_scope, nr_mem, pm->mem_hp_io_base,
- pm->mem_hp_io_len);
+ Object *pci_host;
+ PCIBus *bus = NULL;
- {
- Object *pci_host;
- PCIBus *bus = NULL;
+ pci_host = acpi_get_i386_pci_host();
+ if (pci_host) {
+ bus = PCI_HOST_BRIDGE(pci_host)->bus;
+ }
- pci_host = acpi_get_i386_pci_host();
- if (pci_host) {
- bus = PCI_HOST_BRIDGE(pci_host)->bus;
+ if (bus) {
+ Aml *scope = aml_scope("PCI0");
+ /* Scan all PCI buses. Generate tables to support hotplug. */
+ build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en);
+
+ if (misc->tpm_version != TPM_VERSION_UNSPEC) {
+ dev = aml_device("ISA.TPM");
+ aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C31")));
+ aml_append(dev, aml_name_decl("_STA", aml_int(0xF)));
+ crs = aml_resource_template();
+ aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE,
+ TPM_TIS_ADDR_SIZE, AML_READ_WRITE));
+ /*
+ FIXME: TPM_TIS_IRQ=5 conflicts with PNP0C0F irqs,
+ Rewrite to take IRQ from TPM device model and
+ fix default IRQ value there to use some unused IRQ
+ */
+ /* aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); */
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
}
- if (bus) {
- Aml *scope = aml_scope("PCI0");
- /* Scan all PCI buses. Generate tables to support hotplug. */
- build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en);
-
- if (misc->tpm_version != TPM_VERSION_UNSPEC) {
- dev = aml_device("ISA.TPM");
- aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C31")));
- aml_append(dev, aml_name_decl("_STA", aml_int(0xF)));
- crs = aml_resource_template();
- aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE,
- TPM_TIS_ADDR_SIZE, AML_READ_WRITE));
- /*
- FIXME: TPM_TIS_IRQ=5 conflicts with PNP0C0F irqs,
- Rewrite to take IRQ from TPM device model and
- fix default IRQ value there to use some unused IRQ
- */
- /* aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); */
- aml_append(dev, aml_name_decl("_CRS", crs));
- aml_append(scope, dev);
- }
-
- aml_append(sb_scope, scope);
- }
+ aml_append(sb_scope, scope);
}
- aml_append(dsdt, sb_scope);
}
+ aml_append(dsdt, sb_scope);
/* copy AML table into ACPI tables blob and patch header there */
g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
@@ -2433,7 +2293,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
int srat_start, numa_start, slots;
uint64_t mem_len, mem_base, next_base;
MachineClass *mc = MACHINE_GET_CLASS(machine);
- CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
+ const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
PCMachineState *pcms = PC_MACHINE(machine);
ram_addr_t hotplugabble_address_space_size =
object_property_get_int(OBJECT(pcms), PC_MACHINE_MEMHP_REGION_SIZE,
@@ -2532,7 +2392,6 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
(void *)(table_data->data + srat_start),
"SRAT",
table_data->len - srat_start, 1, NULL, NULL);
- g_free(apic_ids);
}
static void
@@ -2575,6 +2434,7 @@ build_dmar_q35(GArray *table_data, BIOSLinker *linker)
AcpiTableDmar *dmar;
AcpiDmarHardwareUnit *drhd;
+ AcpiDmarRootPortATS *atsr;
uint8_t dmar_flags = 0;
X86IOMMUState *iommu = x86_iommu_get_default();
AcpiDmarDeviceScope *scope = NULL;
@@ -2608,6 +2468,14 @@ build_dmar_q35(GArray *table_data, BIOSLinker *linker)
scope->path[0].device = PCI_SLOT(Q35_PSEUDO_DEVFN_IOAPIC);
scope->path[0].function = PCI_FUNC(Q35_PSEUDO_DEVFN_IOAPIC);
+ if (iommu->dt_supported) {
+ atsr = acpi_data_push(table_data, sizeof(*atsr));
+ atsr->type = cpu_to_le16(ACPI_DMAR_TYPE_ATSR);
+ atsr->length = cpu_to_le16(sizeof(*atsr));
+ atsr->flags = ACPI_DMAR_ATSR_ALL_PORTS;
+ atsr->pci_segment = cpu_to_le16(0);
+ }
+
build_header(linker, table_data, (void *)(table_data->data + dmar_start),
"DMAR", table_data->len - dmar_start, 1, NULL, NULL);
}
@@ -2936,7 +2804,7 @@ static MemoryRegion *acpi_add_rom_blob(AcpiBuildState *build_state,
uint64_t max_size)
{
return rom_add_blob(name, blob->data, acpi_data_len(blob), max_size, -1,
- name, acpi_build_update, build_state, NULL);
+ name, acpi_build_update, build_state, NULL, true);
}
static const VMStateDescription vmstate_acpi_build = {
@@ -3002,7 +2870,7 @@ void acpi_setup(void)
build_state->rsdp = g_memdup(tables.rsdp->data, rsdp_size);
fw_cfg_add_file_callback(pcms->fw_cfg, ACPI_BUILD_RSDP_FILE,
acpi_build_update, build_state,
- build_state->rsdp, rsdp_size);
+ build_state->rsdp, rsdp_size, true);
build_state->rsdp_mr = NULL;
} else {
build_state->rsdp = NULL;
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index 47b79d9112..e0732ccaf1 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -562,7 +562,7 @@ static void amdvi_mmio_trace(hwaddr addr, unsigned size)
trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
} else {
index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
- trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
+ trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
}
}
diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
index 884926e9e7..0d3dc6a9f2 100644
--- a/hw/i386/amd_iommu.h
+++ b/hw/i386/amd_iommu.h
@@ -49,8 +49,8 @@
#define AMDVI_CAPAB_INIT_TYPE (3 << 16)
/* No. of used MMIO registers */
-#define AMDVI_MMIO_REGS_HIGH 8
-#define AMDVI_MMIO_REGS_LOW 7
+#define AMDVI_MMIO_REGS_HIGH 7
+#define AMDVI_MMIO_REGS_LOW 8
/* MMIO registers */
#define AMDVI_MMIO_DEVICE_TABLE 0x0000
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 5f3e35123d..ec62239aba 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -738,11 +738,18 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
"context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_INV;
- } else if (ce->lo & VTD_CONTEXT_ENTRY_TT) {
- VTD_DPRINTF(GENERAL, "error: unsupported Translation Type in "
- "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
- ce->hi, ce->lo);
- return -VTD_FR_CONTEXT_ENTRY_INV;
+ } else {
+ switch (ce->lo & VTD_CONTEXT_ENTRY_TT) {
+ case VTD_CONTEXT_TT_MULTI_LEVEL:
+ /* fall through */
+ case VTD_CONTEXT_TT_DEV_IOTLB:
+ break;
+ default:
+ VTD_DPRINTF(GENERAL, "error: unsupported Translation Type in "
+ "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
+ ce->hi, ce->lo);
+ return -VTD_FR_CONTEXT_ENTRY_INV;
+ }
}
return 0;
}
@@ -1438,7 +1445,61 @@ static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
vtd_iec_notify_all(s, !inv_desc->iec.granularity,
inv_desc->iec.index,
inv_desc->iec.index_mask);
+ return true;
+}
+static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
+ VTDInvDesc *inv_desc)
+{
+ VTDAddressSpace *vtd_dev_as;
+ IOMMUTLBEntry entry;
+ struct VTDBus *vtd_bus;
+ hwaddr addr;
+ uint64_t sz;
+ uint16_t sid;
+ uint8_t devfn;
+ bool size;
+ uint8_t bus_num;
+
+ addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
+ sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
+ devfn = sid & 0xff;
+ bus_num = sid >> 8;
+ size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);
+
+ if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
+ (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
+ VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Device "
+ "IOTLB Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
+ inv_desc->hi, inv_desc->lo);
+ return false;
+ }
+
+ vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
+ if (!vtd_bus) {
+ goto done;
+ }
+
+ vtd_dev_as = vtd_bus->dev_as[devfn];
+ if (!vtd_dev_as) {
+ goto done;
+ }
+
+ if (size) {
+ sz = 1 << (ctz64(~(addr | (VTD_PAGE_MASK_4K - 1))) + 1);
+ addr &= ~(sz - 1);
+ } else {
+ sz = VTD_PAGE_SIZE;
+ }
+
+ entry.target_as = &vtd_dev_as->as;
+ entry.addr_mask = sz - 1;
+ entry.iova = addr;
+ entry.perm = IOMMU_NONE;
+ entry.translated_addr = 0;
+ memory_region_notify_iommu(entry.target_as->root, entry);
+
+done:
return true;
}
@@ -1490,6 +1551,14 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s)
}
break;
+ case VTD_INV_DESC_DEVICE:
+ VTD_DPRINTF(INV, "Device IOTLB Invalidation Descriptor hi 0x%"PRIx64
+ " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
+ if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
+ return false;
+ }
+ break;
+
default:
VTD_DPRINTF(GENERAL, "error: unkonw Invalidation Descriptor type "
"hi 0x%"PRIx64 " lo 0x%"PRIx64 " type %"PRIu8,
@@ -1996,7 +2065,27 @@ static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
static const VMStateDescription vtd_vmstate = {
.name = "iommu-intel",
- .unmigratable = 1,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .priority = MIG_PRI_IOMMU,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(root, IntelIOMMUState),
+ VMSTATE_UINT64(intr_root, IntelIOMMUState),
+ VMSTATE_UINT64(iq, IntelIOMMUState),
+ VMSTATE_UINT32(intr_size, IntelIOMMUState),
+ VMSTATE_UINT16(iq_head, IntelIOMMUState),
+ VMSTATE_UINT16(iq_tail, IntelIOMMUState),
+ VMSTATE_UINT16(iq_size, IntelIOMMUState),
+ VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
+ VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
+ VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
+ VMSTATE_BOOL(root_extended, IntelIOMMUState),
+ VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
+ VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
+ VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
+ VMSTATE_BOOL(intr_eime, IntelIOMMUState),
+ VMSTATE_END_OF_LIST()
+ }
};
static const MemoryRegionOps vtd_mem_ops = {
@@ -2324,19 +2413,22 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
uintptr_t key = (uintptr_t)bus;
VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
VTDAddressSpace *vtd_dev_as;
+ char name[128];
if (!vtd_bus) {
+ uintptr_t *new_key = g_malloc(sizeof(*new_key));
+ *new_key = (uintptr_t)bus;
/* No corresponding free() */
vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
X86_IOMMU_PCI_DEVFN_MAX);
vtd_bus->bus = bus;
- key = (uintptr_t)bus;
- g_hash_table_insert(s->vtd_as_by_busptr, &key, vtd_bus);
+ g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
}
vtd_dev_as = vtd_bus->dev_as[devfn];
if (!vtd_dev_as) {
+ snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));
vtd_dev_as->bus = bus;
@@ -2351,7 +2443,7 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
memory_region_add_subregion(&vtd_dev_as->iommu, VTD_INTERRUPT_ADDR_FIRST,
&vtd_dev_as->iommu_ir);
address_space_init(&vtd_dev_as->as,
- &vtd_dev_as->iommu, "intel_iommu");
+ &vtd_dev_as->iommu, name);
}
return vtd_dev_as;
}
@@ -2392,6 +2484,10 @@ static void vtd_init(IntelIOMMUState *s)
assert(s->intr_eim != ON_OFF_AUTO_AUTO);
}
+ if (x86_iommu->dt_supported) {
+ s->ecap |= VTD_ECAP_DT;
+ }
+
vtd_reset_context_cache(s);
vtd_reset_iotlb(s);
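
The size computation in vtd_process_device_iotlb_desc() follows the PCIe ATS invalidation encoding: with the S bit set, the lowest zero bit of the address field at or above bit 12 gives the size of the range, and the base is aligned down to it. A standalone sketch of that decode, assuming the 4K-mask term reduces to the low twelve bits; the sample value is made up:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* bits 12-14 set, bit 15 clear: encodes a 64KiB invalidation */
        uint64_t addr = 0x12340000ULL | 0x7000ULL;

        uint64_t sz = 1ULL << (__builtin_ctzll(~(addr | 0xfffULL)) + 1);
        addr &= ~(sz - 1);                /* align the base down to the range */

        printf("base 0x%" PRIx64 ", size 0x%" PRIx64 "\n", addr, sz);
        return 0;
    }
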
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index 11abfa2233..356f188b73 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -183,6 +183,7 @@
/* (offset >> 4) << 8 */
#define VTD_ECAP_IRO (DMAR_IOTLB_REG_OFFSET << 4)
#define VTD_ECAP_QI (1ULL << 1)
+#define VTD_ECAP_DT (1ULL << 2)
/* Interrupt Remapping support */
#define VTD_ECAP_IR (1ULL << 3)
#define VTD_ECAP_EIM (1ULL << 4)
@@ -326,6 +327,7 @@ typedef union VTDInvDesc VTDInvDesc;
#define VTD_INV_DESC_TYPE 0xf
#define VTD_INV_DESC_CC 0x1 /* Context-cache Invalidate Desc */
#define VTD_INV_DESC_IOTLB 0x2
+#define VTD_INV_DESC_DEVICE 0x3
#define VTD_INV_DESC_IEC 0x4 /* Interrupt Entry Cache
Invalidate Descriptor */
#define VTD_INV_DESC_WAIT 0x5 /* Invalidation Wait Descriptor */
@@ -361,6 +363,13 @@ typedef union VTDInvDesc VTDInvDesc;
#define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000ff00ULL
#define VTD_INV_DESC_IOTLB_RSVD_HI 0xf80ULL
+/* Mask for Device IOTLB Invalidate Descriptor */
+#define VTD_INV_DESC_DEVICE_IOTLB_ADDR(val) ((val) & 0xfffffffffffff000ULL)
+#define VTD_INV_DESC_DEVICE_IOTLB_SIZE(val) ((val) & 0x1)
+#define VTD_INV_DESC_DEVICE_IOTLB_SID(val) (((val) >> 32) & 0xFFFFULL)
+#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI 0xffeULL
+#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0fff8
+
/* Information about page-selective IOTLB invalidate */
struct VTDIOTLBPageInvInfo {
uint16_t domain_id;
@@ -399,8 +408,8 @@ typedef struct VTDRootEntry VTDRootEntry;
#define VTD_CONTEXT_ENTRY_FPD (1ULL << 1) /* Fault Processing Disable */
#define VTD_CONTEXT_ENTRY_TT (3ULL << 2) /* Translation Type */
#define VTD_CONTEXT_TT_MULTI_LEVEL 0
-#define VTD_CONTEXT_TT_DEV_IOTLB 1
-#define VTD_CONTEXT_TT_PASS_THROUGH 2
+#define VTD_CONTEXT_TT_DEV_IOTLB (1ULL << 2)
+#define VTD_CONTEXT_TT_PASS_THROUGH (2ULL << 2)
/* Second Level Page Translation Pointer*/
#define VTD_CONTEXT_ENTRY_SLPTPTR (~0xfffULL)
#define VTD_CONTEXT_ENTRY_RSVD_LO (0xff0ULL | ~VTD_HAW_MASK)
diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c
index df5180b1e0..1df6d26816 100644
--- a/hw/i386/kvm/apic.c
+++ b/hw/i386/kvm/apic.c
@@ -14,6 +14,7 @@
#include "cpu.h"
#include "hw/i386/apic_internal.h"
#include "hw/pci/msi.h"
+#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "target/i386/kvm_i386.h"
diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c
index b30d1b90c6..702e281dc8 100644
--- a/hw/i386/kvmvapic.c
+++ b/hw/i386/kvmvapic.c
@@ -14,6 +14,7 @@
#include "exec/exec-all.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
+#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "hw/sysbus.h"
@@ -534,7 +535,6 @@ static int patch_hypercalls(VAPICROMState *s)
uint8_t alternates[2];
const uint8_t *pattern;
const uint8_t *patch;
- int patches = 0;
off_t pos;
uint8_t *rom;
@@ -565,11 +565,6 @@ static int patch_hypercalls(VAPICROMState *s)
}
g_free(rom);
-
- if (patches != 0 && patches != 2) {
- return -1;
- }
-
return 0;
}
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 25e8586b48..706e2330ac 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -701,16 +701,20 @@ static uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
}
}
-static void pc_build_smbios(FWCfgState *fw_cfg)
+static void pc_build_smbios(PCMachineState *pcms)
{
uint8_t *smbios_tables, *smbios_anchor;
size_t smbios_tables_len, smbios_anchor_len;
struct smbios_phys_mem_area *mem_array;
unsigned i, array_count;
+ X86CPU *cpu = X86_CPU(pcms->possible_cpus->cpus[0].cpu);
+
+ /* tell smbios about cpuid version and features */
+ smbios_set_cpuid(cpu->env.cpuid_version, cpu->env.features[FEAT_1_EDX]);
smbios_tables = smbios_get_table_legacy(&smbios_tables_len);
if (smbios_tables) {
- fw_cfg_add_bytes(fw_cfg, FW_CFG_SMBIOS_ENTRIES,
+ fw_cfg_add_bytes(pcms->fw_cfg, FW_CFG_SMBIOS_ENTRIES,
smbios_tables, smbios_tables_len);
}
@@ -731,9 +735,9 @@ static void pc_build_smbios(FWCfgState *fw_cfg)
g_free(mem_array);
if (smbios_anchor) {
- fw_cfg_add_file(fw_cfg, "etc/smbios/smbios-tables",
+ fw_cfg_add_file(pcms->fw_cfg, "etc/smbios/smbios-tables",
smbios_tables, smbios_tables_len);
- fw_cfg_add_file(fw_cfg, "etc/smbios/smbios-anchor",
+ fw_cfg_add_file(pcms->fw_cfg, "etc/smbios/smbios-anchor",
smbios_anchor, smbios_anchor_len);
}
}
@@ -1088,28 +1092,24 @@ void pc_acpi_smi_interrupt(void *opaque, int irq, int level)
}
}
-static X86CPU *pc_new_cpu(const char *typename, int64_t apic_id,
- Error **errp)
+static void pc_new_cpu(const char *typename, int64_t apic_id, Error **errp)
{
- X86CPU *cpu = NULL;
+ Object *cpu = NULL;
Error *local_err = NULL;
- cpu = X86_CPU(object_new(typename));
+ cpu = object_new(typename);
- object_property_set_int(OBJECT(cpu), apic_id, "apic-id", &local_err);
- object_property_set_bool(OBJECT(cpu), true, "realized", &local_err);
+ object_property_set_int(cpu, apic_id, "apic-id", &local_err);
+ object_property_set_bool(cpu, true, "realized", &local_err);
+ object_unref(cpu);
if (local_err) {
error_propagate(errp, local_err);
- object_unref(OBJECT(cpu));
- cpu = NULL;
}
- return cpu;
}
void pc_hot_add_cpu(const int64_t id, Error **errp)
{
- X86CPU *cpu;
ObjectClass *oc;
PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
int64_t apic_id = x86_cpu_apic_id_from_index(id);
@@ -1129,12 +1129,11 @@ void pc_hot_add_cpu(const int64_t id, Error **errp)
assert(pcms->possible_cpus->cpus[0].cpu); /* BSP is always present */
oc = OBJECT_CLASS(CPU_GET_CLASS(pcms->possible_cpus->cpus[0].cpu));
- cpu = pc_new_cpu(object_class_get_name(oc), apic_id, &local_err);
+ pc_new_cpu(object_class_get_name(oc), apic_id, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
- object_unref(OBJECT(cpu));
}
void pc_cpus_init(PCMachineState *pcms)
@@ -1144,7 +1143,6 @@ void pc_cpus_init(PCMachineState *pcms)
ObjectClass *oc;
const char *typename;
gchar **model_pieces;
- X86CPU *cpu = NULL;
MachineState *machine = MACHINE(pcms);
/* init CPUs */
@@ -1186,14 +1184,9 @@ void pc_cpus_init(PCMachineState *pcms)
pcms->possible_cpus->cpus[i].arch_id = x86_cpu_apic_id_from_index(i);
pcms->possible_cpus->len++;
if (i < smp_cpus) {
- cpu = pc_new_cpu(typename, x86_cpu_apic_id_from_index(i),
- &error_fatal);
- object_unref(OBJECT(cpu));
+ pc_new_cpu(typename, x86_cpu_apic_id_from_index(i), &error_fatal);
}
}
-
- /* tell smbios about cpuid version and features */
- smbios_set_cpuid(cpu->env.cpuid_version, cpu->env.features[FEAT_1_EDX]);
}
static void pc_build_feature_control_file(PCMachineState *pcms)
@@ -1266,7 +1259,7 @@ void pc_machine_done(Notifier *notifier, void *data)
acpi_setup();
if (pcms->fw_cfg) {
- pc_build_smbios(pcms->fw_cfg);
+ pc_build_smbios(pcms);
pc_build_feature_control_file(pcms);
/* update FW_CFG_NB_CPUS to account for -device added CPUs */
fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus);
@@ -1784,7 +1777,7 @@ static int pc_apic_cmp(const void *a, const void *b)
/* returns pointer to CPUArchId descriptor that matches CPU's apic_id
* in pcms->possible_cpus->cpus, if pcms->possible_cpus->cpus has no
- * entry correponding to CPU's apic_id returns NULL.
+ * entry corresponding to CPU's apic_id returns NULL.
*/
static CPUArchId *pc_find_cpu_slot(PCMachineState *pcms, CPUState *cpu,
int *idx)
@@ -1820,8 +1813,10 @@ static void pc_cpu_plug(HotplugHandler *hotplug_dev,
/* increment the number of CPUs */
pcms->boot_cpus++;
- if (dev->hotplugged) {
+ if (pcms->rtc) {
rtc_set_cpus_count(pcms->rtc, pcms->boot_cpus);
+ }
+ if (pcms->fw_cfg) {
fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus);
}
@@ -2245,15 +2240,11 @@ static unsigned pc_cpu_index_to_socket_id(unsigned cpu_index)
return topo.pkg_id;
}
-static CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *machine)
+static const CPUArchIdList *pc_possible_cpu_arch_ids(MachineState *machine)
{
PCMachineState *pcms = PC_MACHINE(machine);
- int len = sizeof(CPUArchIdList) +
- sizeof(CPUArchId) * (pcms->possible_cpus->len);
- CPUArchIdList *list = g_malloc(len);
-
- memcpy(list, pcms->possible_cpus, len);
- return list;
+ assert(pcms->possible_cpus);
+ return pcms->possible_cpus;
}
static HotpluggableCPUList *pc_query_hotpluggable_cpus(MachineState *machine)
diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c
index 5e1adbe53c..9f102aa388 100644
--- a/hw/i386/pc_piix.c
+++ b/hw/i386/pc_piix.c
@@ -437,13 +437,24 @@ static void pc_i440fx_machine_options(MachineClass *m)
m->default_display = "std";
}
-static void pc_i440fx_2_8_machine_options(MachineClass *m)
+static void pc_i440fx_2_9_machine_options(MachineClass *m)
{
pc_i440fx_machine_options(m);
m->alias = "pc";
m->is_default = 1;
}
+DEFINE_I440FX_MACHINE(v2_9, "pc-i440fx-2.9", NULL,
+ pc_i440fx_2_9_machine_options);
+
+static void pc_i440fx_2_8_machine_options(MachineClass *m)
+{
+ pc_i440fx_2_9_machine_options(m);
+ m->is_default = 0;
+ m->alias = NULL;
+ SET_MACHINE_COMPAT(m, PC_COMPAT_2_8);
+}
+
DEFINE_I440FX_MACHINE(v2_8, "pc-i440fx-2.8", NULL,
pc_i440fx_2_8_machine_options);
@@ -451,8 +462,6 @@ DEFINE_I440FX_MACHINE(v2_8, "pc-i440fx-2.8", NULL,
static void pc_i440fx_2_7_machine_options(MachineClass *m)
{
pc_i440fx_2_8_machine_options(m);
- m->is_default = 0;
- m->alias = NULL;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_7);
}
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index d042fe0843..dd792a8547 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -301,19 +301,28 @@ static void pc_q35_machine_options(MachineClass *m)
m->max_cpus = 288;
}
-static void pc_q35_2_8_machine_options(MachineClass *m)
+static void pc_q35_2_9_machine_options(MachineClass *m)
{
pc_q35_machine_options(m);
m->alias = "q35";
}
+DEFINE_Q35_MACHINE(v2_9, "pc-q35-2.9", NULL,
+ pc_q35_2_9_machine_options);
+
+static void pc_q35_2_8_machine_options(MachineClass *m)
+{
+ pc_q35_2_9_machine_options(m);
+ m->alias = NULL;
+ SET_MACHINE_COMPAT(m, PC_COMPAT_2_8);
+}
+
DEFINE_Q35_MACHINE(v2_8, "pc-q35-2.8", NULL,
pc_q35_2_8_machine_options);
static void pc_q35_2_7_machine_options(MachineClass *m)
{
pc_q35_2_8_machine_options(m);
- m->alias = NULL;
m->max_cpus = 255;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_7);
}
diff --git a/hw/i386/pci-assign-load-rom.c b/hw/i386/pci-assign-load-rom.c
index 0d8e4b2826..fd59076e7a 100644
--- a/hw/i386/pci-assign-load-rom.c
+++ b/hw/i386/pci-assign-load-rom.c
@@ -39,19 +39,19 @@ void *pci_assign_dev_load_option_rom(PCIDevice *dev, struct Object *owner,
"/sys/bus/pci/devices/%04x:%02x:%02x.%01x/rom",
domain, bus, slot, function);
- if (stat(rom_file, &st)) {
- if (errno != ENOENT) {
- error_report("pci-assign: Invalid ROM.");
- }
- return NULL;
- }
-
/* Write "1" to the ROM file to enable it */
fp = fopen(rom_file, "r+");
if (fp == NULL) {
- error_report("pci-assign: Cannot open %s: %s", rom_file, strerror(errno));
+ if (errno != ENOENT) {
+ error_report("pci-assign: Cannot open %s: %s", rom_file, strerror(errno));
+ }
return NULL;
}
+ if (fstat(fileno(fp), &st) == -1) {
+ error_report("pci-assign: Cannot stat %s: %s", rom_file, strerror(errno));
+ goto close_rom;
+ }
+
val = 1;
if (fwrite(&val, 1, 1, fp) != 1) {
goto close_rom;
diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c
index 2278af7c32..23dcd3f039 100644
--- a/hw/i386/x86-iommu.c
+++ b/hw/i386/x86-iommu.c
@@ -106,6 +106,18 @@ static void x86_iommu_intremap_prop_set(Object *o, bool value, Error **errp)
s->intr_supported = value;
}
+static bool x86_iommu_device_iotlb_prop_get(Object *o, Error **errp)
+{
+ X86IOMMUState *s = X86_IOMMU_DEVICE(o);
+ return s->dt_supported;
+}
+
+static void x86_iommu_device_iotlb_prop_set(Object *o, bool value, Error **errp)
+{
+ X86IOMMUState *s = X86_IOMMU_DEVICE(o);
+ s->dt_supported = value;
+}
+
static void x86_iommu_instance_init(Object *o)
{
X86IOMMUState *s = X86_IOMMU_DEVICE(o);
@@ -114,6 +126,11 @@ static void x86_iommu_instance_init(Object *o)
s->intr_supported = false;
object_property_add_bool(o, "intremap", x86_iommu_intremap_prop_get,
x86_iommu_intremap_prop_set, NULL);
+ s->dt_supported = false;
+ object_property_add_bool(o, "device-iotlb",
+ x86_iommu_device_iotlb_prop_get,
+ x86_iommu_device_iotlb_prop_set,
+ NULL);
}
static const TypeInfo x86_iommu_info = {
diff --git a/hw/input/lm832x.c b/hw/input/lm832x.c
index 539682cac8..2340523da0 100644
--- a/hw/input/lm832x.c
+++ b/hw/input/lm832x.c
@@ -383,7 +383,7 @@ static void lm_kbd_write(LM823KbdState *s, int reg, int byte, uint8_t value)
}
}
-static void lm_i2c_event(I2CSlave *i2c, enum i2c_event event)
+static int lm_i2c_event(I2CSlave *i2c, enum i2c_event event)
{
LM823KbdState *s = LM8323(i2c);
@@ -397,6 +397,8 @@ static void lm_i2c_event(I2CSlave *i2c, enum i2c_event event)
default:
break;
}
+
+ return 0;
}
static int lm_i2c_rx(I2CSlave *i2c)
diff --git a/hw/input/ps2.c b/hw/input/ps2.c
index 0d14de08a6..8485a4edaf 100644
--- a/hw/input/ps2.c
+++ b/hw/input/ps2.c
@@ -252,6 +252,9 @@ static const uint16_t qcode_to_keycode_set1[Q_KEY_CODE__MAX] = {
[Q_KEY_CODE_ASTERISK] = 0x37,
[Q_KEY_CODE_LESS] = 0x56,
[Q_KEY_CODE_RO] = 0x73,
+ [Q_KEY_CODE_HIRAGANA] = 0x70,
+ [Q_KEY_CODE_HENKAN] = 0x79,
+ [Q_KEY_CODE_YEN] = 0x7d,
[Q_KEY_CODE_KP_COMMA] = 0x7e,
};
@@ -394,6 +397,9 @@ static const uint16_t qcode_to_keycode_set2[Q_KEY_CODE__MAX] = {
[Q_KEY_CODE_LESS] = 0x61,
[Q_KEY_CODE_SYSRQ] = 0x7f,
[Q_KEY_CODE_RO] = 0x51,
+ [Q_KEY_CODE_HIRAGANA] = 0x13,
+ [Q_KEY_CODE_HENKAN] = 0x64,
+ [Q_KEY_CODE_YEN] = 0x6a,
[Q_KEY_CODE_KP_COMMA] = 0x6d,
};
@@ -504,6 +510,10 @@ static const uint16_t qcode_to_keycode_set3[Q_KEY_CODE__MAX] = {
[Q_KEY_CODE_COMMA] = 0x41,
[Q_KEY_CODE_DOT] = 0x49,
[Q_KEY_CODE_SLASH] = 0x4a,
+
+ [Q_KEY_CODE_HIRAGANA] = 0x87,
+ [Q_KEY_CODE_HENKAN] = 0x86,
+ [Q_KEY_CODE_YEN] = 0x5d,
};
static uint8_t translate_table[256] = {
diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs
index 2f44a2da26..8948106ac4 100644
--- a/hw/intc/Makefile.objs
+++ b/hw/intc/Makefile.objs
@@ -41,3 +41,4 @@ obj-$(CONFIG_S390_FLIC_KVM) += s390_flic_kvm.o
obj-$(CONFIG_ASPEED_SOC) += aspeed_vic.o
obj-$(CONFIG_ARM_GIC) += arm_gicv3_cpuif.o
obj-$(CONFIG_MIPS_CPS) += mips_gic.o
+obj-$(CONFIG_NIOS2) += nios2_iic.o
diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c
index d78c885509..3945dfd7b9 100644
--- a/hw/intc/apic_common.c
+++ b/hw/intc/apic_common.c
@@ -26,6 +26,7 @@
#include "hw/i386/apic.h"
#include "hw/i386/apic_internal.h"
#include "trace.h"
+#include "sysemu/hax.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "hw/sysbus.h"
@@ -316,7 +317,7 @@ static void apic_common_realize(DeviceState *dev, Error **errp)
/* Note: We need at least 1M to map the VAPIC option ROM */
if (!vapic && s->vapic_control & VAPIC_ENABLE_MASK &&
- ram_size >= 1024 * 1024) {
+ !hax_enabled() && ram_size >= 1024 * 1024) {
vapic = sysbus_create_simple("kvmvapic", -1, NULL);
}
s->vapic = vapic;
diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c
index 0a1f56af19..4a8df44fb1 100644
--- a/hw/intc/arm_gic_common.c
+++ b/hw/intc/arm_gic_common.c
@@ -110,6 +110,12 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
for (i = 0; i < s->num_cpu; i++) {
sysbus_init_irq(sbd, &s->parent_fiq[i]);
}
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->parent_virq[i]);
+ }
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->parent_vfiq[i]);
+ }
/* Distributor */
memory_region_init_io(&s->iomem, OBJECT(s), ops, s, "gic_dist", 0x1000);
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index 11729ee902..ec952ece93 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -510,6 +510,17 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
return;
}
+ if (!kvm_arm_gic_can_save_restore(s)) {
+ error_setg(&s->migration_blocker, "This operating system kernel does "
+ "not support vGICv2 migration");
+ migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(s->migration_blocker);
+ return;
+ }
+ }
+
gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL);
for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
@@ -558,12 +569,6 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
KVM_VGIC_V2_ADDR_TYPE_CPU,
s->dev_fd);
- if (!kvm_arm_gic_can_save_restore(s)) {
- error_setg(&s->migration_blocker, "This operating system kernel does "
- "not support vGICv2 migration");
- migrate_add_blocker(s->migration_blocker);
- }
-
if (kvm_has_gsi_routing()) {
/* set up irq routing */
kvm_init_irq_routing(kvm_state);
diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c
index 8a6c647219..f0c967b304 100644
--- a/hw/intc/arm_gicv3.c
+++ b/hw/intc/arm_gicv3.c
@@ -54,6 +54,7 @@ static uint32_t gicd_int_pending(GICv3State *s, int irq)
* + the PENDING latch is set OR it is level triggered and the input is 1
* + its ENABLE bit is set
* + the GICD enable bit for its group is set
+ * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
* Conveniently we can bulk-calculate this with bitwise operations.
*/
uint32_t pend, grpmask;
@@ -63,9 +64,11 @@ static uint32_t gicd_int_pending(GICv3State *s, int irq)
uint32_t group = *gic_bmp_ptr32(s->group, irq);
uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
+ uint32_t active = *gic_bmp_ptr32(s->active, irq);
pend = pending | (~edge_trigger & level);
pend &= enable;
+ pend &= ~active;
if (s->gicd_ctlr & GICD_CTLR_DS) {
grpmod = 0;
@@ -96,12 +99,14 @@ static uint32_t gicr_int_pending(GICv3CPUState *cs)
* + the PENDING latch is set OR it is level triggered and the input is 1
* + its ENABLE bit is set
* + the GICD enable bit for its group is set
+ * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
* Conveniently we can bulk-calculate this with bitwise operations.
*/
uint32_t pend, grpmask, grpmod;
pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
pend &= cs->gicr_ienabler0;
+ pend &= ~cs->gicr_iactiver0;
if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
grpmod = 0;
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index 0f8c4b86e0..16b9b0f7eb 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -49,6 +49,27 @@ static int gicv3_post_load(void *opaque, int version_id)
return 0;
}
+static bool virt_state_needed(void *opaque)
+{
+ GICv3CPUState *cs = opaque;
+
+ return cs->num_list_regs != 0;
+}
+
+static const VMStateDescription vmstate_gicv3_cpu_virt = {
+ .name = "arm_gicv3_cpu/virt",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = virt_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
+ VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
+ VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
+ VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_gicv3_cpu = {
.name = "arm_gicv3_cpu",
.version_id = 1,
@@ -75,6 +96,10 @@ static const VMStateDescription vmstate_gicv3_cpu = {
VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_gicv3_cpu_virt,
+ NULL
}
};
@@ -126,6 +151,12 @@ void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
for (i = 0; i < s->num_cpu; i++) {
sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
}
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
+ }
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
+ }
memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
"gicv3_dist", 0x10000);
@@ -204,7 +235,8 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
/* The CPU mp-affinity property is in MPIDR register format; squash
* the affinity bytes into 32 bits as the GICR_TYPER has them.
*/
- cpu_affid = (cpu_affid & 0xFF00000000ULL >> 8) | (cpu_affid & 0xFFFFFF);
+ cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
+ (cpu_affid & 0xFFFFFF);
s->cpu[i].gicr_typer = (cpu_affid << 32) |
(1 << 24) |
(i << 8) |
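The gicr_typer hunk above is a pure operator-precedence fix: in C, `>>` binds more tightly than `&`, so the old expression shifted the mask constant instead of shifting the masked affinity value. A standalone check (not part of the patch; the sample cpu_affid value is made up):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    uint64_t cpu_affid = 0x0F00BBCCDDULL;   /* made-up MPIDR-style affinity */

    /* old form: mask constant gets shifted before the AND */
    uint64_t buggy = (cpu_affid & 0xFF00000000ULL >> 8) | (cpu_affid & 0xFFFFFF);
    /* fixed form: mask first, then shift the result down by 8 */
    uint64_t fixed = ((cpu_affid & 0xFF00000000ULL) >> 8) | (cpu_affid & 0xFFFFFF);

    printf("buggy 0x%" PRIx64 "  fixed 0x%" PRIx64 "\n", buggy, fixed);
    return 0;
}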
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index bca30c49da..a9ee7fddf9 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -13,6 +13,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/bitops.h"
#include "trace.h"
#include "gicv3_internal.h"
#include "cpu.h"
@@ -36,6 +37,610 @@ static bool gicv3_use_ns_bank(CPUARMState *env)
return !arm_is_secure_below_el3(env);
}
+/* The minimum BPR for the virtual interface is a configurable property */
+static inline int icv_min_vbpr(GICv3CPUState *cs)
+{
+ return 7 - cs->vprebits;
+}
+
+/* Simple accessor functions for LR fields */
+static uint32_t ich_lr_vintid(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
+}
+
+static uint32_t ich_lr_pintid(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
+}
+
+static uint32_t ich_lr_prio(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
+}
+
+static int ich_lr_state(uint64_t lr)
+{
+ return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
+}
+
+static bool icv_access(CPUARMState *env, int hcr_flags)
+{
+ /* Return true if this ICC_ register access should really be
+ * directed to an ICV_ access. hcr_flags is a mask of
+ * HCR_EL2 bits to check: we treat this as an ICV_ access
+ * if we are in NS EL1 and at least one of the specified
+ * HCR_EL2 bits is set.
+ *
+ * ICV registers fall into three categories:
+ * * access if NS EL1 and HCR_EL2.FMO == 1:
+ * all ICV regs with '0' in their name
+ * * access if NS EL1 and HCR_EL2.IMO == 1:
+ * all ICV regs with '1' in their name
+ * * access if NS EL1 and either IMO or FMO == 1:
+ * CTLR, DIR, PMR, RPR
+ */
+ return (env->cp15.hcr_el2 & hcr_flags) && arm_current_el(env) == 1
+ && !arm_is_secure_below_el3(env);
+}
+
+static int read_vbpr(GICv3CPUState *cs, int grp)
+{
+ /* Read VBPR value out of the VMCR field (caller must handle
+ * VCBPR effects if required)
+ */
+ if (grp == GICV3_G0) {
+ return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
+ ICH_VMCR_EL2_VBPR0_LENGTH);
+ } else {
+ return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
+ ICH_VMCR_EL2_VBPR1_LENGTH);
+ }
+}
+
+static void write_vbpr(GICv3CPUState *cs, int grp, int value)
+{
+ /* Write new VBPR1 value, handling the "writing a value less than
+ * the minimum sets it to the minimum" semantics.
+ */
+ int min = icv_min_vbpr(cs);
+
+ if (grp != GICV3_G0) {
+ min++;
+ }
+
+ value = MAX(value, min);
+
+ if (grp == GICV3_G0) {
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
+ ICH_VMCR_EL2_VBPR0_LENGTH, value);
+ } else {
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
+ ICH_VMCR_EL2_VBPR1_LENGTH, value);
+ }
+}
+
+static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
+{
+ /* Return a mask word which clears the unimplemented priority bits
+ * from a priority value for a virtual interrupt. (Not to be confused
+ * with the group priority, whose mask depends on the value of VBPR
+ * for the interrupt group.)
+ */
+ return ~0U << (8 - cs->vpribits);
+}
+
+static int ich_highest_active_virt_prio(GICv3CPUState *cs)
+{
+ /* Calculate the current running priority based on the set bits
+ * in the ICH Active Priority Registers.
+ */
+ int i;
+ int aprmax = 1 << (cs->vprebits - 5);
+
+ assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+
+ for (i = 0; i < aprmax; i++) {
+ uint32_t apr = cs->ich_apr[GICV3_G0][i] |
+ cs->ich_apr[GICV3_G1NS][i];
+
+ if (!apr) {
+ continue;
+ }
+ return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
+ }
+ /* No current active interrupts: return idle priority */
+ return 0xff;
+}
+
+static int hppvi_index(GICv3CPUState *cs)
+{
+ /* Return the list register index of the highest priority pending
+ * virtual interrupt, as per the HighestPriorityVirtualInterrupt
+ * pseudocode. If no pending virtual interrupts, return -1.
+ */
+ int idx = -1;
+ int i;
+ /* Note that a list register entry with a priority of 0xff will
+ * never be reported by this function; this is the architecturally
+ * correct behaviour.
+ */
+ int prio = 0xff;
+
+ if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
+ /* Both groups disabled, definitely nothing to do */
+ return idx;
+ }
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+ int thisprio;
+
+ if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
+ /* Not Pending */
+ continue;
+ }
+
+ /* Ignore interrupts if relevant group enable not set */
+ if (lr & ICH_LR_EL2_GROUP) {
+ if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
+ continue;
+ }
+ } else {
+ if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
+ continue;
+ }
+ }
+
+ thisprio = ich_lr_prio(lr);
+
+ if (thisprio < prio) {
+ prio = thisprio;
+ idx = i;
+ }
+ }
+
+ return idx;
+}
+
+static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
+{
+ /* Return a mask word which clears the subpriority bits from
+ * a priority value for a virtual interrupt in the specified group.
+ * This depends on the VBPR value:
+ * a BPR of 0 means the group priority bits are [7:1];
+ * a BPR of 1 means they are [7:2], and so on down to
+ * a BPR of 7 meaning no group priority bits at all.
+ * Which BPR to use depends on the group of the interrupt and
+ * the current ICH_VMCR_EL2.VCBPR settings.
+ */
+ if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
+ group = GICV3_G0;
+ }
+
+ return ~0U << (read_vbpr(cs, group) + 1);
+}
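The BPR-to-mask rule that icv_gprio_mask() implements can be sanity-checked with a few lines of standalone C (not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Reproduce the rule from the comment above: a (V)BPR of n keeps priority
 * bits [7:n+1] as the group priority and discards the subpriority bits.
 */
static uint32_t gprio_mask(int bpr)
{
    return ~0U << (bpr + 1);
}

int main(void)
{
    for (int bpr = 0; bpr <= 7; bpr++) {
        printf("BPR=%d -> group-priority mask 0x%02x\n",
               bpr, gprio_mask(bpr) & 0xff);
    }
    return 0;
}

For BPR 0 this prints 0xfe (bits [7:1]); for BPR 7 it prints 0x00, i.e. no group priority bits at all, matching the comment.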
+
+static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
+{
+ /* Return true if we can signal this virtual interrupt defined by
+ * the given list register value; see the pseudocode functions
+ * CanSignalVirtualInterrupt and CanSignalVirtualInt.
+ * Compare also icc_hppi_can_preempt() which is the non-virtual
+ * equivalent of these checks.
+ */
+ int grp;
+ uint32_t mask, prio, rprio, vpmr;
+
+ if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
+ /* Virtual interface disabled */
+ return false;
+ }
+
+ /* We don't need to check that this LR is in Pending state because
+ * that has already been done in hppvi_index().
+ */
+
+ prio = ich_lr_prio(lr);
+ vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
+ ICH_VMCR_EL2_VPMR_LENGTH);
+
+ if (prio >= vpmr) {
+ /* Priority mask masks this interrupt */
+ return false;
+ }
+
+ rprio = ich_highest_active_virt_prio(cs);
+ if (rprio == 0xff) {
+ /* No running interrupt so we can preempt */
+ return true;
+ }
+
+ grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+
+ mask = icv_gprio_mask(cs, grp);
+
+ /* We only preempt a running interrupt if the pending interrupt's
+ * group priority is sufficient (the subpriorities are not considered).
+ */
+ if ((prio & mask) < (rprio & mask)) {
+ return true;
+ }
+
+ return false;
+}
+
+static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
+ uint32_t *misr)
+{
+ /* Return a set of bits indicating the EOI maintenance interrupt status
+ * for each list register. The EOI maintenance interrupt status is
+ * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
+ * (see the GICv3 spec for the ICH_EISR_EL2 register).
+ * If misr is not NULL then we should also collect the information
+ * about the MISR.EOI, MISR.NP and MISR.U bits.
+ */
+ uint32_t value = 0;
+ int validcount = 0;
+ bool seenpending = false;
+ int i;
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+
+ if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
+ == ICH_LR_EL2_EOI) {
+ value |= (1 << i);
+ }
+ if ((lr & ICH_LR_EL2_STATE_MASK)) {
+ validcount++;
+ }
+ if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
+ seenpending = true;
+ }
+ }
+
+ if (misr) {
+ if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
+ *misr |= ICH_MISR_EL2_U;
+ }
+ if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
+ *misr |= ICH_MISR_EL2_NP;
+ }
+ if (value) {
+ *misr |= ICH_MISR_EL2_EOI;
+ }
+ }
+ return value;
+}
+
+static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
+{
+ /* Return a set of bits indicating the maintenance interrupt status
+ * (as seen in the ICH_MISR_EL2 register).
+ */
+ uint32_t value = 0;
+
+ /* Scan list registers and fill in the U, NP and EOI bits */
+ eoi_maintenance_interrupt_state(cs, &value);
+
+ if (cs->ich_hcr_el2 & (ICH_HCR_EL2_LRENPIE | ICH_HCR_EL2_EOICOUNT_MASK)) {
+ value |= ICH_MISR_EL2_LRENP;
+ }
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
+ (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
+ value |= ICH_MISR_EL2_VGRP0E;
+ }
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
+ !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
+ value |= ICH_MISR_EL2_VGRP0D;
+ }
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
+ (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
+ value |= ICH_MISR_EL2_VGRP1E;
+ }
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
+ !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
+ value |= ICH_MISR_EL2_VGRP1D;
+ }
+
+ return value;
+}
+
+static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
+{
+ /* Tell the CPU about any pending virtual interrupts or
+ * maintenance interrupts, following a change to the state
+ * of the CPU interface relevant to virtual interrupts.
+ *
+ * CAUTION: this function will call qemu_set_irq() on the
+ * CPU maintenance IRQ line, which is typically wired up
+ * to the GIC as a per-CPU interrupt. This means that it
+ * will recursively call back into the GIC code via
+ * gicv3_redist_set_irq() and thus into the CPU interface code's
+ * gicv3_cpuif_update(). It is therefore important that this
+ * function is only called as the final action of a CPU interface
+ * register write implementation, after all the GIC state
+ * fields have been updated. gicv3_cpuif_update() also must
+ * not cause this function to be called, but that happens
+ * naturally as a result of there being no architectural
+ * linkage between the physical and virtual GIC logic.
+ */
+ int idx;
+ int irqlevel = 0;
+ int fiqlevel = 0;
+ int maintlevel = 0;
+
+ idx = hppvi_index(cs);
+ trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
+ if (idx >= 0) {
+ uint64_t lr = cs->ich_lr_el2[idx];
+
+ if (icv_hppi_can_preempt(cs, lr)) {
+ /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
+ if (lr & ICH_LR_EL2_GROUP) {
+ irqlevel = 1;
+ } else {
+ fiqlevel = 1;
+ }
+ }
+ }
+
+ if (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) {
+ maintlevel = maintenance_interrupt_state(cs);
+ }
+
+ trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
+ irqlevel, maintlevel);
+
+ qemu_set_irq(cs->parent_vfiq, fiqlevel);
+ qemu_set_irq(cs->parent_virq, irqlevel);
+ qemu_set_irq(cs->maintenance_irq, maintlevel);
+}
+
+static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = ri->crm & 1 ? GICV3_G1NS : GICV3_G0;
+ uint64_t value = cs->ich_apr[grp][regno];
+
+ trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = ri->crm & 1 ? GICV3_G1NS : GICV3_G0;
+
+ trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+
+ cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
+ uint64_t bpr;
+ bool satinc = false;
+
+ if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
+ /* reads return bpr0 + 1 saturated to 7, writes ignored */
+ grp = GICV3_G0;
+ satinc = true;
+ }
+
+ bpr = read_vbpr(cs, grp);
+
+ if (satinc) {
+ bpr++;
+ bpr = MIN(bpr, 7);
+ }
+
+ trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
+
+ return bpr;
+}
+
+static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
+
+ trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), value);
+
+ if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
+ /* reads return bpr0 + 1 saturated to 7, writes ignored */
+ return;
+ }
+
+ write_vbpr(cs, grp, value);
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
+ ICH_VMCR_EL2_VPMR_LENGTH);
+
+ trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
+
+ value &= icv_fullprio_mask(cs);
+
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
+ ICH_VMCR_EL2_VPMR_LENGTH, value);
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int enbit;
+ uint64_t value;
+
+ enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
+ value = extract64(cs->ich_vmcr_el2, enbit, 1);
+
+ trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
+ gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int enbit;
+
+ trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
+ gicv3_redist_affid(cs), value);
+
+ enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
+
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
+ * should match the ones reported in ich_vtr_read().
+ */
+ value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
+ (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+
+ if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
+ value |= ICC_CTLR_EL1_EOIMODE;
+ }
+
+ if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
+ value |= ICC_CTLR_EL1_CBPR;
+ }
+
+ trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
+
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
+ 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
+ cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
+ 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int prio = ich_highest_active_virt_prio(cs);
+
+ trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
+ return prio;
+}
+
+static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
+ int idx = hppvi_index(cs);
+ uint64_t value = INTID_SPURIOUS;
+
+ if (idx >= 0) {
+ uint64_t lr = cs->ich_lr_el2[idx];
+ int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+
+ if (grp == thisgrp) {
+ value = ich_lr_vintid(lr);
+ }
+ }
+
+ trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
+{
+ /* Activate the interrupt in the specified list register
+ * by moving it from Pending to Active state, and update the
+ * Active Priority Registers.
+ */
+ uint32_t mask = icv_gprio_mask(cs, grp);
+ int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
+ int aprbit = prio >> (8 - cs->vprebits);
+ int regno = aprbit / 32;
+ int regbit = aprbit % 32;
+
+ cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
+ cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
+ cs->ich_apr[grp][regno] |= (1 << regbit);
+}
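For reference, the APR index arithmetic used in icv_activate_irq() works out as follows; this is a standalone sketch, and the vprebits and priority values below are arbitrary examples rather than values taken from the patch:

#include <stdio.h>

int main(void)
{
    int vprebits = 5;          /* example value; the realize code asserts 5..7 */
    int prio = 0x40;           /* example group priority after masking */

    /* Compress the 8-bit priority into (1 << vprebits) buckets, then split
     * the bucket index into a 32-bit APR word number and a bit within it.
     */
    int aprbit = prio >> (8 - vprebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    printf("priority 0x%02x with vprebits %d -> APR word %d, bit %d\n",
           prio, vprebits, regno, regbit);
    return 0;
}

With vprebits of 5 only APR word 0 is ever used, which is consistent with aprmax being 1 << (vprebits - 5) in ich_highest_active_virt_prio().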
+
+static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
+ int idx = hppvi_index(cs);
+ uint64_t intid = INTID_SPURIOUS;
+
+ if (idx >= 0) {
+ uint64_t lr = cs->ich_lr_el2[idx];
+ int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+
+ if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
+ intid = ich_lr_vintid(lr);
+ if (intid < INTID_SECURE) {
+ icv_activate_irq(cs, idx, grp);
+ } else {
+ /* Interrupt goes from Pending to Invalid */
+ cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
+ /* We will now return the (bogus) ID from the list register,
+ * as per the pseudocode.
+ */
+ }
+ }
+ }
+
+ trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), intid);
+ return intid;
+}
+
static int icc_highest_active_prio(GICv3CPUState *cs)
{
/* Calculate the current running priority based on the set bits
@@ -177,6 +782,10 @@ static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
GICv3CPUState *cs = icc_cs_from_env(env);
uint32_t value = cs->icc_pmr_el1;
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_pmr_read(env, ri);
+ }
+
if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
(env->cp15.scr_el3 & SCR_FIQ)) {
/* NS access and Group 0 is inaccessible to NS: return the
@@ -200,6 +809,10 @@ static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
GICv3CPUState *cs = icc_cs_from_env(env);
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_pmr_write(env, ri, value);
+ }
+
trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
value &= 0xff;
@@ -321,6 +934,10 @@ static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
GICv3CPUState *cs = icc_cs_from_env(env);
uint64_t intid;
+ if (icv_access(env, HCR_FMO)) {
+ return icv_iar_read(env, ri);
+ }
+
if (!icc_hppi_can_preempt(cs)) {
intid = INTID_SPURIOUS;
} else {
@@ -340,6 +957,10 @@ static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
GICv3CPUState *cs = icc_cs_from_env(env);
uint64_t intid;
+ if (icv_access(env, HCR_IMO)) {
+ return icv_iar_read(env, ri);
+ }
+
if (!icc_hppi_can_preempt(cs)) {
intid = INTID_SPURIOUS;
} else {
@@ -446,6 +1067,190 @@ static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
}
}
+static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
+{
+ /* Return true if we should split priority drop and interrupt
+ * deactivation, ie whether the virtual EOIMode bit is set.
+ */
+ return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
+}
+
+static int icv_find_active(GICv3CPUState *cs, int irq)
+{
+ /* Given an interrupt number for an active interrupt, return the index
+ * of the corresponding list register, or -1 if there is no match.
+ * Corresponds to FindActiveVirtualInterrupt pseudocode.
+ */
+ int i;
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+
+ if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
+{
+ /* Deactivate the interrupt in the specified list register index */
+ uint64_t lr = cs->ich_lr_el2[idx];
+
+ if (lr & ICH_LR_EL2_HW) {
+ /* Deactivate the associated physical interrupt */
+ int pirq = ich_lr_pintid(lr);
+
+ if (pirq < INTID_SECURE) {
+ icc_deactivate_irq(cs, pirq);
+ }
+ }
+
+ /* Clear the 'active' part of the state, so ActivePending->Pending
+ * and Active->Invalid.
+ */
+ lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
+ cs->ich_lr_el2[idx] = lr;
+}
+
+static void icv_increment_eoicount(GICv3CPUState *cs)
+{
+ /* Increment the EOICOUNT field in ICH_HCR_EL2 */
+ int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
+ ICH_HCR_EL2_EOICOUNT_LENGTH);
+
+ cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
+ ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
+}
+
+static int icv_drop_prio(GICv3CPUState *cs)
+{
+ /* Drop the priority of the currently active virtual interrupt
+ * (favouring group 0 if there is a set active bit at
+ * the same priority for both group 0 and group 1).
+ * Return the priority value for the bit we just cleared,
+ * or 0xff if no bits were set in the AP registers at all.
+ * Note that though the ich_apr[] are uint64_t only the low
+ * 32 bits are actually relevant.
+ */
+ int i;
+ int aprmax = 1 << (cs->vprebits - 5);
+
+ assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+
+ for (i = 0; i < aprmax; i++) {
+ uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
+ uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
+ int apr0count, apr1count;
+
+ if (!*papr0 && !*papr1) {
+ continue;
+ }
+
+ /* We can't just use the bit-twiddling hack icc_drop_prio() does
+ * because we need to return the bit number we cleared so
+ * it can be compared against the list register's priority field.
+ */
+ apr0count = ctz32(*papr0);
+ apr1count = ctz32(*papr1);
+
+ if (apr0count <= apr1count) {
+ *papr0 &= *papr0 - 1;
+ return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
+ } else {
+ *papr1 &= *papr1 - 1;
+ return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
+ }
+ }
+ return 0xff;
+}
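The `*papr &= *papr - 1` step above is the usual clear-lowest-set-bit identity, paired with a count-trailing-zeros to recover the index of the bit being cleared. A quick standalone check, using the compiler builtin __builtin_ctz in place of QEMU's ctz32:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t apr = 0x90;                  /* bits 4 and 7 set */
    int bit = __builtin_ctz(apr);         /* index of the lowest set bit: 4 */

    apr &= apr - 1;                       /* clears exactly that bit */
    printf("cleared bit %d, remaining APR 0x%02x\n", bit, apr);
    return 0;
}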
+
+static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Deactivate interrupt */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int idx;
+ int irq = value & 0xffffff;
+
+ trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
+
+ if (irq >= cs->gic->num_irq) {
+ /* Also catches special interrupt numbers and LPIs */
+ return;
+ }
+
+ if (!icv_eoi_split(env, cs)) {
+ return;
+ }
+
+ idx = icv_find_active(cs, irq);
+
+ if (idx < 0) {
+ /* No list register matching this, so increment the EOI count
+ * (might trigger a maintenance interrupt)
+ */
+ icv_increment_eoicount(cs);
+ } else {
+ icv_deactivate_irq(cs, idx);
+ }
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* End of Interrupt */
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int irq = value & 0xffffff;
+ int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
+ int idx, dropprio;
+
+ trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
+ gicv3_redist_affid(cs), value);
+
+ if (irq >= cs->gic->num_irq) {
+ /* Also catches special interrupt numbers and LPIs */
+ return;
+ }
+
+ /* We implement the IMPDEF choice of "drop priority before doing
+ * error checks" (because that lets us avoid scanning the AP
+ * registers twice).
+ */
+ dropprio = icv_drop_prio(cs);
+ if (dropprio == 0xff) {
+ /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
+ * whether the list registers are checked in this
+ * situation; we choose not to.
+ */
+ return;
+ }
+
+ idx = icv_find_active(cs, irq);
+
+ if (idx < 0) {
+ /* No valid list register corresponding to EOI ID */
+ icv_increment_eoicount(cs);
+ } else {
+ uint64_t lr = cs->ich_lr_el2[idx];
+ int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+ int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
+
+ if (thisgrp == grp && lr_gprio == dropprio) {
+ if (!icv_eoi_split(env, cs)) {
+ /* Priority drop and deactivate not split: deactivate irq now */
+ icv_deactivate_irq(cs, idx);
+ }
+ }
+ }
+
+ gicv3_cpuif_virt_update(cs);
+}
+
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -454,6 +1259,11 @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
int irq = value & 0xffffff;
int grp;
+ if (icv_access(env, ri->crm == 8 ? HCR_FMO : HCR_IMO)) {
+ icv_eoir_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1,
gicv3_redist_affid(cs), value);
@@ -496,8 +1306,13 @@ static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
- uint64_t value = icc_hppir0_value(cs, env);
+ uint64_t value;
+ if (icv_access(env, HCR_FMO)) {
+ return icv_hppir_read(env, ri);
+ }
+
+ value = icc_hppir0_value(cs, env);
trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
return value;
}
@@ -505,8 +1320,13 @@ static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
- uint64_t value = icc_hppir1_value(cs, env);
+ uint64_t value;
+
+ if (icv_access(env, HCR_IMO)) {
+ return icv_hppir_read(env, ri);
+ }
+ value = icc_hppir1_value(cs, env);
trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
return value;
}
@@ -518,6 +1338,10 @@ static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
bool satinc = false;
uint64_t bpr;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ return icv_bpr_read(env, ri);
+ }
+
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
grp = GICV3_G1NS;
}
@@ -554,6 +1378,11 @@ static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
GICv3CPUState *cs = icc_cs_from_env(env);
int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ icv_bpr_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
gicv3_redist_affid(cs), value);
@@ -587,6 +1416,10 @@ static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
int regno = ri->opc2 & 3;
int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ return icv_ap_read(env, ri);
+ }
+
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
grp = GICV3_G1NS;
}
@@ -605,6 +1438,11 @@ static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
int regno = ri->opc2 & 3;
int grp = ri->crm & 1 ? GICV3_G0 : GICV3_G1;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ icv_ap_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
@@ -633,6 +1471,11 @@ static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
bool irq_is_secure, single_sec_state, irq_is_grp0;
bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ icv_dir_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);
if (irq >= cs->gic->num_irq) {
@@ -704,7 +1547,13 @@ static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
- int prio = icc_highest_active_prio(cs);
+ int prio;
+
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_rpr_read(env, ri);
+ }
+
+ prio = icc_highest_active_prio(cs);
if (arm_feature(env, ARM_FEATURE_EL3) &&
!arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
@@ -817,6 +1666,10 @@ static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
uint64_t value;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ return icv_igrpen_read(env, ri);
+ }
+
if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
grp = GICV3_G1NS;
}
@@ -833,6 +1686,11 @@ static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
GICv3CPUState *cs = icc_cs_from_env(env);
int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
+ if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
+ icv_igrpen_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
gicv3_redist_affid(cs), value);
@@ -874,6 +1732,10 @@ static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
uint64_t value;
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ return icv_ctlr_read(env, ri);
+ }
+
value = cs->icc_ctlr_el1[bank];
trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
return value;
@@ -886,6 +1748,11 @@ static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
uint64_t mask;
+ if (icv_access(env, HCR_FMO | HCR_IMO)) {
+ icv_ctlr_write(env, ri, value);
+ return;
+ }
+
trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);
/* Only CBPR and EOIMODE can be RW;
@@ -966,9 +1833,17 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
const ARMCPRegInfo *ri, bool isread)
{
CPAccessResult r = CP_ACCESS_OK;
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int el = arm_current_el(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
+ el == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
- switch (arm_current_el(env)) {
+ switch (el) {
case 1:
if (arm_is_secure_below_el3(env) ||
((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) == 0)) {
@@ -994,13 +1869,47 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
return r;
}
+static CPAccessResult gicv3_dir_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
+ arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return gicv3_irqfiq_access(env, ri, isread);
+}
+
+static CPAccessResult gicv3_sgi_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ if ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) &&
+ arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
+
+ return gicv3_irqfiq_access(env, ri, isread);
+}
+
static CPAccessResult gicv3_fiq_access(CPUARMState *env,
const ARMCPRegInfo *ri, bool isread)
{
CPAccessResult r = CP_ACCESS_OK;
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int el = arm_current_el(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
+ el == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
if (env->cp15.scr_el3 & SCR_FIQ) {
- switch (arm_current_el(env)) {
+ switch (el) {
case 1:
if (arm_is_secure_below_el3(env) ||
((env->cp15.hcr_el2 & HCR_FMO) == 0)) {
@@ -1030,9 +1939,17 @@ static CPAccessResult gicv3_irq_access(CPUARMState *env,
const ARMCPRegInfo *ri, bool isread)
{
CPAccessResult r = CP_ACCESS_OK;
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int el = arm_current_el(env);
+
+ if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
+ el == 1 && !arm_is_secure_below_el3(env)) {
+ /* Takes priority over a possible EL3 trap */
+ return CP_ACCESS_TRAP_EL2;
+ }
if (env->cp15.scr_el3 & SCR_IRQ) {
- switch (arm_current_el(env)) {
+ switch (el) {
case 1:
if (arm_is_secure_below_el3(env) ||
((env->cp15.hcr_el2 & HCR_IMO) == 0)) {
@@ -1081,6 +1998,13 @@ static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
(1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
(7 << ICC_CTLR_EL3_PRIBITS_SHIFT);
+
+ memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
+ cs->ich_hcr_el2 = 0;
+ memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
+ cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
+ (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR1_SHIFT) |
+ (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
}
static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
@@ -1118,35 +2042,35 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
.access = PL1_RW, .accessfn = gicv3_fiq_access,
- .fieldoffset = offsetof(GICv3CPUState, icc_bpr[GICV3_G0]),
+ .readfn = icc_bpr_read,
.writefn = icc_bpr_write,
},
{ .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
.access = PL1_RW, .accessfn = gicv3_fiq_access,
- .fieldoffset = offsetof(GICv3CPUState, icc_apr[GICV3_G0][0]),
+ .readfn = icc_ap_read,
.writefn = icc_ap_write,
},
{ .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
.access = PL1_RW, .accessfn = gicv3_fiq_access,
- .fieldoffset = offsetof(GICv3CPUState, icc_apr[GICV3_G0][1]),
+ .readfn = icc_ap_read,
.writefn = icc_ap_write,
},
{ .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
.access = PL1_RW, .accessfn = gicv3_fiq_access,
- .fieldoffset = offsetof(GICv3CPUState, icc_apr[GICV3_G0][2]),
+ .readfn = icc_ap_read,
.writefn = icc_ap_write,
},
{ .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
.access = PL1_RW, .accessfn = gicv3_fiq_access,
- .fieldoffset = offsetof(GICv3CPUState, icc_apr[GICV3_G0][3]),
+ .readfn = icc_ap_read,
.writefn = icc_ap_write,
},
/* All the ICC_AP1R*_EL1 registers are banked */
@@ -1181,7 +2105,7 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
{ .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_dir_access,
.writefn = icc_dir_write,
},
{ .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
@@ -1193,37 +2117,37 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
{ .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_sgi1r_write,
},
{ .name = "ICC_SGI1R",
.cp = 15, .opc1 = 0, .crm = 12,
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_sgi1r_write,
},
{ .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_asgi1r_write,
},
{ .name = "ICC_ASGI1R",
.cp = 15, .opc1 = 1, .crm = 12,
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_asgi1r_write,
},
{ .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_sgi0r_write,
},
{ .name = "ICC_SGI0R",
.cp = 15, .opc1 = 2, .crm = 12,
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_W, .accessfn = gicv3_irqfiq_access,
+ .access = PL1_W, .accessfn = gicv3_sgi_access,
.writefn = icc_sgi0r_write,
},
{ .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
@@ -1275,7 +2199,7 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
.access = PL1_RW, .accessfn = gicv3_fiq_access,
- .fieldoffset = offsetof(GICv3CPUState, icc_igrpen[GICV3_G0]),
+ .readfn = icc_igrpen_read,
.writefn = icc_igrpen_write,
},
/* This register is banked */
@@ -1299,7 +2223,6 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
.opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
.access = PL3_RW,
- .fieldoffset = offsetof(GICv3CPUState, icc_ctlr_el3),
.readfn = icc_ctlr_el3_read,
.writefn = icc_ctlr_el3_write,
},
@@ -1322,6 +2245,306 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
REGINFO_SENTINEL
};
+static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = ri->crm & 1 ? GICV3_G1NS : GICV3_G0;
+ uint64_t value;
+
+ value = cs->ich_apr[grp][regno];
+ trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 & 3;
+ int grp = ri->crm & 1 ? GICV3_G1NS : GICV3_G0;
+
+ trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
+
+ cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = cs->ich_hcr_el2;
+
+ trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
+
+ value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
+ ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
+ ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
+ ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
+ ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
+
+ cs->ich_hcr_el2 = value;
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = cs->ich_vmcr_el2;
+
+ trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+
+ trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
+
+ value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
+ ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
+ ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
+ value |= ICH_VMCR_EL2_VFIQEN;
+
+ cs->ich_vmcr_el2 = value;
+ /* Enforce "writing BPRs to less than minimum sets them to the minimum"
+ * by reading and writing back the fields.
+ */
+ write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
+ write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
+
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 | ((ri->crm & 1) << 3);
+ uint64_t value;
+
+ /* This read function handles all of:
+ * 64-bit reads of the whole LR
+ * 32-bit reads of the low half of the LR
+ * 32-bit reads of the high half of the LR
+ */
+ if (ri->state == ARM_CP_STATE_AA32) {
+ if (ri->crm >= 14) {
+ value = extract64(cs->ich_lr_el2[regno], 32, 32);
+ trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
+ } else {
+ value = extract64(cs->ich_lr_el2[regno], 0, 32);
+ trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
+ }
+ } else {
+ value = cs->ich_lr_el2[regno];
+ trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
+ }
+
+ return value;
+}
+
+static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ int regno = ri->opc2 | ((ri->crm & 1) << 3);
+
+ /* This write function handles all of:
+ * 64-bit writes to the whole LR
+ * 32-bit writes to the low half of the LR
+ * 32-bit writes to the high half of the LR
+ */
+ if (ri->state == ARM_CP_STATE_AA32) {
+ if (ri->crm >= 14) {
+ trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
+ value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
+ } else {
+ trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
+ value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
+ }
+ } else {
+ trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
+ }
+
+ /* Enforce RES0 bits in priority field */
+ if (cs->vpribits < 8) {
+ value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
+ 8 - cs->vpribits, 0);
+ }
+
+ cs->ich_lr_el2[regno] = value;
+ gicv3_cpuif_virt_update(cs);
+}
+
+static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value;
+
+ value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
+ | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V
+ | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
+ | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
+ | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
+
+ trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = maintenance_interrupt_state(cs);
+
+ trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
+
+ trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t value = 0;
+ int i;
+
+ for (i = 0; i < cs->num_list_regs; i++) {
+ uint64_t lr = cs->ich_lr_el2[i];
+
+ if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
+ ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
+ value |= (1 << i);
+ }
+ }
+
+ trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
+ return value;
+}
+
+static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
+ { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_hcr_read,
+ .writefn = ich_hcr_write,
+ },
+ { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_vtr_read,
+ },
+ { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_misr_read,
+ },
+ { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_eisr_read,
+ },
+ { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_R,
+ .readfn = ich_elrsr_read,
+ },
+ { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_vmcr_read,
+ .writefn = ich_vmcr_write,
+ },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
+ { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
+ { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_ap_read,
+ .writefn = ich_ap_write,
+ },
+ REGINFO_SENTINEL
+};
+
static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
{
GICv3CPUState *cs = opaque;
@@ -1350,6 +2573,59 @@ void gicv3_init_cpuif(GICv3State *s)
* to need to register anyway.
*/
define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
+ && cpu->gic_num_lrs) {
+ int j;
+
+ cs->maintenance_irq = cpu->gicv3_maintenance_interrupt;
+
+ cs->num_list_regs = cpu->gic_num_lrs;
+ cs->vpribits = cpu->gic_vpribits;
+ cs->vprebits = cpu->gic_vprebits;
+
+ /* Check against architectural constraints: getting these
+ * wrong would be a bug in the CPU code defining these,
+ * and the implementation relies on them holding.
+ */
+ g_assert(cs->vprebits <= cs->vpribits);
+ g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
+ g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);
+
+ define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);
+
+ for (j = 0; j < cs->num_list_regs; j++) {
+ /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
+ * are split into two cp15 regs, LR (the low part, with the
+ * same encoding as the AArch64 LR) and LRC (the high part).
+ */
+ ARMCPRegInfo lr_regset[] = {
+ { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 12,
+ .crm = 12 + (j >> 3), .opc2 = j & 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_lr_read,
+ .writefn = ich_lr_write,
+ },
+ { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 12,
+ .crm = 14 + (j >> 3), .opc2 = j & 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL2_RW,
+ .readfn = ich_lr_read,
+ .writefn = ich_lr_write,
+ },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, lr_regset);
+ }
+ if (cs->vprebits >= 6) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
+ }
+ if (cs->vprebits == 7) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
+ }
+ }
arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
}
}
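The per-LR register definitions in gicv3_init_cpuif() above encode list register n at crm = 12 + (n >> 3), opc2 = n & 7, with the AArch32-only LRC upper half sitting two crm values higher. A throwaway generator for the full mapping, assuming the architectural maximum of 16 list registers:

#include <stdio.h>

int main(void)
{
    for (int j = 0; j < 16; j++) {
        printf("ICH_LR%-2d_EL2: crm=%2d opc2=%d   ICH_LRC%-2d: crm=%2d opc2=%d\n",
               j, 12 + (j >> 3), j & 7,
               j, 14 + (j >> 3), j & 7);
    }
    return 0;
}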
diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c
index fc246e0cb5..bd4f3aafc6 100644
--- a/hw/intc/arm_gicv3_its_kvm.c
+++ b/hw/intc/arm_gicv3_its_kvm.c
@@ -56,6 +56,19 @@ static int kvm_its_send_msi(GICv3ITSState *s, uint32_t value, uint16_t devid)
static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
{
GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+ Error *local_err = NULL;
+
+ /*
+ * Block migration of a KVM GICv3 ITS device: the API for saving and
+ * restoring the state in the kernel is not yet available
+ */
+ error_setg(&s->migration_blocker, "vITS migration is not implemented");
+ migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(s->migration_blocker);
+ return;
+ }
s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_ITS, false);
if (s->dev_fd < 0) {
@@ -73,13 +86,6 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
gicv3_its_init_mmio(s, NULL);
- /*
- * Block migration of a KVM GICv3 ITS device: the API for saving and
- * restoring the state in the kernel is not yet available
- */
- error_setg(&s->migration_blocker, "vITS migration is not implemented");
- migrate_add_blocker(s->migration_blocker);
-
kvm_msi_use_devid = true;
kvm_gsi_direct_mapping = false;
kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index 199a439ccf..d69dc47370 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -103,6 +103,18 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL);
+ /* Block migration of a KVM GICv3 device: the API for saving and restoring
+ * the state in the kernel is not yet finalised in the kernel or
+ * implemented in QEMU.
+ */
+ error_setg(&s->migration_blocker, "vGICv3 migration is not implemented");
+ migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(s->migration_blocker);
+ return;
+ }
+
/* Try to create the device via the device control API */
s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V3, false);
if (s->dev_fd < 0) {
@@ -122,13 +134,6 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
kvm_arm_register_device(&s->iomem_redist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd);
- /* Block migration of a KVM GICv3 device: the API for saving and restoring
- * the state in the kernel is not yet finalised in the kernel or
- * implemented in QEMU.
- */
- error_setg(&s->migration_blocker, "vGICv3 migration is not implemented");
- migrate_add_blocker(s->migration_blocker);
-
if (kvm_has_gsi_routing()) {
/* set up irq routing */
kvm_init_irq_routing(kvm_state);
diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h
index 8f3567edaa..aeb801d133 100644
--- a/hw/intc/gicv3_internal.h
+++ b/hw/intc/gicv3_internal.h
@@ -159,6 +159,85 @@
#define ICC_CTLR_EL3_A3V (1U << 15)
#define ICC_CTLR_EL3_NDS (1U << 17)
+#define ICH_VMCR_EL2_VENG0_SHIFT 0
+#define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT)
+#define ICH_VMCR_EL2_VENG1_SHIFT 1
+#define ICH_VMCR_EL2_VENG1 (1U << ICH_VMCR_EL2_VENG1_SHIFT)
+#define ICH_VMCR_EL2_VACKCTL (1U << 2)
+#define ICH_VMCR_EL2_VFIQEN (1U << 3)
+#define ICH_VMCR_EL2_VCBPR_SHIFT 4
+#define ICH_VMCR_EL2_VCBPR (1U << ICH_VMCR_EL2_VCBPR_SHIFT)
+#define ICH_VMCR_EL2_VEOIM_SHIFT 9
+#define ICH_VMCR_EL2_VEOIM (1U << ICH_VMCR_EL2_VEOIM_SHIFT)
+#define ICH_VMCR_EL2_VBPR1_SHIFT 18
+#define ICH_VMCR_EL2_VBPR1_LENGTH 3
+#define ICH_VMCR_EL2_VBPR1_MASK (0x7U << ICH_VMCR_EL2_VBPR1_SHIFT)
+#define ICH_VMCR_EL2_VBPR0_SHIFT 21
+#define ICH_VMCR_EL2_VBPR0_LENGTH 3
+#define ICH_VMCR_EL2_VBPR0_MASK (0x7U << ICH_VMCR_EL2_VBPR0_SHIFT)
+#define ICH_VMCR_EL2_VPMR_SHIFT 24
+#define ICH_VMCR_EL2_VPMR_LENGTH 8
+#define ICH_VMCR_EL2_VPMR_MASK (0xffU << ICH_VMCR_EL2_VPMR_SHIFT)
+
+#define ICH_HCR_EL2_EN (1U << 0)
+#define ICH_HCR_EL2_UIE (1U << 1)
+#define ICH_HCR_EL2_LRENPIE (1U << 2)
+#define ICH_HCR_EL2_NPIE (1U << 3)
+#define ICH_HCR_EL2_VGRP0EIE (1U << 4)
+#define ICH_HCR_EL2_VGRP0DIE (1U << 5)
+#define ICH_HCR_EL2_VGRP1EIE (1U << 6)
+#define ICH_HCR_EL2_VGRP1DIE (1U << 7)
+#define ICH_HCR_EL2_TC (1U << 10)
+#define ICH_HCR_EL2_TALL0 (1U << 11)
+#define ICH_HCR_EL2_TALL1 (1U << 12)
+#define ICH_HCR_EL2_TSEI (1U << 13)
+#define ICH_HCR_EL2_TDIR (1U << 14)
+#define ICH_HCR_EL2_EOICOUNT_SHIFT 27
+#define ICH_HCR_EL2_EOICOUNT_LENGTH 5
+#define ICH_HCR_EL2_EOICOUNT_MASK (0x1fU << ICH_HCR_EL2_EOICOUNT_SHIFT)
+
+#define ICH_LR_EL2_VINTID_SHIFT 0
+#define ICH_LR_EL2_VINTID_LENGTH 32
+#define ICH_LR_EL2_VINTID_MASK (0xffffffffULL << ICH_LR_EL2_VINTID_SHIFT)
+#define ICH_LR_EL2_PINTID_SHIFT 32
+#define ICH_LR_EL2_PINTID_LENGTH 10
+#define ICH_LR_EL2_PINTID_MASK (0x3ffULL << ICH_LR_EL2_PINTID_SHIFT)
+/* Note that EOI shares with the top bit of the pINTID field */
+#define ICH_LR_EL2_EOI (1ULL << 41)
+#define ICH_LR_EL2_PRIORITY_SHIFT 48
+#define ICH_LR_EL2_PRIORITY_LENGTH 8
+#define ICH_LR_EL2_PRIORITY_MASK (0xffULL << ICH_LR_EL2_PRIORITY_SHIFT)
+#define ICH_LR_EL2_GROUP (1ULL << 60)
+#define ICH_LR_EL2_HW (1ULL << 61)
+#define ICH_LR_EL2_STATE_SHIFT 62
+#define ICH_LR_EL2_STATE_LENGTH 2
+#define ICH_LR_EL2_STATE_MASK (3ULL << ICH_LR_EL2_STATE_SHIFT)
+/* values for the state field: */
+#define ICH_LR_EL2_STATE_INVALID 0
+#define ICH_LR_EL2_STATE_PENDING 1
+#define ICH_LR_EL2_STATE_ACTIVE 2
+#define ICH_LR_EL2_STATE_ACTIVE_PENDING 3
+#define ICH_LR_EL2_STATE_PENDING_BIT (1ULL << ICH_LR_EL2_STATE_SHIFT)
+#define ICH_LR_EL2_STATE_ACTIVE_BIT (2ULL << ICH_LR_EL2_STATE_SHIFT)
+
+#define ICH_MISR_EL2_EOI (1U << 0)
+#define ICH_MISR_EL2_U (1U << 1)
+#define ICH_MISR_EL2_LRENP (1U << 2)
+#define ICH_MISR_EL2_NP (1U << 3)
+#define ICH_MISR_EL2_VGRP0E (1U << 4)
+#define ICH_MISR_EL2_VGRP0D (1U << 5)
+#define ICH_MISR_EL2_VGRP1E (1U << 6)
+#define ICH_MISR_EL2_VGRP1D (1U << 7)
+
+#define ICH_VTR_EL2_LISTREGS_SHIFT 0
+#define ICH_VTR_EL2_TDS (1U << 19)
+#define ICH_VTR_EL2_NV4 (1U << 20)
+#define ICH_VTR_EL2_A3V (1U << 21)
+#define ICH_VTR_EL2_SEIS (1U << 22)
+#define ICH_VTR_EL2_IDBITS_SHIFT 23
+#define ICH_VTR_EL2_PREBITS_SHIFT 26
+#define ICH_VTR_EL2_PRIBITS_SHIFT 29
+
/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
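To see how the ICH_LR_EL2_* field layout above carves up a raw 64-bit list register value, here is a self-contained example; the extract64() helper is re-implemented locally for illustration rather than pulled from QEMU's qemu/bitops.h:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Local stand-in for QEMU's extract64(): length is assumed to be 1..63 here,
 * plus the special case of 32 used below.
 */
static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

int main(void)
{
    /* Pending (state 01), group 1, priority 0x80, vINTID 27 */
    uint64_t lr = (1ULL << 62) | (1ULL << 60) | (0x80ULL << 48) | 27;

    printf("state    %" PRIu64 "\n", extract64(lr, 62, 2));   /* STATE   */
    printf("group    %" PRIu64 "\n", extract64(lr, 60, 1));   /* GROUP   */
    printf("priority 0x%02" PRIx64 "\n", extract64(lr, 48, 8)); /* PRIORITY */
    printf("vINTID   %" PRIu64 "\n", extract64(lr, 0, 32));   /* VINTID  */
    return 0;
}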
diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c
index ea7ea0bce8..9047b8950a 100644
--- a/hw/intc/ioapic.c
+++ b/hw/intc/ioapic.c
@@ -33,6 +33,7 @@
#include "target/i386/cpu.h"
#include "hw/i386/apic-msidef.h"
#include "hw/i386/x86-iommu.h"
+#include "trace.h"
//#define DEBUG_IOAPIC
@@ -115,6 +116,7 @@ static void ioapic_service(IOAPICCommonState *s)
s->irr &= ~mask;
} else {
coalesce = s->ioredtbl[i] & IOAPIC_LVT_REMOTE_IRR;
+ trace_ioapic_set_remote_irr(i);
s->ioredtbl[i] |= IOAPIC_LVT_REMOTE_IRR;
}
@@ -220,6 +222,8 @@ void ioapic_eoi_broadcast(int vector)
uint64_t entry;
int i, n;
+ trace_ioapic_eoi_broadcast(vector);
+
for (i = 0; i < MAX_IOAPICS; i++) {
s = ioapics[i];
if (!s) {
@@ -229,6 +233,7 @@ void ioapic_eoi_broadcast(int vector)
entry = s->ioredtbl[n];
if ((entry & IOAPIC_LVT_REMOTE_IRR)
&& (entry & IOAPIC_VECTOR_MASK) == vector) {
+ trace_ioapic_clear_remote_irr(n, vector);
s->ioredtbl[n] = entry & ~IOAPIC_LVT_REMOTE_IRR;
if (!(entry & IOAPIC_LVT_MASKED) && (s->irr & (1 << n))) {
ioapic_service(s);
@@ -256,7 +261,9 @@ ioapic_mem_read(void *opaque, hwaddr addr, unsigned int size)
int index;
uint32_t val = 0;
- switch (addr & 0xff) {
+ addr &= 0xff;
+
+ switch (addr) {
case IOAPIC_IOREGSEL:
val = s->ioregsel;
break;
@@ -286,6 +293,9 @@ ioapic_mem_read(void *opaque, hwaddr addr, unsigned int size)
DPRINTF("read: %08x = %08x\n", s->ioregsel, val);
break;
}
+
+ trace_ioapic_mem_read(addr, size, val);
+
return val;
}
@@ -324,7 +334,10 @@ ioapic_mem_write(void *opaque, hwaddr addr, uint64_t val,
IOAPICCommonState *s = opaque;
int index;
- switch (addr & 0xff) {
+ addr &= 0xff;
+ trace_ioapic_mem_write(addr, size, val);
+
+ switch (addr) {
case IOAPIC_IOREGSEL:
s->ioregsel = val;
break;
@@ -426,6 +439,11 @@ static void ioapic_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
k->realize = ioapic_realize;
+ /*
+ * If the APIC is in the kernel, we need to update the kernel cache after
+ * migration, otherwise the first 24 GSI routes will be invalid.
+ */
+ k->post_load = ioapic_update_kvm_routes;
dc->reset = ioapic_reset_common;
dc->props = ioapic_properties;
}
diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c
index 1b7ec5ec20..97c4f9c2df 100644
--- a/hw/intc/ioapic_common.c
+++ b/hw/intc/ioapic_common.c
@@ -58,7 +58,8 @@ void ioapic_print_redtbl(Monitor *mon, IOAPICCommonState *s)
uint32_t remote_irr = 0;
int i;
- monitor_printf(mon, "ioapic id=0x%02x sel=0x%02x", s->id, s->ioregsel);
+ monitor_printf(mon, "ioapic ver=0x%x id=0x%02x sel=0x%02x",
+ s->version, s->id, s->ioregsel);
if (s->ioregsel) {
monitor_printf(mon, " (redir[%u])\n",
(s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1);
diff --git a/hw/intc/nios2_iic.c b/hw/intc/nios2_iic.c
new file mode 100644
index 0000000000..818ab1b315
--- /dev/null
+++ b/hw/intc/nios2_iic.c
@@ -0,0 +1,103 @@
+/*
+ * QEMU Altera Internal Interrupt Controller.
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qapi/error.h"
+
+#include "hw/sysbus.h"
+#include "cpu.h"
+
+#define TYPE_ALTERA_IIC "altera,iic"
+#define ALTERA_IIC(obj) \
+ OBJECT_CHECK(AlteraIIC, (obj), TYPE_ALTERA_IIC)
+
+typedef struct AlteraIIC {
+ SysBusDevice parent_obj;
+ void *cpu;
+ qemu_irq parent_irq;
+} AlteraIIC;
+
+static void update_irq(AlteraIIC *pv)
+{
+ CPUNios2State *env = &((Nios2CPU *)(pv->cpu))->env;
+
+ qemu_set_irq(pv->parent_irq,
+ env->regs[CR_IPENDING] & env->regs[CR_IENABLE]);
+}
+
+static void irq_handler(void *opaque, int irq, int level)
+{
+ AlteraIIC *pv = opaque;
+ CPUNios2State *env = &((Nios2CPU *)(pv->cpu))->env;
+
+ env->regs[CR_IPENDING] &= ~(1 << irq);
+ env->regs[CR_IPENDING] |= !!level << irq;
+
+ update_irq(pv);
+}
+
+static void altera_iic_init(Object *obj)
+{
+ AlteraIIC *pv = ALTERA_IIC(obj);
+
+ qdev_init_gpio_in(DEVICE(pv), irq_handler, 32);
+ sysbus_init_irq(SYS_BUS_DEVICE(obj), &pv->parent_irq);
+}
+
+static Property altera_iic_properties[] = {
+ DEFINE_PROP_PTR("cpu", AlteraIIC, cpu),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void altera_iic_realize(DeviceState *dev, Error **errp)
+{
+ struct AlteraIIC *pv = ALTERA_IIC(dev);
+
+ if (!pv->cpu) {
+ error_setg(errp, "altera,iic: CPU not connected");
+ return;
+ }
+}
+
+static void altera_iic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->props = altera_iic_properties;
+ /* Reason: pointer property "cpu" */
+ dc->cannot_instantiate_with_device_add_yet = true;
+ dc->realize = altera_iic_realize;
+}
+
+static TypeInfo altera_iic_info = {
+ .name = "altera,iic",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AlteraIIC),
+ .instance_init = altera_iic_init,
+ .class_init = altera_iic_class_init,
+};
+
+static void altera_iic_register(void)
+{
+ type_register_static(&altera_iic_info);
+}
+
+type_init(altera_iic_register)
diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c
index 21ac2e2dcd..da8e4dfab6 100644
--- a/hw/intc/s390_flic_kvm.c
+++ b/hw/intc/s390_flic_kvm.c
@@ -201,7 +201,7 @@ static int kvm_s390_register_io_adapter(S390FLICState *fs, uint32_t id,
.addr = (uint64_t)&adapter,
};
- if (!kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING)) {
+ if (!kvm_gsi_routing_enabled()) {
/* nothing to do */
return 0;
}
@@ -226,7 +226,7 @@ static int kvm_s390_io_adapter_map(S390FLICState *fs, uint32_t id,
KVMS390FLICState *flic = KVM_S390_FLIC(fs);
int r;
- if (!kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING)) {
+ if (!kvm_gsi_routing_enabled()) {
/* nothing to do */
return 0;
}
@@ -286,7 +286,8 @@ static void kvm_s390_release_adapter_routes(S390FLICState *fs,
* increase until buffer is sufficient or maxium size is
* reached
*/
-static void kvm_flic_save(QEMUFile *f, void *opaque, size_t size)
+static int kvm_flic_save(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
KVMS390FLICState *flic = opaque;
int len = FLIC_SAVE_INITIAL_SIZE;
@@ -319,6 +320,8 @@ static void kvm_flic_save(QEMUFile *f, void *opaque, size_t size)
count * sizeof(struct kvm_s390_irq));
}
g_free(buf);
+
+ return 0;
}
/**
@@ -331,7 +334,8 @@ static void kvm_flic_save(QEMUFile *f, void *opaque, size_t size)
* Note: Do nothing when no interrupts where stored
* in QEMUFile
*/
-static int kvm_flic_load(QEMUFile *f, void *opaque, size_t size)
+static int kvm_flic_load(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field)
{
uint64_t len = 0;
uint64_t count = 0;
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 340f617761..92a6171692 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -14,6 +14,13 @@ apic_deliver_irq(uint8_t dest, uint8_t dest_mode, uint8_t delivery_mode, uint8_t
apic_mem_readl(uint64_t addr, uint32_t val) "%"PRIx64" = %08x"
apic_mem_writel(uint64_t addr, uint32_t val) "%"PRIx64" = %08x"
+# hw/intc/ioapic.c
+ioapic_set_remote_irr(int n) "set remote irr for pin %d"
+ioapic_clear_remote_irr(int n, int vector) "clear remote irr for pin %d vector %d"
+ioapic_eoi_broadcast(int vector) "EOI broadcast for vector %d"
+ioapic_mem_read(uint8_t addr, uint8_t size, uint32_t val) "ioapic mem read addr 0x%"PRIx8" size 0x%"PRIx8" retval 0x%"PRIx32
+ioapic_mem_write(uint8_t addr, uint8_t size, uint32_t val) "ioapic mem write addr 0x%"PRIx8" size 0x%"PRIx8" val 0x%"PRIx32
+
# hw/intc/slavio_intctl.c
slavio_intctl_mem_readl(uint32_t cpu, uint64_t addr, uint32_t ret) "read cpu %d reg 0x%"PRIx64" = %x"
slavio_intctl_mem_writel(uint32_t cpu, uint64_t addr, uint32_t val) "write cpu %d reg 0x%"PRIx64" = %x"
@@ -107,6 +114,39 @@ gicv3_icc_hppir0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR0 read cpu %x
gicv3_icc_hppir1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR1 read cpu %x value 0x%" PRIx64
gicv3_icc_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICC_DIR write cpu %x value 0x%" PRIx64
gicv3_icc_rpr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_RPR read cpu %x value 0x%" PRIx64
+gicv3_ich_ap_read(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_AP%dR%d read cpu %x value 0x%" PRIx64
+gicv3_ich_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_AP%dR%d write cpu %x value 0x%" PRIx64
+gicv3_ich_hcr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_HCR_EL2 read cpu %x value 0x%" PRIx64
+gicv3_ich_hcr_write(uint32_t cpu, uint64_t val) "GICv3 ICH_HCR_EL2 write cpu %x value 0x%" PRIx64
+gicv3_ich_vmcr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_VMCR_EL2 read cpu %x value 0x%" PRIx64
+gicv3_ich_vmcr_write(uint32_t cpu, uint64_t val) "GICv3 ICH_VMCR_EL2 write cpu %x value 0x%" PRIx64
+gicv3_ich_lr_read(int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_LR%d_EL2 read cpu %x value 0x%" PRIx64
+gicv3_ich_lr32_read(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LR%d read cpu %x value 0x%" PRIx32
+gicv3_ich_lrc_read(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LRC%d read cpu %x value 0x%" PRIx32
+gicv3_ich_lr_write(int regno, uint32_t cpu, uint64_t val) "GICv3 ICH_LR%d_EL2 write cpu %x value 0x%" PRIx64
+gicv3_ich_lr32_write(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LR%d write cpu %x value 0x%" PRIx32
+gicv3_ich_lrc_write(int regno, uint32_t cpu, uint32_t val) "GICv3 ICH_LRC%d write cpu %x value 0x%" PRIx32
+gicv3_ich_vtr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_VTR read cpu %x value 0x%" PRIx64
+gicv3_ich_misr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_MISR read cpu %x value 0x%" PRIx64
+gicv3_ich_eisr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_EISR read cpu %x value 0x%" PRIx64
+gicv3_ich_elrsr_read(uint32_t cpu, uint64_t val) "GICv3 ICH_ELRSR read cpu %x value 0x%" PRIx64
+gicv3_icv_ap_read(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICV_AP%dR%d read cpu %x value 0x%" PRIx64
+gicv3_icv_ap_write(int grp, int regno, uint32_t cpu, uint64_t val) "GICv3 ICV_AP%dR%d write cpu %x value 0x%" PRIx64
+gicv3_icv_bpr_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_BPR%d read cpu %x value 0x%" PRIx64
+gicv3_icv_bpr_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_BPR%d write cpu %x value 0x%" PRIx64
+gicv3_icv_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_PMR read cpu %x value 0x%" PRIx64
+gicv3_icv_pmr_write(uint32_t cpu, uint64_t val) "GICv3 ICV_PMR write cpu %x value 0x%" PRIx64
+gicv3_icv_igrpen_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IGRPEN%d read cpu %x value 0x%" PRIx64
+gicv3_icv_igrpen_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IGRPEN%d write cpu %x value 0x%" PRIx64
+gicv3_icv_ctlr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_CTLR read cpu %x value 0x%" PRIx64
+gicv3_icv_ctlr_write(uint32_t cpu, uint64_t val) "GICv3 ICV_CTLR write cpu %x value 0x%" PRIx64
+gicv3_icv_rpr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_RPR read cpu %x value 0x%" PRIx64
+gicv3_icv_hppir_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_HPPIR%d read cpu %x value 0x%" PRIx64
+gicv3_icv_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICV_DIR write cpu %x value 0x%" PRIx64
+gicv3_icv_iar_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IAR%d read cpu %x value 0x%" PRIx64
+gicv3_icv_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_EOIR%d write cpu %x value 0x%" PRIx64
+gicv3_cpuif_virt_update(uint32_t cpuid, int idx) "GICv3 CPU i/f %x virt HPPI update LR index %d"
+gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel, int maintlevel) "GICv3 CPU i/f %x virt HPPI update: setting FIQ %d IRQ %d maintenance-irq %d"
# hw/intc/arm_gicv3_dist.c
gicv3_dist_read(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d"
diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c
index 9d07b118c0..0ffbc8dd28 100644
--- a/hw/isa/isa-bus.c
+++ b/hw/isa/isa-bus.c
@@ -219,6 +219,7 @@ static void isabus_bridge_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->fw_name = "isa";
}
diff --git a/hw/lm32/lm32_hwsetup.h b/hw/lm32/lm32_hwsetup.h
index 23e18784df..a01f6bc5df 100644
--- a/hw/lm32/lm32_hwsetup.h
+++ b/hw/lm32/lm32_hwsetup.h
@@ -75,7 +75,7 @@ static inline void hwsetup_create_rom(HWSetup *hw,
hwaddr base)
{
rom_add_blob("hwsetup", hw->data, TARGET_PAGE_SIZE,
- TARGET_PAGE_SIZE, base, NULL, NULL, NULL, NULL);
+ TARGET_PAGE_SIZE, base, NULL, NULL, NULL, NULL, true);
}
static inline void hwsetup_add_u8(HWSetup *hw, uint8_t u)
diff --git a/hw/m68k/mcf5208.c b/hw/m68k/mcf5208.c
index 3438314c35..bad1d332ed 100644
--- a/hw/m68k/mcf5208.c
+++ b/hw/m68k/mcf5208.c
@@ -11,6 +11,7 @@
#include "cpu.h"
#include "hw/hw.h"
#include "hw/m68k/mcf.h"
+#include "hw/m68k/mcf_fec.h"
#include "qemu/timer.h"
#include "hw/ptimer.h"
#include "sysemu/sysemu.h"
@@ -18,6 +19,7 @@
#include "net/net.h"
#include "hw/boards.h"
#include "hw/loader.h"
+#include "hw/sysbus.h"
#include "elf.h"
#include "exec/address-spaces.h"
@@ -192,6 +194,26 @@ static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic)
}
}
+static void mcf_fec_init(MemoryRegion *sysmem, NICInfo *nd, hwaddr base,
+ qemu_irq *irqs)
+{
+ DeviceState *dev;
+ SysBusDevice *s;
+ int i;
+
+ qemu_check_nic_model(nd, TYPE_MCF_FEC_NET);
+ dev = qdev_create(NULL, TYPE_MCF_FEC_NET);
+ qdev_set_nic_properties(dev, nd);
+ qdev_init_nofail(dev);
+
+ s = SYS_BUS_DEVICE(dev);
+ for (i = 0; i < FEC_NUM_IRQ; i++) {
+ sysbus_connect_irq(s, i, irqs[i]);
+ }
+
+ memory_region_add_subregion(sysmem, base, sysbus_mmio_get_region(s, 0));
+}
+
static void mcf5208evb_init(MachineState *machine)
{
ram_addr_t ram_size = machine->ram_size;
@@ -243,9 +265,10 @@ static void mcf5208evb_init(MachineState *machine)
fprintf(stderr, "Too many NICs\n");
exit(1);
}
- if (nd_table[0].used)
+ if (nd_table[0].used) {
mcf_fec_init(address_space_mem, &nd_table[0],
0xfc030000, pic + 36);
+ }
/* 0xfc000000 SCM. */
/* 0xfc004000 XBS. */
diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c
index b1f3e6f6b8..95022d3607 100644
--- a/hw/misc/aspeed_scu.c
+++ b/hw/misc/aspeed_scu.c
@@ -86,7 +86,7 @@
#define BMC_DEV_ID TO_REG(0x1A4)
#define PROT_KEY_UNLOCK 0x1688A8A8
-#define SCU_IO_REGION_SIZE 0x20000
+#define SCU_IO_REGION_SIZE 0x1000
static const uint32_t ast2400_a0_resets[ASPEED_SCU_NR_REGS] = {
[SYS_RST_CTRL] = 0xFFCFFEDCU,
@@ -231,6 +231,7 @@ static void aspeed_scu_reset(DeviceState *dev)
switch (s->silicon_rev) {
case AST2400_A0_SILICON_REV:
+ case AST2400_A1_SILICON_REV:
reset = ast2400_a0_resets;
break;
case AST2500_A0_SILICON_REV:
@@ -249,6 +250,7 @@ static void aspeed_scu_reset(DeviceState *dev)
static uint32_t aspeed_silicon_revs[] = {
AST2400_A0_SILICON_REV,
+ AST2400_A1_SILICON_REV,
AST2500_A0_SILICON_REV,
AST2500_A1_SILICON_REV,
};
diff --git a/hw/misc/aspeed_sdmc.c b/hw/misc/aspeed_sdmc.c
index 8830dc084c..5f3ac0b6f6 100644
--- a/hw/misc/aspeed_sdmc.c
+++ b/hw/misc/aspeed_sdmc.c
@@ -119,6 +119,7 @@ static void aspeed_sdmc_write(void *opaque, hwaddr addr, uint64_t data,
/* Make sure readonly bits are kept */
switch (s->silicon_rev) {
case AST2400_A0_SILICON_REV:
+ case AST2400_A1_SILICON_REV:
data &= ~ASPEED_SDMC_READONLY_MASK;
break;
case AST2500_A0_SILICON_REV:
@@ -193,6 +194,7 @@ static void aspeed_sdmc_reset(DeviceState *dev)
/* Set ram size bit and defaults values */
switch (s->silicon_rev) {
case AST2400_A0_SILICON_REV:
+ case AST2400_A1_SILICON_REV:
s->regs[R_CONF] |=
ASPEED_SDMC_VGA_COMPAT |
ASPEED_SDMC_DRAM_SIZE(s->ram_bits);
@@ -224,6 +226,7 @@ static void aspeed_sdmc_realize(DeviceState *dev, Error **errp)
switch (s->silicon_rev) {
case AST2400_A0_SILICON_REV:
+ case AST2400_A1_SILICON_REV:
s->ram_bits = ast2400_rambits(s);
break;
case AST2500_A0_SILICON_REV:
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index abeaf3da08..fd14d7a07e 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -840,6 +840,7 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
uint8_t *pci_conf;
uint8_t attr = PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_PREFETCH;
+ Error *local_err = NULL;
/* IRQFD requires MSI */
if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
@@ -903,9 +904,6 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
}
}
- vmstate_register_ram(s->ivshmem_bar2, DEVICE(s));
- pci_register_bar(PCI_DEVICE(s), 2, attr, s->ivshmem_bar2);
-
if (s->master == ON_OFF_AUTO_AUTO) {
s->master = s->vm_id == 0 ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}
@@ -913,8 +911,16 @@ static void ivshmem_common_realize(PCIDevice *dev, Error **errp)
if (!ivshmem_is_master(s)) {
error_setg(&s->migration_blocker,
"Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
- migrate_add_blocker(s->migration_blocker);
+ migrate_add_blocker(s->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(s->migration_blocker);
+ return;
+ }
}
+
+ vmstate_register_ram(s->ivshmem_bar2, DEVICE(s));
+ pci_register_bar(PCI_DEVICE(s), 2, attr, s->ivshmem_bar2);
}
static void ivshmem_exit(PCIDevice *dev)
diff --git a/hw/misc/tmp105.c b/hw/misc/tmp105.c
index f5c2472b5b..04e83787d4 100644
--- a/hw/misc/tmp105.c
+++ b/hw/misc/tmp105.c
@@ -176,7 +176,7 @@ static int tmp105_tx(I2CSlave *i2c, uint8_t data)
return 0;
}
-static void tmp105_event(I2CSlave *i2c, enum i2c_event event)
+static int tmp105_event(I2CSlave *i2c, enum i2c_event event)
{
TMP105State *s = TMP105(i2c);
@@ -185,6 +185,7 @@ static void tmp105_event(I2CSlave *i2c, enum i2c_event event)
}
s->len = 0;
+ return 0;
}
static int tmp105_post_load(void *opaque, int version_id)
diff --git a/hw/misc/vmport.c b/hw/misc/vmport.c
index c763811a9f..be40930b8b 100644
--- a/hw/misc/vmport.c
+++ b/hw/misc/vmport.c
@@ -25,7 +25,7 @@
#include "hw/hw.h"
#include "hw/isa/isa.h"
#include "hw/i386/pc.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
#include "hw/qdev.h"
//#define VMPORT_DEBUG
diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index 7915732f74..e99d4544a2 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -896,7 +896,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
DB_PRINT("config bufsize: %d packet size: %ld\n", rxbufsize, size);
- /* Find which queue we are targetting */
+ /* Find which queue we are targeting */
q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize);
while (bytes_to_copy) {
diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c
index 17f0338d1c..efa33ad40a 100644
--- a/hw/net/dp8393x.c
+++ b/hw/net/dp8393x.c
@@ -174,6 +174,52 @@ typedef struct dp8393xState {
AddressSpace as;
} dp8393xState;
+/* Accessor functions for values which are formed by
+ * concatenating two 16 bit device registers. By putting these
+ * in their own functions with a uint32_t return type we avoid the
+ * pitfall of implicit sign extension where ((x << 16) | y) is a
+ * signed 32 bit integer that might get sign-extended to a 64 bit integer.
+ */
+static uint32_t dp8393x_cdp(dp8393xState *s)
+{
+ return (s->regs[SONIC_URRA] << 16) | s->regs[SONIC_CDP];
+}
+
+static uint32_t dp8393x_crba(dp8393xState *s)
+{
+ return (s->regs[SONIC_CRBA1] << 16) | s->regs[SONIC_CRBA0];
+}
+
+static uint32_t dp8393x_crda(dp8393xState *s)
+{
+ return (s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA];
+}
+
+static uint32_t dp8393x_rbwc(dp8393xState *s)
+{
+ return (s->regs[SONIC_RBWC1] << 16) | s->regs[SONIC_RBWC0];
+}
+
+static uint32_t dp8393x_rrp(dp8393xState *s)
+{
+ return (s->regs[SONIC_URRA] << 16) | s->regs[SONIC_RRP];
+}
+
+static uint32_t dp8393x_tsa(dp8393xState *s)
+{
+ return (s->regs[SONIC_TSA1] << 16) | s->regs[SONIC_TSA0];
+}
+
+static uint32_t dp8393x_ttda(dp8393xState *s)
+{
+ return (s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA];
+}
+
+static uint32_t dp8393x_wt(dp8393xState *s)
+{
+ return s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0];
+}
+
static void dp8393x_update_irq(dp8393xState *s)
{
int level = (s->regs[SONIC_IMR] & s->regs[SONIC_ISR]) ? 1 : 0;
@@ -203,8 +249,7 @@ static void dp8393x_do_load_cam(dp8393xState *s)
while (s->regs[SONIC_CDC] & 0x1f) {
/* Fill current entry */
- address_space_rw(&s->as,
- (s->regs[SONIC_URRA] << 16) | s->regs[SONIC_CDP],
+ address_space_rw(&s->as, dp8393x_cdp(s),
MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
s->cam[index][0] = data[1 * width] & 0xff;
s->cam[index][1] = data[1 * width] >> 8;
@@ -222,8 +267,7 @@ static void dp8393x_do_load_cam(dp8393xState *s)
}
/* Read CAM enable */
- address_space_rw(&s->as,
- (s->regs[SONIC_URRA] << 16) | s->regs[SONIC_CDP],
+ address_space_rw(&s->as, dp8393x_cdp(s),
MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
s->regs[SONIC_CE] = data[0 * width];
DPRINTF("load cam done. cam enable mask 0x%04x\n", s->regs[SONIC_CE]);
@@ -242,8 +286,7 @@ static void dp8393x_do_read_rra(dp8393xState *s)
/* Read memory */
width = (s->regs[SONIC_DCR] & SONIC_DCR_DW) ? 2 : 1;
size = sizeof(uint16_t) * 4 * width;
- address_space_rw(&s->as,
- (s->regs[SONIC_URRA] << 16) | s->regs[SONIC_RRP],
+ address_space_rw(&s->as, dp8393x_rrp(s),
MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
/* Update SONIC registers */
@@ -292,7 +335,7 @@ static void dp8393x_set_next_tick(dp8393xState *s)
return;
}
- ticks = s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0];
+ ticks = dp8393x_wt(s);
s->wt_last_update = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
delay = NANOSECONDS_PER_SECOND * ticks / 5000000;
timer_mod(s->watchdog, s->wt_last_update + delay);
@@ -309,7 +352,7 @@ static void dp8393x_update_wt_regs(dp8393xState *s)
}
elapsed = s->wt_last_update - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
- val = s->regs[SONIC_WT1] << 16 | s->regs[SONIC_WT0];
+ val = dp8393x_wt(s);
val -= elapsed / 5000000;
s->regs[SONIC_WT1] = (val >> 16) & 0xffff;
s->regs[SONIC_WT0] = (val >> 0) & 0xffff;
@@ -356,12 +399,11 @@ static void dp8393x_do_transmit_packets(dp8393xState *s)
while (1) {
/* Read memory */
- DPRINTF("Transmit packet at %08x\n",
- (s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_CTDA]);
size = sizeof(uint16_t) * 6 * width;
s->regs[SONIC_TTDA] = s->regs[SONIC_CTDA];
+ DPRINTF("Transmit packet at %08x\n", dp8393x_ttda(s));
address_space_rw(&s->as,
- ((s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA]) + sizeof(uint16_t) * width,
+ dp8393x_ttda(s) + sizeof(uint16_t) * width,
MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
tx_len = 0;
@@ -386,8 +428,7 @@ static void dp8393x_do_transmit_packets(dp8393xState *s)
if (tx_len + len > sizeof(s->tx_buffer)) {
len = sizeof(s->tx_buffer) - tx_len;
}
- address_space_rw(&s->as,
- (s->regs[SONIC_TSA1] << 16) | s->regs[SONIC_TSA0],
+ address_space_rw(&s->as, dp8393x_tsa(s),
MEMTXATTRS_UNSPECIFIED, &s->tx_buffer[tx_len], len, 0);
tx_len += len;
@@ -396,7 +437,7 @@ static void dp8393x_do_transmit_packets(dp8393xState *s)
/* Read next fragment details */
size = sizeof(uint16_t) * 3 * width;
address_space_rw(&s->as,
- ((s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA]) + sizeof(uint16_t) * (4 + 3 * i) * width,
+ dp8393x_ttda(s) + sizeof(uint16_t) * (4 + 3 * i) * width,
MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
s->regs[SONIC_TSA0] = data[0 * width];
s->regs[SONIC_TSA1] = data[1 * width];
@@ -430,14 +471,16 @@ static void dp8393x_do_transmit_packets(dp8393xState *s)
data[0 * width] = s->regs[SONIC_TCR] & 0x0fff; /* status */
size = sizeof(uint16_t) * width;
address_space_rw(&s->as,
- (s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA],
+ dp8393x_ttda(s),
MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 1);
if (!(s->regs[SONIC_CR] & SONIC_CR_HTX)) {
/* Read footer of packet */
size = sizeof(uint16_t) * width;
address_space_rw(&s->as,
- ((s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA]) + sizeof(uint16_t) * (4 + 3 * s->regs[SONIC_TFC]) * width,
+ dp8393x_ttda(s) +
+ sizeof(uint16_t) *
+ (4 + 3 * s->regs[SONIC_TFC]) * width,
MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
s->regs[SONIC_CTDA] = data[0 * width] & ~0x1;
if (data[0 * width] & 0x1) {
@@ -700,7 +743,7 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
if (s->regs[SONIC_LLFA] & 0x1) {
/* Are we still in resource exhaustion? */
size = sizeof(uint16_t) * 1 * width;
- address = ((s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA]) + sizeof(uint16_t) * 5 * width;
+ address = dp8393x_crda(s) + sizeof(uint16_t) * 5 * width;
address_space_rw(&s->as, address, MEMTXATTRS_UNSPECIFIED,
(uint8_t *)data, size, 0);
if (data[0 * width] & 0x1) {
@@ -719,8 +762,8 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
checksum = cpu_to_le32(crc32(0, buf, rx_len));
/* Put packet into RBA */
- DPRINTF("Receive packet at %08x\n", (s->regs[SONIC_CRBA1] << 16) | s->regs[SONIC_CRBA0]);
- address = (s->regs[SONIC_CRBA1] << 16) | s->regs[SONIC_CRBA0];
+ DPRINTF("Receive packet at %08x\n", dp8393x_crba(s));
+ address = dp8393x_crba(s);
address_space_rw(&s->as, address,
MEMTXATTRS_UNSPECIFIED, (uint8_t *)buf, rx_len, 1);
address += rx_len;
@@ -729,13 +772,13 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
rx_len += 4;
s->regs[SONIC_CRBA1] = address >> 16;
s->regs[SONIC_CRBA0] = address & 0xffff;
- available = (s->regs[SONIC_RBWC1] << 16) | s->regs[SONIC_RBWC0];
+ available = dp8393x_rbwc(s);
available -= rx_len / 2;
s->regs[SONIC_RBWC1] = available >> 16;
s->regs[SONIC_RBWC0] = available & 0xffff;
/* Update status */
- if (((s->regs[SONIC_RBWC1] << 16) | s->regs[SONIC_RBWC0]) < s->regs[SONIC_EOBC]) {
+ if (dp8393x_rbwc(s) < s->regs[SONIC_EOBC]) {
s->regs[SONIC_RCR] |= SONIC_RCR_LPKT;
}
s->regs[SONIC_RCR] |= packet_type;
@@ -746,20 +789,19 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
}
/* Write status to memory */
- DPRINTF("Write status at %08x\n", (s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA]);
+ DPRINTF("Write status at %08x\n", dp8393x_crda(s));
data[0 * width] = s->regs[SONIC_RCR]; /* status */
data[1 * width] = rx_len; /* byte count */
data[2 * width] = s->regs[SONIC_TRBA0]; /* pkt_ptr0 */
data[3 * width] = s->regs[SONIC_TRBA1]; /* pkt_ptr1 */
data[4 * width] = s->regs[SONIC_RSC]; /* seq_no */
size = sizeof(uint16_t) * 5 * width;
- address_space_rw(&s->as, (s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA],
+ address_space_rw(&s->as, dp8393x_crda(s),
MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 1);
/* Move to next descriptor */
size = sizeof(uint16_t) * width;
- address_space_rw(&s->as,
- ((s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA]) + sizeof(uint16_t) * 5 * width,
+ address_space_rw(&s->as, dp8393x_crda(s) + sizeof(uint16_t) * 5 * width,
MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, size, 0);
s->regs[SONIC_LLFA] = data[0 * width];
if (s->regs[SONIC_LLFA] & 0x1) {
@@ -767,8 +809,7 @@ static ssize_t dp8393x_receive(NetClientState *nc, const uint8_t * buf,
s->regs[SONIC_ISR] |= SONIC_ISR_RDE;
} else {
data[0 * width] = 0; /* in_use */
- address_space_rw(&s->as,
- ((s->regs[SONIC_URDA] << 16) | s->regs[SONIC_CRDA]) + sizeof(uint16_t) * 6 * width,
+ address_space_rw(&s->as, dp8393x_crda(s) + sizeof(uint16_t) * 6 * width,
MEMTXATTRS_UNSPECIFIED, (uint8_t *)data, sizeof(uint16_t), 1);
s->regs[SONIC_CRDA] = s->regs[SONIC_LLFA];
s->regs[SONIC_ISR] |= SONIC_ISR_PKTRX;
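The dp8393x change above replaces the open-coded (hi << 16) | lo register concatenations with uint32_t-returning accessors, for the sign-extension reason its comment gives. A minimal standalone sketch of that pitfall follows; the helper names are invented for the example, not the driver's own, and the "naive" result relies on the usual two's-complement behaviour of common compilers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* The uint16_t halves promote to (signed) int, so with bit 15 of 'hi' set
 * the 32-bit intermediate is negative and widening it to 64 bits
 * sign-extends, producing a bogus address. */
static uint64_t concat_naive(uint16_t hi, uint16_t lo)
{
    return (hi << 16) | lo;
}

/* Forcing the result through uint32_t, as the new accessors do, keeps the
 * value unsigned when it is later widened. */
static uint32_t concat_fixed(uint16_t hi, uint16_t lo)
{
    return ((uint32_t)hi << 16) | lo;
}

int main(void)
{
    printf("naive: 0x%016" PRIx64 "\n", concat_naive(0x8000, 0x1234));
    printf("fixed: 0x%016" PRIx64 "\n", (uint64_t)concat_fixed(0x8000, 0x1234));
    return 0;
}

On typical compilers the first line prints 0xffffffff80001234 and the second 0x0000000080001234, which is exactly the class of bad DMA address the accessors are meant to avoid.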
diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c
index 4994e1ca00..0e9a25b7ab 100644
--- a/hw/net/e1000e.c
+++ b/hw/net/e1000e.c
@@ -472,7 +472,8 @@ static void e1000e_pci_realize(PCIDevice *pci_dev, Error **errp)
hw_error("Failed to initialize PM capability");
}
- if (pcie_aer_init(pci_dev, e1000e_aer_offset, PCI_ERR_SIZEOF) < 0) {
+ if (pcie_aer_init(pci_dev, PCI_ERR_VER, e1000e_aer_offset,
+ PCI_ERR_SIZEOF, NULL) < 0) {
hw_error("Failed to initialize AER capability");
}
@@ -592,7 +593,7 @@ static const VMStateDescription e1000e_vmstate = {
.pre_save = e1000e_pre_save,
.post_load = e1000e_post_load,
.fields = (VMStateField[]) {
- VMSTATE_PCIE_DEVICE(parent_obj, E1000EState),
+ VMSTATE_PCI_DEVICE(parent_obj, E1000EState),
VMSTATE_MSIX(parent_obj, E1000EState),
VMSTATE_UINT32(ioaddr, E1000EState),
diff --git a/hw/net/fsl_etsec/rings.c b/hw/net/fsl_etsec/rings.c
index 54c01275d4..d0f93eebfc 100644
--- a/hw/net/fsl_etsec/rings.c
+++ b/hw/net/fsl_etsec/rings.c
@@ -358,25 +358,24 @@ void etsec_walk_tx_ring(eTSEC *etsec, int ring_nbr)
/* Save flags before BD update */
bd_flags = bd.flags;
- if (bd_flags & BD_TX_READY) {
- process_tx_bd(etsec, &bd);
-
- /* Write back BD after update */
- write_buffer_descriptor(etsec, bd_addr, &bd);
+ if (!(bd_flags & BD_TX_READY)) {
+ break;
}
+ process_tx_bd(etsec, &bd);
+ /* Write back BD after update */
+ write_buffer_descriptor(etsec, bd_addr, &bd);
+
/* Wrap or next BD */
if (bd_flags & BD_WRAP) {
bd_addr = ring_base;
} else {
bd_addr += sizeof(eTSEC_rxtx_bd);
}
+ } while (TRUE);
- } while (bd_addr != ring_base);
-
- bd_addr = ring_base;
-
- /* Save the Buffer Descriptor Pointers to current bd */
+ /* Save the Buffer Descriptor Pointers to the last BD that was not
+ * successfully closed */
etsec->regs[TBPTR0 + ring_nbr].value = bd_addr;
/* Set transmit halt THLTx */
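The eTSEC rework above stops the transmit walk at the first buffer descriptor that is not READY and records the halt pointer there, instead of always sweeping until the ring wraps and resetting TBPTR to the ring base. A condensed, runnable sketch of that pattern, with a made-up descriptor type standing in for the device's own structures:

#include <stdint.h>
#include <stdio.h>

#define BD_TX_READY 0x8000
#define BD_WRAP     0x2000

struct tx_bd {
    uint16_t flags;
    uint16_t length;
};

/* Walk the ring starting at index 'cur' and stop at the first descriptor
 * the producer has not marked READY; that index is where the halt pointer
 * (TBPTR in the eTSEC) should point so the next walk resumes there. */
static unsigned walk_tx_ring(struct tx_bd *ring, unsigned cur)
{
    for (;;) {
        struct tx_bd *bd = &ring[cur];

        if (!(bd->flags & BD_TX_READY)) {
            break;                          /* nothing more queued */
        }
        printf("transmit BD %u, %u bytes\n", cur, bd->length);
        bd->flags &= ~BD_TX_READY;          /* close the descriptor */

        cur = (bd->flags & BD_WRAP) ? 0 : cur + 1;
    }
    return cur;                             /* resume point */
}

int main(void)
{
    struct tx_bd ring[4] = {
        { BD_TX_READY, 64 },
        { BD_TX_READY, 128 },
        { 0, 0 },                           /* not handed over yet */
        { BD_WRAP, 0 },
    };

    printf("halt at BD %u\n", walk_tx_ring(ring, 0));
    return 0;
}

Here the walk closes BDs 0 and 1 and halts at BD 2, which is the behaviour the patch restores for the hardware pointer.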
diff --git a/hw/net/mcf_fec.c b/hw/net/mcf_fec.c
index 4025eb3b33..a3eca7e0f5 100644
--- a/hw/net/mcf_fec.c
+++ b/hw/net/mcf_fec.c
@@ -9,7 +9,9 @@
#include "hw/hw.h"
#include "net/net.h"
#include "hw/m68k/mcf.h"
+#include "hw/m68k/mcf_fec.h"
#include "hw/net/mii.h"
+#include "hw/sysbus.h"
/* For crc32 */
#include <zlib.h>
#include "exec/address-spaces.h"
@@ -27,9 +29,10 @@ do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#define FEC_MAX_FRAME_SIZE 2032
typedef struct {
- MemoryRegion *sysmem;
+ SysBusDevice parent_obj;
+
MemoryRegion iomem;
- qemu_irq *irq;
+ qemu_irq irq[FEC_NUM_IRQ];
NICState *nic;
NICConf conf;
uint32_t irq_state;
@@ -68,7 +71,6 @@ typedef struct {
#define FEC_RESET 1
/* Map interrupt flags onto IRQ lines. */
-#define FEC_NUM_IRQ 13
static const uint32_t mcf_fec_irq_map[FEC_NUM_IRQ] = {
FEC_INT_TXF,
FEC_INT_TXB,
@@ -208,8 +210,10 @@ static void mcf_fec_enable_rx(mcf_fec_state *s)
}
}
-static void mcf_fec_reset(mcf_fec_state *s)
+static void mcf_fec_reset(DeviceState *dev)
{
+ mcf_fec_state *s = MCF_FEC_NET(dev);
+
s->eir = 0;
s->eimr = 0;
s->rx_enabled = 0;
@@ -330,7 +334,7 @@ static void mcf_fec_write(void *opaque, hwaddr addr,
s->ecr = value;
if (value & FEC_RESET) {
DPRINTF("Reset\n");
- mcf_fec_reset(s);
+ mcf_fec_reset(opaque);
}
if ((s->ecr & FEC_EN) == 0) {
s->rx_enabled = 0;
@@ -513,24 +517,55 @@ static NetClientInfo net_mcf_fec_info = {
.receive = mcf_fec_receive,
};
-void mcf_fec_init(MemoryRegion *sysmem, NICInfo *nd,
- hwaddr base, qemu_irq *irq)
+static void mcf_fec_realize(DeviceState *dev, Error **errp)
{
- mcf_fec_state *s;
+ mcf_fec_state *s = MCF_FEC_NET(dev);
+
+ s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf,
+ object_get_typename(OBJECT(dev)), dev->id, s);
+ qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
+}
- qemu_check_nic_model(nd, "mcf_fec");
+static void mcf_fec_instance_init(Object *obj)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+ mcf_fec_state *s = MCF_FEC_NET(obj);
+ int i;
+
+ memory_region_init_io(&s->iomem, obj, &mcf_fec_ops, s, "fec", 0x400);
+ sysbus_init_mmio(sbd, &s->iomem);
+ for (i = 0; i < FEC_NUM_IRQ; i++) {
+ sysbus_init_irq(sbd, &s->irq[i]);
+ }
+}
- s = (mcf_fec_state *)g_malloc0(sizeof(mcf_fec_state));
- s->sysmem = sysmem;
- s->irq = irq;
+static Property mcf_fec_properties[] = {
+ DEFINE_NIC_PROPERTIES(mcf_fec_state, conf),
+ DEFINE_PROP_END_OF_LIST(),
+};
- memory_region_init_io(&s->iomem, NULL, &mcf_fec_ops, s, "fec", 0x400);
- memory_region_add_subregion(sysmem, base, &s->iomem);
+static void mcf_fec_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
- s->conf.macaddr = nd->macaddr;
- s->conf.peers.ncs[0] = nd->netdev;
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+ dc->realize = mcf_fec_realize;
+ dc->desc = "MCF Fast Ethernet Controller network device";
+ dc->reset = mcf_fec_reset;
+ dc->props = mcf_fec_properties;
+}
- s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf, nd->model, nd->name, s);
+static const TypeInfo mcf_fec_info = {
+ .name = TYPE_MCF_FEC_NET,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(mcf_fec_state),
+ .instance_init = mcf_fec_instance_init,
+ .class_init = mcf_fec_class_init,
+};
- qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
+static void mcf_fec_register_types(void)
+{
+ type_register_static(&mcf_fec_info);
}
+
+type_init(mcf_fec_register_types)
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c
index f05e59c85f..671c7e48c6 100644
--- a/hw/net/rtl8139.c
+++ b/hw/net/rtl8139.c
@@ -1205,6 +1205,20 @@ static void rtl8139_reset_rxring(RTL8139State *s, uint32_t bufferSize)
s->RxBufAddr = 0;
}
+static void rtl8139_reset_phy(RTL8139State *s)
+{
+ s->BasicModeStatus = 0x7809;
+ s->BasicModeStatus |= 0x0020; /* autonegotiation completed */
+ /* preserve link state */
+ s->BasicModeStatus |= qemu_get_queue(s->nic)->link_down ? 0 : 0x04;
+
+ s->NWayAdvert = 0x05e1; /* all modes, full duplex */
+ s->NWayLPAR = 0x05e1; /* all modes, full duplex */
+ s->NWayExpansion = 0x0001; /* autonegotiation supported */
+
+ s->CSCR = CSCR_F_LINK_100 | CSCR_HEART_BIT | CSCR_LD;
+}
+
static void rtl8139_reset(DeviceState *d)
{
RTL8139State *s = RTL8139(d);
@@ -1256,25 +1270,14 @@ static void rtl8139_reset(DeviceState *d)
s->Config3 = 0x1; /* fast back-to-back compatible */
s->Config5 = 0x0;
- s->CSCR = CSCR_F_LINK_100 | CSCR_HEART_BIT | CSCR_LD;
-
s->CpCmd = 0x0; /* reset C+ mode */
s->cplus_enabled = 0;
-
// s->BasicModeCtrl = 0x3100; // 100Mbps, full duplex, autonegotiation
// s->BasicModeCtrl = 0x2100; // 100Mbps, full duplex
s->BasicModeCtrl = 0x1000; // autonegotiation
- s->BasicModeStatus = 0x7809;
- //s->BasicModeStatus |= 0x0040; /* UTP medium */
- s->BasicModeStatus |= 0x0020; /* autonegotiation completed */
- /* preserve link state */
- s->BasicModeStatus |= qemu_get_queue(s->nic)->link_down ? 0 : 0x04;
-
- s->NWayAdvert = 0x05e1; /* all modes, full duplex */
- s->NWayLPAR = 0x05e1; /* all modes, full duplex */
- s->NWayExpansion = 0x0001; /* autonegotiation supported */
+ rtl8139_reset_phy(s);
/* also reset timer and disable timer interrupt */
s->TCTR = 0;
@@ -1469,7 +1472,7 @@ static void rtl8139_BasicModeCtrl_write(RTL8139State *s, uint32_t val)
DPRINTF("BasicModeCtrl register write(w) val=0x%04x\n", val);
/* mask unwritable bits */
- uint32_t mask = 0x4cff;
+ uint32_t mask = 0xccff;
if (1 || !rtl8139_config_writable(s))
{
@@ -1479,6 +1482,11 @@ static void rtl8139_BasicModeCtrl_write(RTL8139State *s, uint32_t val)
mask |= 0x0100;
}
+ if (val & 0x8000) {
+ /* Reset PHY */
+ rtl8139_reset_phy(s);
+ }
+
val = SET_MASKED(val, mask, s->BasicModeCtrl);
s->BasicModeCtrl = val;
diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c
index 01ecb02773..058908d8d7 100644
--- a/hw/net/spapr_llan.c
+++ b/hw/net/spapr_llan.c
@@ -105,7 +105,7 @@ typedef struct VIOsPAPRVLANDevice {
uint32_t add_buf_ptr, use_buf_ptr, rx_bufs;
hwaddr rxq_ptr;
QEMUTimer *rxp_timer;
- uint32_t compat_flags; /* Compatability flags for migration */
+ uint32_t compat_flags; /* Compatibility flags for migration */
RxBufPool *rx_pool[RX_MAX_POOLS]; /* Receive buffer descriptor pools */
} VIOsPAPRVLANDevice;
@@ -559,7 +559,7 @@ static target_long spapr_vlan_add_rxbuf_to_pool(VIOsPAPRVLANDevice *dev,
if (pool < 0) {
/*
* No matching pool found? Try to use a new one. If the guest used all
- * pools before, but changed the size of one pool inbetween, we might
+ * pools before, but changed the size of one pool in the meantime, we might
* need to recycle that pool here (if it's empty already). Thus scan
* all buffer pools now, starting with the last (likely empty) one.
*/
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index f2d49ad7e7..22874a9777 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -51,6 +51,8 @@ static const int kernel_feature_bits[] = {
VIRTIO_RING_F_EVENT_IDX,
VIRTIO_NET_F_MRG_RXBUF,
VIRTIO_F_VERSION_1,
+ VIRTIO_NET_F_MTU,
+ VIRTIO_F_IOMMU_PLATFORM,
VHOST_INVALID_FEATURE_BIT
};
@@ -74,6 +76,7 @@ static const int user_feature_bits[] = {
VIRTIO_NET_F_HOST_ECN,
VIRTIO_NET_F_HOST_UFO,
VIRTIO_NET_F_MRG_RXBUF,
+ VIRTIO_NET_F_MTU,
/* This bit implies RARP isn't sent by QEMU out of band */
VIRTIO_NET_F_GUEST_ANNOUNCE,
@@ -435,6 +438,17 @@ int vhost_set_vring_enable(NetClientState *nc, int enable)
return 0;
}
+int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
+{
+ const VhostOps *vhost_ops = net->dev.vhost_ops;
+
+ if (!vhost_ops->vhost_net_set_mtu) {
+ return 0;
+ }
+
+ return vhost_ops->vhost_net_set_mtu(&net->dev, mtu);
+}
+
#else
uint64_t vhost_net_get_max_queues(VHostNetState *net)
{
@@ -501,4 +515,9 @@ int vhost_set_vring_enable(NetClientState *nc, int enable)
{
return 0;
}
+
+int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
+{
+ return 0;
+}
#endif
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 5009533cfa..7b3ad4a9f0 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -55,6 +55,8 @@ static VirtIOFeature feature_sizes[] = {
.end = endof(struct virtio_net_config, status)},
{.flags = 1 << VIRTIO_NET_F_MQ,
.end = endof(struct virtio_net_config, max_virtqueue_pairs)},
+ {.flags = 1 << VIRTIO_NET_F_MTU,
+ .end = endof(struct virtio_net_config, mtu)},
{}
};
@@ -81,6 +83,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
virtio_stw_p(vdev, &netcfg.status, n->status);
virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
+ virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
memcpy(netcfg.mac, n->mac, ETH_ALEN);
memcpy(config, &netcfg, n->config_size);
}
@@ -152,6 +155,16 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
}
+ if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
+ r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
+ if (r < 0) {
+ error_report("%uBytes MTU not supported by the backend",
+ n->net_conf.mtu);
+
+ return;
+ }
+ }
+
n->vhost_started = 1;
r = vhost_net_start(vdev, n->nic->ncs, queues);
if (r < 0) {
@@ -218,6 +231,14 @@ static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
}
}
+static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
+{
+ unsigned int dropped = virtqueue_drop_all(vq);
+ if (dropped) {
+ virtio_notify(vdev, vq);
+ }
+}
+
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
VirtIONet *n = VIRTIO_NET(vdev);
@@ -262,6 +283,14 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
} else {
qemu_bh_cancel(q->tx_bh);
}
+ if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
+ (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ /* if tx is waiting, we likely have some packets in the tx queue
+ * and have disabled notification */
+ q->tx_waiting = 0;
+ virtio_queue_set_notification(q->tx_vq, 1);
+ virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
+ }
}
}
}
@@ -1323,6 +1352,11 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
VirtIONet *n = VIRTIO_NET(vdev);
VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
+ if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
+ virtio_net_drop_tx_queue_data(vdev, vq);
+ return;
+ }
+
/* This happens when device was stopped but VCPU wasn't. */
if (!vdev->vm_running) {
q->tx_waiting = 1;
@@ -1349,6 +1383,11 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
VirtIONet *n = VIRTIO_NET(vdev);
VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
+ if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
+ virtio_net_drop_tx_queue_data(vdev, vq);
+ return;
+ }
+
if (unlikely(q->tx_waiting)) {
return;
}
@@ -1695,6 +1734,7 @@ static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
int i, config_size = 0;
virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
+
for (i = 0; feature_sizes[i].flags != 0; i++) {
if (host_features & feature_sizes[i].flags) {
config_size = MAX(feature_sizes[i].end, config_size);
@@ -1724,6 +1764,10 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
NetClientState *nc;
int i;
+ if (n->net_conf.mtu) {
+ n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
+ }
+
virtio_net_set_config_size(n, n->host_features);
virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
@@ -1922,6 +1966,7 @@ static Property virtio_net_properties[] = {
DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
+ DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
DEFINE_PROP_END_OF_LIST(),
};
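In the virtio-net changes above, a non-zero host_mtu property turns on VIRTIO_NET_F_MTU, and the feature_sizes table then grows config_size so the mtu field becomes visible to the guest. A small illustration of that size computation; the flag values and offsets below are made up for the example and are not the real virtio_net_config layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Each optional feature extends the config space up to the end of "its"
 * field; the device exposes only as much config as the enabled features
 * require. */
struct feature_size { uint64_t flag; size_t end; };

int main(void)
{
    enum { F_STATUS = 1u << 0, F_MQ = 1u << 1, F_MTU = 1u << 2 };
    const struct feature_size sizes[] = {
        { F_STATUS, 8 },
        { F_MQ,     10 },
        { F_MTU,    12 },
        { 0, 0 }
    };
    uint64_t host_features = F_STATUS | F_MTU;   /* e.g. host_mtu set */
    size_t config_size = 6;                      /* mandatory MAC field */

    for (int i = 0; sizes[i].flag != 0; i++) {
        if (host_features & sizes[i].flag) {
            config_size = MAX(sizes[i].end, config_size);
        }
    }
    printf("config size: %zu bytes\n", config_size);  /* prints 12 */
    return 0;
}

With the MTU feature enabled the computed size covers the (illustrative) mtu field; without it, the config stops at the status field and the guest never sees the extra bytes.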
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 92f6af9620..2cb2731e29 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -2451,7 +2451,8 @@ static void vmxnet3_put_tx_stats_to_file(QEMUFile *f,
qemu_put_be64(f, tx_stat->pktsTxDiscard);
}
-static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size)
+static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
Vmxnet3TxqDescr *r = pv;
@@ -2465,7 +2466,8 @@ static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void vmxnet3_put_txq_descr(QEMUFile *f, void *pv, size_t size)
+static int vmxnet3_put_txq_descr(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
Vmxnet3TxqDescr *r = pv;
@@ -2474,6 +2476,8 @@ static void vmxnet3_put_txq_descr(QEMUFile *f, void *pv, size_t size)
qemu_put_byte(f, r->intr_idx);
qemu_put_be64(f, r->tx_stats_pa);
vmxnet3_put_tx_stats_to_file(f, &r->txq_stats);
+
+ return 0;
}
static const VMStateInfo txq_descr_info = {
@@ -2512,7 +2516,8 @@ static void vmxnet3_put_rx_stats_to_file(QEMUFile *f,
qemu_put_be64(f, rx_stat->pktsRxError);
}
-static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size)
+static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
Vmxnet3RxqDescr *r = pv;
int i;
@@ -2530,7 +2535,8 @@ static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void vmxnet3_put_rxq_descr(QEMUFile *f, void *pv, size_t size)
+static int vmxnet3_put_rxq_descr(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
Vmxnet3RxqDescr *r = pv;
int i;
@@ -2543,6 +2549,8 @@ static void vmxnet3_put_rxq_descr(QEMUFile *f, void *pv, size_t size)
qemu_put_byte(f, r->intr_idx);
qemu_put_be64(f, r->rx_stats_pa);
vmxnet3_put_rx_stats_to_file(f, &r->rxq_stats);
+
+ return 0;
}
static int vmxnet3_post_load(void *opaque, int version_id)
@@ -2575,7 +2583,8 @@ static const VMStateInfo rxq_descr_info = {
.put = vmxnet3_put_rxq_descr
};
-static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size)
+static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
Vmxnet3IntState *r = pv;
@@ -2586,13 +2595,16 @@ static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void vmxnet3_put_int_state(QEMUFile *f, void *pv, size_t size)
+static int vmxnet3_put_int_state(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
Vmxnet3IntState *r = pv;
qemu_put_byte(f, r->is_masked);
qemu_put_byte(f, r->is_pending);
qemu_put_byte(f, r->is_asserted);
+
+ return 0;
}
static const VMStateInfo int_state_info = {
@@ -2619,7 +2631,7 @@ static const VMStateDescription vmstate_vmxnet3_pcie_device = {
.minimum_version_id = 1,
.needed = vmxnet3_vmstate_need_pcie_device,
.fields = (VMStateField[]) {
- VMSTATE_PCIE_DEVICE(parent_obj, VMXNET3State),
+ VMSTATE_PCI_DEVICE(parent_obj, VMXNET3State),
VMSTATE_END_OF_LIST()
}
};
diff --git a/hw/nios2/10m50_devboard.c b/hw/nios2/10m50_devboard.c
new file mode 100644
index 0000000000..62e5738b65
--- /dev/null
+++ b/hw/nios2/10m50_devboard.c
@@ -0,0 +1,126 @@
+/*
+ * Altera 10M50 Nios2 GHRD
+ *
+ * Copyright (c) 2016 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Based on LabX device code
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "cpu.h"
+
+#include "hw/sysbus.h"
+#include "hw/hw.h"
+#include "hw/char/serial.h"
+#include "sysemu/sysemu.h"
+#include "hw/boards.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
+#include "qemu/config-file.h"
+
+#include "boot.h"
+
+#define BINARY_DEVICE_TREE_FILE "10m50-devboard.dtb"
+
+static void nios2_10m50_ghrd_init(MachineState *machine)
+{
+ Nios2CPU *cpu;
+ DeviceState *dev;
+ MemoryRegion *address_space_mem = get_system_memory();
+ MemoryRegion *phys_tcm = g_new(MemoryRegion, 1);
+ MemoryRegion *phys_tcm_alias = g_new(MemoryRegion, 1);
+ MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
+ MemoryRegion *phys_ram_alias = g_new(MemoryRegion, 1);
+ ram_addr_t tcm_base = 0x0;
+ ram_addr_t tcm_size = 0x1000; /* 1 kiB, but QEMU limit is 4 kiB */
+ ram_addr_t ram_base = 0x08000000;
+ ram_addr_t ram_size = 0x08000000;
+ qemu_irq *cpu_irq, irq[32];
+ int i;
+
+ /* Physical TCM (tb_ram_1k) with alias at 0xc0000000 */
+ memory_region_init_ram(phys_tcm, NULL, "nios2.tcm", tcm_size, &error_abort);
+ memory_region_init_alias(phys_tcm_alias, NULL, "nios2.tcm.alias",
+ phys_tcm, 0, tcm_size);
+ vmstate_register_ram_global(phys_tcm);
+ memory_region_add_subregion(address_space_mem, tcm_base, phys_tcm);
+ memory_region_add_subregion(address_space_mem, 0xc0000000 + tcm_base,
+ phys_tcm_alias);
+
+ /* Physical DRAM with alias at 0xc0000000 */
+ memory_region_init_ram(phys_ram, NULL, "nios2.ram", ram_size, &error_abort);
+ memory_region_init_alias(phys_ram_alias, NULL, "nios2.ram.alias",
+ phys_ram, 0, ram_size);
+ vmstate_register_ram_global(phys_ram);
+ memory_region_add_subregion(address_space_mem, ram_base, phys_ram);
+ memory_region_add_subregion(address_space_mem, 0xc0000000 + ram_base,
+ phys_ram_alias);
+
+ /* Create CPU -- FIXME */
+ cpu = cpu_nios2_init("nios2");
+
+ /* Register: CPU interrupt controller (PIC) */
+ cpu_irq = nios2_cpu_pic_init(cpu);
+
+ /* Register: Internal Interrupt Controller (IIC) */
+ dev = qdev_create(NULL, "altera,iic");
+ qdev_prop_set_ptr(dev, "cpu", cpu);
+ qdev_init_nofail(dev);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq[0]);
+ for (i = 0; i < 32; i++) {
+ irq[i] = qdev_get_gpio_in(dev, i);
+ }
+
+ /* Register: Altera 16550 UART */
+ serial_mm_init(address_space_mem, 0xf8001600, 2, irq[1], 115200,
+ serial_hds[0], DEVICE_NATIVE_ENDIAN);
+
+ /* Register: Timer sys_clk_timer */
+ dev = qdev_create(NULL, "ALTR.timer");
+ qdev_prop_set_uint32(dev, "clock-frequency", 75 * 1000000);
+ qdev_init_nofail(dev);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xf8001440);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[0]);
+
+ /* Register: Timer sys_clk_timer_1 */
+ dev = qdev_create(NULL, "ALTR.timer");
+ qdev_prop_set_uint32(dev, "clock-frequency", 75 * 1000000);
+ qdev_init_nofail(dev);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xe0000880);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[5]);
+
+ /* Configure new exception vectors and reset CPU for it to take effect. */
+ cpu->reset_addr = 0xd4000000;
+ cpu->exception_addr = 0xc8000120;
+ cpu->fast_tlb_miss_addr = 0xc0000100;
+
+ nios2_load_kernel(cpu, ram_base, ram_size, machine->initrd_filename,
+ BINARY_DEVICE_TREE_FILE, NULL);
+}
+
+static void nios2_10m50_ghrd_machine_init(struct MachineClass *mc)
+{
+ mc->desc = "Altera 10M50 GHRD Nios II design";
+ mc->init = nios2_10m50_ghrd_init;
+ mc->is_default = 1;
+}
+
+DEFINE_MACHINE("10m50-ghrd", nios2_10m50_ghrd_machine_init);
diff --git a/hw/nios2/Makefile.objs b/hw/nios2/Makefile.objs
new file mode 100644
index 0000000000..6b5c421760
--- /dev/null
+++ b/hw/nios2/Makefile.objs
@@ -0,0 +1 @@
+obj-y = boot.o cpu_pic.o 10m50_devboard.o
diff --git a/hw/nios2/boot.c b/hw/nios2/boot.c
new file mode 100644
index 0000000000..e0a9aff2f4
--- /dev/null
+++ b/hw/nios2/boot.c
@@ -0,0 +1,223 @@
+/*
+ * Nios2 kernel loader
+ *
+ * Copyright (c) 2016 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Based on microblaze kernel loader
+ *
+ * Copyright (c) 2012 Peter Crosthwaite <peter.crosthwaite@petalogix.com>
+ * Copyright (c) 2012 PetaLogix
+ * Copyright (c) 2009 Edgar E. Iglesias.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "qemu/option.h"
+#include "qemu/config-file.h"
+#include "qemu/error-report.h"
+#include "qemu-common.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/sysemu.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "qemu/cutils.h"
+
+#include "boot.h"
+
+#define NIOS2_MAGIC 0x534f494e
+
+static struct nios2_boot_info {
+ void (*machine_cpu_reset)(Nios2CPU *);
+ uint32_t bootstrap_pc;
+ uint32_t cmdline;
+ uint32_t initrd_start;
+ uint32_t initrd_end;
+ uint32_t fdt;
+} boot_info;
+
+static void main_cpu_reset(void *opaque)
+{
+ Nios2CPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ CPUNios2State *env = &cpu->env;
+
+ cpu_reset(CPU(cpu));
+
+ env->regs[R_ARG0] = NIOS2_MAGIC;
+ env->regs[R_ARG1] = boot_info.initrd_start;
+ env->regs[R_ARG2] = boot_info.fdt;
+ env->regs[R_ARG3] = boot_info.cmdline;
+
+ cpu_set_pc(cs, boot_info.bootstrap_pc);
+ if (boot_info.machine_cpu_reset) {
+ boot_info.machine_cpu_reset(cpu);
+ }
+}
+
+static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
+{
+ return addr - 0xc0000000LL;
+}
+
+static int nios2_load_dtb(struct nios2_boot_info bi, const uint32_t ramsize,
+ const char *kernel_cmdline, const char *dtb_filename)
+{
+ int fdt_size;
+ void *fdt = NULL;
+ int r;
+
+ if (dtb_filename) {
+ fdt = load_device_tree(dtb_filename, &fdt_size);
+ }
+ if (!fdt) {
+ return 0;
+ }
+
+ if (kernel_cmdline) {
+ r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
+ kernel_cmdline);
+ if (r < 0) {
+ fprintf(stderr, "couldn't set /chosen/bootargs\n");
+ }
+ }
+
+ if (bi.initrd_start) {
+ qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start",
+ translate_kernel_address(NULL, bi.initrd_start));
+
+ qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end",
+ translate_kernel_address(NULL, bi.initrd_end));
+ }
+
+ cpu_physical_memory_write(bi.fdt, fdt, fdt_size);
+ return fdt_size;
+}
+
+void nios2_load_kernel(Nios2CPU *cpu, hwaddr ddr_base,
+ uint32_t ramsize,
+ const char *initrd_filename,
+ const char *dtb_filename,
+ void (*machine_cpu_reset)(Nios2CPU *))
+{
+ QemuOpts *machine_opts;
+ const char *kernel_filename;
+ const char *kernel_cmdline;
+ const char *dtb_arg;
+ char *filename = NULL;
+
+ machine_opts = qemu_get_machine_opts();
+ kernel_filename = qemu_opt_get(machine_opts, "kernel");
+ kernel_cmdline = qemu_opt_get(machine_opts, "append");
+ dtb_arg = qemu_opt_get(machine_opts, "dtb");
+ /* default to pcbios dtb as passed by machine_init */
+ if (!dtb_arg) {
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_filename);
+ }
+
+ boot_info.machine_cpu_reset = machine_cpu_reset;
+ qemu_register_reset(main_cpu_reset, cpu);
+
+ if (kernel_filename) {
+ int kernel_size, fdt_size;
+ uint64_t entry, low, high;
+ uint32_t base32;
+ int big_endian = 0;
+
+#ifdef TARGET_WORDS_BIGENDIAN
+ big_endian = 1;
+#endif
+
+ /* Boots a kernel elf binary. */
+ kernel_size = load_elf(kernel_filename, NULL, NULL,
+ &entry, &low, &high,
+ big_endian, EM_ALTERA_NIOS2, 0, 0);
+ base32 = entry;
+ if (base32 == 0xc0000000) {
+ kernel_size = load_elf(kernel_filename, translate_kernel_address,
+ NULL, &entry, NULL, NULL,
+ big_endian, EM_ALTERA_NIOS2, 0, 0);
+ }
+
+ /* Always boot into physical ram. */
+ boot_info.bootstrap_pc = ddr_base + 0xc0000000 + (entry & 0x07ffffff);
+
+ /* If it wasn't an ELF image, try a U-Boot image. */
+ if (kernel_size < 0) {
+ hwaddr uentry, loadaddr;
+
+ kernel_size = load_uimage(kernel_filename, &uentry, &loadaddr, 0,
+ NULL, NULL);
+ boot_info.bootstrap_pc = uentry;
+ high = loadaddr + kernel_size;
+ }
+
+ /* Not an ELF image nor a U-Boot image, try a raw image. */
+ if (kernel_size < 0) {
+ kernel_size = load_image_targphys(kernel_filename, ddr_base,
+ ram_size);
+ boot_info.bootstrap_pc = ddr_base;
+ high = ddr_base + kernel_size;
+ }
+
+ high = ROUND_UP(high, 1024 * 1024);
+
+ /* If initrd is available, it goes after the kernel, aligned to 1M. */
+ if (initrd_filename) {
+ int initrd_size;
+ uint32_t initrd_offset;
+
+ boot_info.initrd_start = high;
+ initrd_offset = boot_info.initrd_start - ddr_base;
+
+ initrd_size = load_ramdisk(initrd_filename,
+ boot_info.initrd_start,
+ ram_size - initrd_offset);
+ if (initrd_size < 0) {
+ initrd_size = load_image_targphys(initrd_filename,
+ boot_info.initrd_start,
+ ram_size - initrd_offset);
+ }
+ if (initrd_size < 0) {
+ error_report("qemu: could not load initrd '%s'",
+ initrd_filename);
+ exit(EXIT_FAILURE);
+ }
+ high += initrd_size;
+ }
+ high = ROUND_UP(high, 4);
+ boot_info.initrd_end = high;
+
+ /* Device tree must be placed right after initrd (if available) */
+ boot_info.fdt = high;
+ fdt_size = nios2_load_dtb(boot_info, ram_size, kernel_cmdline,
+ /* Prefer a -dtb argument */
+ dtb_arg ? dtb_arg : filename);
+ high += fdt_size;
+
+ /* Kernel command line is at the end, 4k aligned. */
+ boot_info.cmdline = ROUND_UP(high, 4096);
+ if (kernel_cmdline && strlen(kernel_cmdline)) {
+ pstrcpy_targphys("cmdline", boot_info.cmdline, 256, kernel_cmdline);
+ }
+ }
+ g_free(filename);
+}
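The loader above packs the boot artifacts as kernel, then initrd rounded up to the next 1 MiB boundary, then the device tree, then the command line at the next 4 KiB boundary. A worked example of that arithmetic with invented sizes; none of these figures come from a real 10M50 image:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ROUND_UP(n, d) (((n) + (d) - 1) / (d) * (d))

int main(void)
{
    uint32_t ddr_base    = 0x08000000;
    uint32_t kernel_end  = ddr_base + 0x380000;   /* 3.5 MiB kernel */
    uint32_t initrd_size = 0x600000;              /* 6 MiB initrd */
    uint32_t fdt_size    = 0x4000;                /* 16 KiB DTB */

    uint32_t high         = ROUND_UP(kernel_end, 1024 * 1024);
    uint32_t initrd_start = high;                            /* 0x08400000 */
    high                  = ROUND_UP(high + initrd_size, 4);
    uint32_t initrd_end   = high;                            /* 0x08a00000 */
    uint32_t fdt          = high;
    uint32_t cmdline      = ROUND_UP(high + fdt_size, 4096); /* 0x08a04000 */

    printf("initrd 0x%08" PRIx32 "..0x%08" PRIx32
           " fdt 0x%08" PRIx32 " cmdline 0x%08" PRIx32 "\n",
           initrd_start, initrd_end, fdt, cmdline);
    return 0;
}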
diff --git a/hw/nios2/boot.h b/hw/nios2/boot.h
new file mode 100644
index 0000000000..3116753818
--- /dev/null
+++ b/hw/nios2/boot.h
@@ -0,0 +1,11 @@
+#ifndef NIOS2_BOOT_H
+#define NIOS2_BOOT_H
+
+#include "hw/hw.h"
+#include "cpu.h"
+
+void nios2_load_kernel(Nios2CPU *cpu, hwaddr ddr_base, uint32_t ramsize,
+ const char *initrd_filename, const char *dtb_filename,
+ void (*machine_cpu_reset)(Nios2CPU *));
+
+#endif /* NIOS2_BOOT_H */
diff --git a/hw/nios2/cpu_pic.c b/hw/nios2/cpu_pic.c
new file mode 100644
index 0000000000..0f95987ef3
--- /dev/null
+++ b/hw/nios2/cpu_pic.c
@@ -0,0 +1,70 @@
+/*
+ * Altera Nios2 CPU PIC
+ *
+ * Copyright (c) 2016 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "cpu.h"
+
+#include "qemu/config-file.h"
+
+#include "boot.h"
+
+static void nios2_pic_cpu_handler(void *opaque, int irq, int level)
+{
+ Nios2CPU *cpu = opaque;
+ CPUNios2State *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ int type = irq ? CPU_INTERRUPT_NMI : CPU_INTERRUPT_HARD;
+
+ if (type == CPU_INTERRUPT_HARD) {
+ env->irq_pending = level;
+
+ if (level && (env->regs[CR_STATUS] & CR_STATUS_PIE)) {
+ env->irq_pending = 0;
+ cpu_interrupt(cs, type);
+ } else if (!level) {
+ env->irq_pending = 0;
+ cpu_reset_interrupt(cs, type);
+ }
+ } else {
+ if (level) {
+ cpu_interrupt(cs, type);
+ } else {
+ cpu_reset_interrupt(cs, type);
+ }
+ }
+}
+
+void nios2_check_interrupts(CPUNios2State *env)
+{
+ Nios2CPU *cpu = nios2_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ if (env->irq_pending) {
+ env->irq_pending = 0;
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+qemu_irq *nios2_cpu_pic_init(Nios2CPU *cpu)
+{
+ return qemu_allocate_irqs(nios2_pic_cpu_handler, cpu, 2);
+}
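The two IRQ inputs allocated here map onto the handler above: input 0 raises or lowers CPU_INTERRUPT_HARD (gated by CR_STATUS.PIE), input 1 drives CPU_INTERRUPT_NMI unconditionally. A minimal wiring sketch for board code, where dev stands for some SysBusDevice the board has already created (illustration only):

    qemu_irq *cpu_irq = nios2_cpu_pic_init(cpu);   /* [0] = IRQ line, [1] = NMI */
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq[0]);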
diff --git a/hw/nvram/eeprom93xx.c b/hw/nvram/eeprom93xx.c
index 2c16fc23df..848692abc0 100644
--- a/hw/nvram/eeprom93xx.c
+++ b/hw/nvram/eeprom93xx.c
@@ -94,18 +94,22 @@ struct _eeprom_t {
This is a Big hack, but it is how the old state did it.
*/
-static int get_uint16_from_uint8(QEMUFile *f, void *pv, size_t size)
+static int get_uint16_from_uint8(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
uint16_t *v = pv;
*v = qemu_get_ubyte(f);
return 0;
}
-static void put_unused(QEMUFile *f, void *pv, size_t size)
+static int put_unused(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
fprintf(stderr, "uint16_from_uint8 is used only for backwards compatibility.\n");
fprintf(stderr, "Never should be used to write a new state.\n");
exit(0);
+
+ return 0;
}
static const VMStateInfo vmstate_hack_uint16_from_uint8 = {
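The extra parameters here are part of a tree-wide VMState change that recurs below (fw_cfg, msix, pci, shpc): .get callbacks gain a VMStateField pointer, and .put callbacks gain VMStateField plus QJSON *vmdesc and now return int. A minimal sketch of a new-style pair, with hypothetical names:

    static int get_dummy(QEMUFile *f, void *pv, size_t size,
                         VMStateField *field)
    {
        *(uint8_t *)pv = qemu_get_byte(f);
        return 0;
    }

    static int put_dummy(QEMUFile *f, void *pv, size_t size,
                         VMStateField *field, QJSON *vmdesc)
    {
        qemu_put_byte(f, *(uint8_t *)pv);
        return 0;                      /* 0 on success */
    }

    static const VMStateInfo vmstate_info_dummy = {
        .name = "dummy",
        .get  = get_dummy,
        .put  = put_dummy,
    };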
diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c
index 3ebecb2260..316fca9bc1 100644
--- a/hw/nvram/fw_cfg.c
+++ b/hw/nvram/fw_cfg.c
@@ -33,6 +33,9 @@
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qemu/cutils.h"
+#include "qapi/error.h"
+
+#define FW_CFG_FILE_SLOTS_DFLT 0x20
#define FW_CFG_NAME "fw_cfg"
#define FW_CFG_PATH "/machine/" FW_CFG_NAME
@@ -54,11 +57,13 @@
#define FW_CFG_DMA_CTL_READ 0x02
#define FW_CFG_DMA_CTL_SKIP 0x04
#define FW_CFG_DMA_CTL_SELECT 0x08
+#define FW_CFG_DMA_CTL_WRITE 0x10
#define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */
typedef struct FWCfgEntry {
uint32_t len;
+ bool allow_write;
uint8_t *data;
void *callback_opaque;
FWCfgReadCallback read_callback;
@@ -69,8 +74,9 @@ struct FWCfgState {
SysBusDevice parent_obj;
/*< public >*/
- FWCfgEntry entries[2][FW_CFG_MAX_ENTRY];
- int entry_order[FW_CFG_MAX_ENTRY];
+ uint16_t file_slots;
+ FWCfgEntry *entries[2];
+ int *entry_order;
FWCfgFiles *files;
uint16_t cur_entry;
uint32_t cur_offset;
@@ -255,13 +261,24 @@ static void fw_cfg_write(FWCfgState *s, uint8_t value)
/* nothing, write support removed in QEMU v2.4+ */
}
+static inline uint16_t fw_cfg_file_slots(const FWCfgState *s)
+{
+ return s->file_slots;
+}
+
+/* Note: this function returns an exclusive limit. */
+static inline uint32_t fw_cfg_max_entry(const FWCfgState *s)
+{
+ return FW_CFG_FILE_FIRST + fw_cfg_file_slots(s);
+}
+
static int fw_cfg_select(FWCfgState *s, uint16_t key)
{
int arch, ret;
FWCfgEntry *e;
s->cur_offset = 0;
- if ((key & FW_CFG_ENTRY_MASK) >= FW_CFG_MAX_ENTRY) {
+ if ((key & FW_CFG_ENTRY_MASK) >= fw_cfg_max_entry(s)) {
s->cur_entry = FW_CFG_INVALID;
ret = 0;
} else {
@@ -326,7 +343,7 @@ static void fw_cfg_dma_transfer(FWCfgState *s)
FWCfgDmaAccess dma;
int arch;
FWCfgEntry *e;
- int read;
+ int read = 0, write = 0;
dma_addr_t dma_addr;
/* Reset the address before the next access */
@@ -353,8 +370,13 @@ static void fw_cfg_dma_transfer(FWCfgState *s)
if (dma.control & FW_CFG_DMA_CTL_READ) {
read = 1;
+ write = 0;
+ } else if (dma.control & FW_CFG_DMA_CTL_WRITE) {
+ read = 0;
+ write = 1;
} else if (dma.control & FW_CFG_DMA_CTL_SKIP) {
read = 0;
+ write = 0;
} else {
dma.length = 0;
}
@@ -374,7 +396,9 @@ static void fw_cfg_dma_transfer(FWCfgState *s)
dma.control |= FW_CFG_DMA_CTL_ERROR;
}
}
-
+ if (write) {
+ dma.control |= FW_CFG_DMA_CTL_ERROR;
+ }
} else {
if (dma.length <= (e->len - s->cur_offset)) {
len = dma.length;
@@ -391,6 +415,14 @@ static void fw_cfg_dma_transfer(FWCfgState *s)
dma.control |= FW_CFG_DMA_CTL_ERROR;
}
}
+ if (write) {
+ if (!e->allow_write ||
+ len != dma.length ||
+ dma_memory_read(s->dma_as, dma.address,
+ &e->data[s->cur_offset], len)) {
+ dma.control |= FW_CFG_DMA_CTL_ERROR;
+ }
+ }
s->cur_offset += len;
}
@@ -523,17 +555,21 @@ static void fw_cfg_reset(DeviceState *d)
Or we broke compatibility in the state, or we can't use struct tm
*/
-static int get_uint32_as_uint16(QEMUFile *f, void *pv, size_t size)
+static int get_uint32_as_uint16(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
uint32_t *v = pv;
*v = qemu_get_be16(f);
return 0;
}
-static void put_unused(QEMUFile *f, void *pv, size_t size)
+static int put_unused(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
fprintf(stderr, "uint32_as_uint16 is only used for backward compatibility.\n");
fprintf(stderr, "This functions shouldn't be called.\n");
+
+ return 0;
}
static const VMStateInfo vmstate_hack_uint32_as_uint16 = {
@@ -586,19 +622,21 @@ static const VMStateDescription vmstate_fw_cfg = {
static void fw_cfg_add_bytes_read_callback(FWCfgState *s, uint16_t key,
FWCfgReadCallback callback,
void *callback_opaque,
- void *data, size_t len)
+ void *data, size_t len,
+ bool read_only)
{
int arch = !!(key & FW_CFG_ARCH_LOCAL);
key &= FW_CFG_ENTRY_MASK;
- assert(key < FW_CFG_MAX_ENTRY && len < UINT32_MAX);
+ assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX);
assert(s->entries[arch][key].data == NULL); /* avoid key conflict */
s->entries[arch][key].data = data;
s->entries[arch][key].len = (uint32_t)len;
s->entries[arch][key].read_callback = callback;
s->entries[arch][key].callback_opaque = callback_opaque;
+ s->entries[arch][key].allow_write = !read_only;
}
static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key,
@@ -609,20 +647,21 @@ static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key,
key &= FW_CFG_ENTRY_MASK;
- assert(key < FW_CFG_MAX_ENTRY && len < UINT32_MAX);
+ assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX);
/* return the old data to the function caller, avoid memory leak */
ptr = s->entries[arch][key].data;
s->entries[arch][key].data = data;
s->entries[arch][key].len = len;
s->entries[arch][key].callback_opaque = NULL;
+ s->entries[arch][key].allow_write = false;
return ptr;
}
void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len)
{
- fw_cfg_add_bytes_read_callback(s, key, NULL, NULL, data, len);
+ fw_cfg_add_bytes_read_callback(s, key, NULL, NULL, data, len, true);
}
void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value)
@@ -749,7 +788,7 @@ static int get_fw_cfg_order(FWCfgState *s, const char *name)
void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
FWCfgReadCallback callback, void *callback_opaque,
- void *data, size_t len)
+ void *data, size_t len, bool read_only)
{
int i, index, count;
size_t dsize;
@@ -757,13 +796,13 @@ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
int order = 0;
if (!s->files) {
- dsize = sizeof(uint32_t) + sizeof(FWCfgFile) * FW_CFG_FILE_SLOTS;
+ dsize = sizeof(uint32_t) + sizeof(FWCfgFile) * fw_cfg_file_slots(s);
s->files = g_malloc0(dsize);
fw_cfg_add_bytes(s, FW_CFG_FILE_DIR, s->files, dsize);
}
count = be32_to_cpu(s->files->count);
- assert(count < FW_CFG_FILE_SLOTS);
+ assert(count < fw_cfg_file_slots(s));
/* Find the insertion point. */
if (mc->legacy_fw_cfg_order) {
@@ -811,7 +850,8 @@ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
}
fw_cfg_add_bytes_read_callback(s, FW_CFG_FILE_FIRST + index,
- callback, callback_opaque, data, len);
+ callback, callback_opaque, data, len,
+ read_only);
s->files->f[index].size = cpu_to_be32(len);
s->files->f[index].select = cpu_to_be16(FW_CFG_FILE_FIRST + index);
@@ -824,7 +864,7 @@ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
void fw_cfg_add_file(FWCfgState *s, const char *filename,
void *data, size_t len)
{
- fw_cfg_add_file_callback(s, filename, NULL, NULL, data, len);
+ fw_cfg_add_file_callback(s, filename, NULL, NULL, data, len, true);
}
void *fw_cfg_modify_file(FWCfgState *s, const char *filename,
@@ -836,7 +876,7 @@ void *fw_cfg_modify_file(FWCfgState *s, const char *filename,
assert(s->files);
index = be32_to_cpu(s->files->count);
- assert(index < FW_CFG_FILE_SLOTS);
+ assert(index < fw_cfg_file_slots(s));
for (i = 0; i < index; i++) {
if (strcmp(filename, s->files->f[i].name) == 0) {
@@ -847,7 +887,7 @@ void *fw_cfg_modify_file(FWCfgState *s, const char *filename,
}
}
/* add new one */
- fw_cfg_add_file_callback(s, filename, NULL, NULL, data, len);
+ fw_cfg_add_file_callback(s, filename, NULL, NULL, data, len, true);
return NULL;
}
@@ -993,12 +1033,38 @@ static const TypeInfo fw_cfg_info = {
.class_init = fw_cfg_class_init,
};
+static void fw_cfg_file_slots_allocate(FWCfgState *s, Error **errp)
+{
+ uint16_t file_slots_max;
+
+ if (fw_cfg_file_slots(s) < FW_CFG_FILE_SLOTS_MIN) {
+ error_setg(errp, "\"file_slots\" must be at least 0x%x",
+ FW_CFG_FILE_SLOTS_MIN);
+ return;
+ }
+
+ /* (UINT16_MAX & FW_CFG_ENTRY_MASK) is the highest inclusive selector value
+ * that we permit. The actual (exclusive) value coming from the
+ * configuration is (FW_CFG_FILE_FIRST + fw_cfg_file_slots(s)). */
+ file_slots_max = (UINT16_MAX & FW_CFG_ENTRY_MASK) - FW_CFG_FILE_FIRST + 1;
+ if (fw_cfg_file_slots(s) > file_slots_max) {
+ error_setg(errp, "\"file_slots\" must not exceed 0x%" PRIx16,
+ file_slots_max);
+ return;
+ }
+
+ s->entries[0] = g_new0(FWCfgEntry, fw_cfg_max_entry(s));
+ s->entries[1] = g_new0(FWCfgEntry, fw_cfg_max_entry(s));
+ s->entry_order = g_new0(int, fw_cfg_max_entry(s));
+}
static Property fw_cfg_io_properties[] = {
DEFINE_PROP_UINT32("iobase", FWCfgIoState, iobase, -1),
DEFINE_PROP_UINT32("dma_iobase", FWCfgIoState, dma_iobase, -1),
DEFINE_PROP_BOOL("dma_enabled", FWCfgIoState, parent_obj.dma_enabled,
true),
+ DEFINE_PROP_UINT16("x-file-slots", FWCfgIoState, parent_obj.file_slots,
+ FW_CFG_FILE_SLOTS_DFLT),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1006,6 +1072,13 @@ static void fw_cfg_io_realize(DeviceState *dev, Error **errp)
{
FWCfgIoState *s = FW_CFG_IO(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ Error *local_err = NULL;
+
+ fw_cfg_file_slots_allocate(FW_CFG(s), &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
/* when using port i/o, the 8-bit data register ALWAYS overlaps
* with half of the 16-bit control register. Hence, the total size
@@ -1042,6 +1115,8 @@ static Property fw_cfg_mem_properties[] = {
DEFINE_PROP_UINT32("data_width", FWCfgMemState, data_width, -1),
DEFINE_PROP_BOOL("dma_enabled", FWCfgMemState, parent_obj.dma_enabled,
true),
+ DEFINE_PROP_UINT16("x-file-slots", FWCfgMemState, parent_obj.file_slots,
+ FW_CFG_FILE_SLOTS_DFLT),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1050,6 +1125,13 @@ static void fw_cfg_mem_realize(DeviceState *dev, Error **errp)
FWCfgMemState *s = FW_CFG_MEM(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
const MemoryRegionOps *data_ops = &fw_cfg_data_mem_ops;
+ Error *local_err = NULL;
+
+ fw_cfg_file_slots_allocate(FW_CFG(s), &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops,
FW_CFG(s), "fwcfg.ctl", FW_CFG_CTL_SIZE);
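Two knobs fall out of the fw_cfg changes above: the number of file slots is now a device property ("x-file-slots", defaulting to FW_CFG_FILE_SLOTS_DFLT), and entries registered with read_only = false can be written by the guest through the new DMA write path. A sketch of registering a guest-writable file; the file name and buffer are placeholders:

    /* blob/blob_len: some buffer owned by the device model */
    fw_cfg_add_file_callback(s, "etc/example-blob",
                             NULL, NULL,              /* no read callback */
                             blob, blob_len,
                             false /* read_only */);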
diff --git a/hw/pci-bridge/ioh3420.c b/hw/pci-bridge/ioh3420.c
index c8b5ac4207..0eef87a4f8 100644
--- a/hw/pci-bridge/ioh3420.c
+++ b/hw/pci-bridge/ioh3420.c
@@ -135,8 +135,10 @@ static int ioh3420_initfn(PCIDevice *d)
goto err_pcie_cap;
}
- rc = pcie_aer_init(d, IOH_EP_AER_OFFSET, PCI_ERR_SIZEOF);
+ rc = pcie_aer_init(d, PCI_ERR_VER, IOH_EP_AER_OFFSET,
+ PCI_ERR_SIZEOF, &err);
if (rc < 0) {
+ error_report_err(err);
goto err;
}
pcie_aer_root_init(d);
@@ -178,7 +180,7 @@ static const VMStateDescription vmstate_ioh3420 = {
.minimum_version_id = 1,
.post_load = pcie_cap_slot_post_load,
.fields = (VMStateField[]) {
- VMSTATE_PCIE_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot),
+ VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot),
VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log,
PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog),
VMSTATE_END_OF_LIST()
diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c
index cef6e1325e..cfe8a3657f 100644
--- a/hw/pci-bridge/xio3130_downstream.c
+++ b/hw/pci-bridge/xio3130_downstream.c
@@ -97,8 +97,10 @@ static int xio3130_downstream_initfn(PCIDevice *d)
goto err_pcie_cap;
}
- rc = pcie_aer_init(d, XIO3130_AER_OFFSET, PCI_ERR_SIZEOF);
+ rc = pcie_aer_init(d, PCI_ERR_VER, XIO3130_AER_OFFSET,
+ PCI_ERR_SIZEOF, &err);
if (rc < 0) {
+ error_report_err(err);
goto err;
}
@@ -164,7 +166,7 @@ static const VMStateDescription vmstate_xio3130_downstream = {
.minimum_version_id = 1,
.post_load = pcie_cap_slot_post_load,
.fields = (VMStateField[]) {
- VMSTATE_PCIE_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot),
+ VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot),
VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log,
PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog),
VMSTATE_END_OF_LIST()
diff --git a/hw/pci-bridge/xio3130_upstream.c b/hw/pci-bridge/xio3130_upstream.c
index 4ad0440aa1..401c78452b 100644
--- a/hw/pci-bridge/xio3130_upstream.c
+++ b/hw/pci-bridge/xio3130_upstream.c
@@ -85,8 +85,10 @@ static int xio3130_upstream_initfn(PCIDevice *d)
pcie_cap_flr_init(d);
pcie_cap_deverr_init(d);
- rc = pcie_aer_init(d, XIO3130_AER_OFFSET, PCI_ERR_SIZEOF);
+ rc = pcie_aer_init(d, PCI_ERR_VER, XIO3130_AER_OFFSET,
+ PCI_ERR_SIZEOF, &err);
if (rc < 0) {
+ error_report_err(err);
goto err;
}
@@ -136,7 +138,7 @@ static const VMStateDescription vmstate_xio3130_upstream = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_PCIE_DEVICE(parent_obj.parent_obj, PCIEPort),
+ VMSTATE_PCI_DEVICE(parent_obj.parent_obj, PCIEPort),
VMSTATE_STRUCT(parent_obj.parent_obj.exp.aer_log, PCIEPort, 0,
vmstate_pcie_aer_log, PCIEAERLog),
VMSTATE_END_OF_LIST()
diff --git a/hw/pci/msix.c b/hw/pci/msix.c
index 0ec1cb14fc..ee1714d2cf 100644
--- a/hw/pci/msix.c
+++ b/hw/pci/msix.c
@@ -587,12 +587,16 @@ void msix_unset_vector_notifiers(PCIDevice *dev)
dev->msix_vector_poll_notifier = NULL;
}
-static void put_msix_state(QEMUFile *f, void *pv, size_t size)
+static int put_msix_state(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
msix_save(pv, f);
+
+ return 0;
}
-static int get_msix_state(QEMUFile *f, void *pv, size_t size)
+static int get_msix_state(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
msix_load(pv, f);
return 0;
diff --git a/hw/pci/pci.c b/hw/pci/pci.c
index 24fae1689d..47ca3af69a 100644
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -445,7 +445,8 @@ int pci_bus_numa_node(PCIBus *bus)
return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
}
-static int get_pci_config_device(QEMUFile *f, void *pv, size_t size)
+static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
PCIDevice *s = container_of(pv, PCIDevice, config);
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(s);
@@ -484,11 +485,14 @@ static int get_pci_config_device(QEMUFile *f, void *pv, size_t size)
}
/* just put buffer */
-static void put_pci_config_device(QEMUFile *f, void *pv, size_t size)
+static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
const uint8_t **v = pv;
assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
qemu_put_buffer(f, *v, size);
+
+ return 0;
}
static VMStateInfo vmstate_info_pci_config = {
@@ -497,7 +501,8 @@ static VMStateInfo vmstate_info_pci_config = {
.put = put_pci_config_device,
};
-static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size)
+static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
PCIDevice *s = container_of(pv, PCIDevice, irq_state);
uint32_t irq_state[PCI_NUM_PINS];
@@ -518,7 +523,8 @@ static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void put_pci_irq_state(QEMUFile *f, void *pv, size_t size)
+static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
int i;
PCIDevice *s = container_of(pv, PCIDevice, irq_state);
@@ -526,6 +532,8 @@ static void put_pci_irq_state(QEMUFile *f, void *pv, size_t size)
for (i = 0; i < PCI_NUM_PINS; ++i) {
qemu_put_be32(f, pci_irq_state(s, i));
}
+
+ return 0;
}
static VMStateInfo vmstate_info_pci_irq_state = {
@@ -534,30 +542,29 @@ static VMStateInfo vmstate_info_pci_irq_state = {
.put = put_pci_irq_state,
};
+static bool migrate_is_pcie(void *opaque, int version_id)
+{
+ return pci_is_express((PCIDevice *)opaque);
+}
+
+static bool migrate_is_not_pcie(void *opaque, int version_id)
+{
+ return !pci_is_express((PCIDevice *)opaque);
+}
+
const VMStateDescription vmstate_pci_device = {
.name = "PCIDevice",
.version_id = 2,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
- VMSTATE_BUFFER_UNSAFE_INFO(config, PCIDevice, 0,
- vmstate_info_pci_config,
+ VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
+ migrate_is_not_pcie,
+ 0, vmstate_info_pci_config,
PCI_CONFIG_SPACE_SIZE),
- VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
- vmstate_info_pci_irq_state,
- PCI_NUM_PINS * sizeof(int32_t)),
- VMSTATE_END_OF_LIST()
- }
-};
-
-const VMStateDescription vmstate_pcie_device = {
- .name = "PCIEDevice",
- .version_id = 2,
- .minimum_version_id = 1,
- .fields = (VMStateField[]) {
- VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
- VMSTATE_BUFFER_UNSAFE_INFO(config, PCIDevice, 0,
- vmstate_info_pci_config,
+ VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
+ migrate_is_pcie,
+ 0, vmstate_info_pci_config,
PCIE_CONFIG_SPACE_SIZE),
VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
vmstate_info_pci_irq_state,
@@ -566,10 +573,6 @@ const VMStateDescription vmstate_pcie_device = {
}
};
-static inline const VMStateDescription *pci_get_vmstate(PCIDevice *s)
-{
- return pci_is_express(s) ? &vmstate_pcie_device : &vmstate_pci_device;
-}
void pci_device_save(PCIDevice *s, QEMUFile *f)
{
@@ -578,7 +581,7 @@ void pci_device_save(PCIDevice *s, QEMUFile *f)
* This makes us compatible with old devices
* which never set or clear this bit. */
s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
- vmstate_save_state(f, pci_get_vmstate(s), s, NULL);
+ vmstate_save_state(f, &vmstate_pci_device, s, NULL);
/* Restore the interrupt status bit. */
pci_update_irq_status(s);
}
@@ -586,7 +589,7 @@ void pci_device_save(PCIDevice *s, QEMUFile *f)
int pci_device_load(PCIDevice *s, QEMUFile *f)
{
int ret;
- ret = vmstate_load_state(f, pci_get_vmstate(s), s, s->version_id);
+ ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
/* Restore the interrupt status bit. */
pci_update_irq_status(s);
return ret;
@@ -982,8 +985,8 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
pci_get_function_0(pci_dev)) {
error_setg(errp, "PCI: slot %d function 0 already ocuppied by %s,"
" new func %s cannot be exposed to guest.",
- PCI_SLOT(devfn),
- bus->devices[PCI_DEVFN(PCI_SLOT(devfn), 0)]->name,
+ PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
+ pci_get_function_0(pci_dev)->name,
name);
return NULL;
@@ -1779,7 +1782,6 @@ PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus,
const char *default_devaddr)
{
const char *devaddr = nd->devaddr ? nd->devaddr : default_devaddr;
- Error *err = NULL;
PCIBus *bus;
PCIDevice *pci_dev;
DeviceState *dev;
@@ -1805,13 +1807,7 @@ PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus,
pci_dev = pci_create(bus, devfn, pci_nic_names[i]);
dev = &pci_dev->qdev;
qdev_set_nic_properties(dev, nd);
-
- object_property_set_bool(OBJECT(dev), true, "realized", &err);
- if (err) {
- error_report_err(err);
- object_unparent(OBJECT(dev));
- exit(1);
- }
+ qdev_init_nofail(dev);
return pci_dev;
}
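The two separate PCI/PCIe vmstate descriptions are folded into one here; the test-gated buffer fields mean exactly one config-space blob (256 or 4096 bytes, chosen by pci_is_express()) goes on the wire, which is intended to match what the old split descriptions produced. The same pattern can gate any buffer field on a runtime predicate; a sketch with hypothetical names:

    static bool foo_has_ext(void *opaque, int version_id)
    {
        return ((FooState *)opaque)->has_ext;
    }

    /* inside the device's VMStateField list: */
    VMSTATE_BUFFER_UNSAFE_INFO_TEST(ext_buf, FooState, foo_has_ext,
                                    0, vmstate_info_foo_buf, FOO_EXT_SIZE),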
diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c
index 99cfb4561b..cbd4bb4f8c 100644
--- a/hw/pci/pcie.c
+++ b/hw/pci/pcie.c
@@ -656,7 +656,7 @@ static void pcie_ext_cap_set_next(PCIDevice *dev, uint16_t pos, uint16_t next)
}
/*
- * caller must supply valid (offset, size) * such that the range shouldn't
+ * Caller must supply valid (offset, size) such that the range wouldn't
* overlap with other capability or other registers.
* This function doesn't check it.
*/
@@ -717,3 +717,18 @@ void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num)
PCI_EXT_CAP_DSN_SIZEOF);
pci_set_quad(dev->config + offset + pci_dsn_cap, ser_num);
}
+
+void pcie_ats_init(PCIDevice *dev, uint16_t offset)
+{
+ pcie_add_capability(dev, PCI_EXT_CAP_ID_ATS, 0x1,
+ offset, PCI_EXT_CAP_ATS_SIZEOF);
+
+ dev->exp.ats_cap = offset;
+
+ /* Invalidate Queue Depth 0, Page Aligned Request 0 */
+ pci_set_word(dev->config + offset + PCI_ATS_CAP, 0);
+ /* STU 0, Disabled by default */
+ pci_set_word(dev->config + offset + PCI_ATS_CTRL, 0);
+
+ pci_set_word(dev->wmask + dev->exp.ats_cap + PCI_ATS_CTRL, 0x800f);
+}
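A PCIe device model opts into ATS by reserving a slot in extended config space and calling the new helper; the capability comes up disabled, and the write mask leaves the Enable bit and STU bits under guest control. Minimal usage sketch (the 0x100 offset is illustrative):

    /* in the device's realize/init function */
    pcie_ats_init(pci_dev, 0x100);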
diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c
index 048ce6a424..daf1f65427 100644
--- a/hw/pci/pcie_aer.c
+++ b/hw/pci/pcie_aer.c
@@ -29,6 +29,7 @@
#include "hw/pci/msi.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pcie_regs.h"
+#include "qapi/error.h"
//#define DEBUG_PCIE
#ifdef DEBUG_PCIE
@@ -96,21 +97,17 @@ static void aer_log_clear_all_err(PCIEAERLog *aer_log)
aer_log->log_num = 0;
}
-int pcie_aer_init(PCIDevice *dev, uint16_t offset, uint16_t size)
+int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset,
+ uint16_t size, Error **errp)
{
- PCIExpressDevice *exp;
-
- pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, PCI_ERR_VER,
+ pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, cap_ver,
offset, size);
- exp = &dev->exp;
- exp->aer_cap = offset;
+ dev->exp.aer_cap = offset;
- /* log_max is property */
- if (dev->exp.aer_log.log_max == PCIE_AER_LOG_MAX_UNSET) {
- dev->exp.aer_log.log_max = PCIE_AER_LOG_MAX_DEFAULT;
- }
- /* clip down the value to avoid unreasobale memory usage */
+ /* clip down the value to avoid unreasonable memory usage */
if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) {
+ error_setg(errp, "Invalid aer_log_max %d. The max number of aer log "
+ "is %d", dev->exp.aer_log.log_max, PCIE_AER_LOG_MAX_LIMIT);
return -EINVAL;
}
dev->exp.aer_log.log = g_malloc0(sizeof dev->exp.aer_log.log[0] *
diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c
index 3dcd472eba..42fafac91b 100644
--- a/hw/pci/shpc.c
+++ b/hw/pci/shpc.c
@@ -695,13 +695,16 @@ void shpc_cap_write_config(PCIDevice *d, uint32_t addr, uint32_t val, int l)
shpc_cap_update_dword(d);
}
-static void shpc_save(QEMUFile *f, void *pv, size_t size)
+static int shpc_save(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
PCIDevice *d = container_of(pv, PCIDevice, shpc);
qemu_put_buffer(f, d->shpc->config, SHPC_SIZEOF(d));
+
+ return 0;
}
-static int shpc_load(QEMUFile *f, void *pv, size_t size)
+static int shpc_load(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
PCIDevice *d = container_of(pv, PCIDevice, shpc);
int ret = qemu_get_buffer(f, d->shpc->config, SHPC_SIZEOF(d));
diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c
index b82af4f086..38bc85f117 100644
--- a/hw/ppc/pnv_xscom.c
+++ b/hw/ppc/pnv_xscom.c
@@ -20,7 +20,7 @@
#include "qapi/error.h"
#include "hw/hw.h"
#include "qemu/log.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
#include "target/ppc/cpu.h"
#include "hw/sysbus.h"
diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c
index cf958a9e00..eb219abdff 100644
--- a/hw/ppc/ppce500_spin.c
+++ b/hw/ppc/ppce500_spin.c
@@ -29,9 +29,9 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
+#include "sysemu/sysemu.h"
#include "e500.h"
#define MAX_CPUS 32
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 208ef7b110..a642e663d4 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -36,7 +36,7 @@
#include "sysemu/device_tree.h"
#include "sysemu/block-backend.h"
#include "sysemu/cpus.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "migration/migration.h"
#include "mmu-hash64.h"
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index a0c44ee593..2de6377cca 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -59,7 +59,7 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
trace_spapr_drc_set_isolation_state(get_index(drc), state);
if (state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
- /* cannot unisolate a non-existant resource, and, or resources
+ /* cannot unisolate a non-existent resource, and, or resources
* which are in an 'UNUSABLE' allocation state. (PAPR 2.7, 13.5.3.5)
*/
if (!drc->dev ||
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 9a9bedf1bd..b2a8e48569 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -1,5 +1,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include "sysemu/hw_accel.h"
#include "sysemu/sysemu.h"
#include "qemu/log.h"
#include "cpu.h"
@@ -9,7 +10,6 @@
#include "mmu-hash64.h"
#include "cpu-models.h"
#include "trace.h"
-#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "hw/ppc/spapr_ovec.h"
diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
index 63f6248f1d..69b0291e8a 100644
--- a/hw/s390x/s390-pci-bus.c
+++ b/hw/s390x/s390-pci-bus.c
@@ -19,6 +19,7 @@
#include "s390-pci-bus.h"
#include "s390-pci-inst.h"
#include "hw/pci/pci_bus.h"
+#include "hw/pci/pci_bridge.h"
#include "hw/pci/msi.h"
#include "qemu/error-report.h"
@@ -31,7 +32,7 @@
do { } while (0)
#endif
-static S390pciState *s390_get_phb(void)
+S390pciState *s390_get_phb(void)
{
static S390pciState *phb;
@@ -91,35 +92,25 @@ int chsc_sei_nt2_have_event(void)
return !QTAILQ_EMPTY(&s->pending_sei);
}
-S390PCIBusDevice *s390_pci_find_next_avail_dev(S390PCIBusDevice *pbdev)
+S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
+ S390PCIBusDevice *pbdev)
{
- int idx = 0;
- S390PCIBusDevice *dev = NULL;
- S390pciState *s = s390_get_phb();
-
- if (pbdev) {
- idx = (pbdev->fh & FH_MASK_INDEX) + 1;
- }
+ S390PCIBusDevice *ret = pbdev ? QTAILQ_NEXT(pbdev, link) :
+ QTAILQ_FIRST(&s->zpci_devs);
- for (; idx < PCI_SLOT_MAX; idx++) {
- dev = s->pbdev[idx];
- if (dev && dev->state != ZPCI_FS_RESERVED) {
- return dev;
- }
+ while (ret && ret->state == ZPCI_FS_RESERVED) {
+ ret = QTAILQ_NEXT(ret, link);
}
- return NULL;
+ return ret;
}
-S390PCIBusDevice *s390_pci_find_dev_by_fid(uint32_t fid)
+S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid)
{
S390PCIBusDevice *pbdev;
- int i;
- S390pciState *s = s390_get_phb();
- for (i = 0; i < PCI_SLOT_MAX; i++) {
- pbdev = s->pbdev[i];
- if (pbdev && pbdev->fid == fid) {
+ QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
+ if (pbdev->fid == fid) {
return pbdev;
}
}
@@ -130,7 +121,8 @@ S390PCIBusDevice *s390_pci_find_dev_by_fid(uint32_t fid)
void s390_pci_sclp_configure(SCCB *sccb)
{
PciCfgSccb *psccb = (PciCfgSccb *)sccb;
- S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(be32_to_cpu(psccb->aid));
+ S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
+ be32_to_cpu(psccb->aid));
uint16_t rc;
if (be16_to_cpu(sccb->h.length) < 16) {
@@ -162,7 +154,8 @@ out:
void s390_pci_sclp_deconfigure(SCCB *sccb)
{
PciCfgSccb *psccb = (PciCfgSccb *)sccb;
- S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(be32_to_cpu(psccb->aid));
+ S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
+ be32_to_cpu(psccb->aid));
uint16_t rc;
if (be16_to_cpu(sccb->h.length) < 16) {
@@ -187,8 +180,8 @@ void s390_pci_sclp_deconfigure(SCCB *sccb)
if (pbdev->summary_ind) {
pci_dereg_irqs(pbdev);
}
- if (pbdev->iommu_enabled) {
- pci_dereg_ioat(pbdev);
+ if (pbdev->iommu->enabled) {
+ pci_dereg_ioat(pbdev->iommu);
}
pbdev->state = ZPCI_FS_STANDBY;
rc = SCLP_RC_NORMAL_COMPLETION;
@@ -201,18 +194,11 @@ out:
psccb->header.response_code = cpu_to_be16(rc);
}
-static S390PCIBusDevice *s390_pci_find_dev_by_uid(uint16_t uid)
+static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid)
{
- int i;
S390PCIBusDevice *pbdev;
- S390pciState *s = s390_get_phb();
-
- for (i = 0; i < PCI_SLOT_MAX; i++) {
- pbdev = s->pbdev[i];
- if (!pbdev) {
- continue;
- }
+ QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
if (pbdev->uid == uid) {
return pbdev;
}
@@ -221,22 +207,16 @@ static S390PCIBusDevice *s390_pci_find_dev_by_uid(uint16_t uid)
return NULL;
}
-static S390PCIBusDevice *s390_pci_find_dev_by_target(const char *target)
+static S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
+ const char *target)
{
- int i;
S390PCIBusDevice *pbdev;
- S390pciState *s = s390_get_phb();
if (!target) {
return NULL;
}
- for (i = 0; i < PCI_SLOT_MAX; i++) {
- pbdev = s->pbdev[i];
- if (!pbdev) {
- continue;
- }
-
+ QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
if (!strcmp(pbdev->target, target)) {
return pbdev;
}
@@ -245,19 +225,16 @@ static S390PCIBusDevice *s390_pci_find_dev_by_target(const char *target)
return NULL;
}
-S390PCIBusDevice *s390_pci_find_dev_by_idx(uint32_t idx)
+S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx)
{
- S390pciState *s = s390_get_phb();
-
- return s->pbdev[idx & FH_MASK_INDEX];
+ return g_hash_table_lookup(s->zpci_table, &idx);
}
-S390PCIBusDevice *s390_pci_find_dev_by_fh(uint32_t fh)
+S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh)
{
- S390pciState *s = s390_get_phb();
- S390PCIBusDevice *pbdev;
+ uint32_t idx = FH_MASK_INDEX & fh;
+ S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx);
- pbdev = s->pbdev[fh & FH_MASK_INDEX];
if (pbdev && pbdev->fh == fh) {
return pbdev;
}
@@ -377,12 +354,12 @@ out:
return pte;
}
-static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr,
+static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *mr, hwaddr addr,
bool is_write)
{
uint64_t pte;
uint32_t flags;
- S390PCIBusDevice *pbdev = container_of(iommu, S390PCIBusDevice, iommu_mr);
+ S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
IOMMUTLBEntry ret = {
.target_as = &address_space_memory,
.iova = 0,
@@ -391,10 +368,10 @@ static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr,
.perm = IOMMU_NONE,
};
- switch (pbdev->state) {
+ switch (iommu->pbdev->state) {
case ZPCI_FS_ENABLED:
case ZPCI_FS_BLOCKED:
- if (!pbdev->iommu_enabled) {
+ if (!iommu->enabled) {
return ret;
}
break;
@@ -404,11 +381,11 @@ static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr,
DPRINTF("iommu trans addr 0x%" PRIx64 "\n", addr);
- if (addr < pbdev->pba || addr > pbdev->pal) {
+ if (addr < iommu->pba || addr > iommu->pal) {
return ret;
}
- pte = s390_guest_io_table_walk(s390_pci_get_table_origin(pbdev->g_iota),
+ pte = s390_guest_io_table_walk(s390_pci_get_table_origin(iommu->g_iota),
addr);
if (!pte) {
return ret;
@@ -432,11 +409,48 @@ static const MemoryRegionIOMMUOps s390_iommu_ops = {
.translate = s390_translate_iommu,
};
+static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
+ int devfn)
+{
+ uint64_t key = (uintptr_t)bus;
+ S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
+ S390PCIIOMMU *iommu;
+
+ if (!table) {
+ table = g_malloc0(sizeof(S390PCIIOMMUTable));
+ table->key = key;
+ g_hash_table_insert(s->iommu_table, &table->key, table);
+ }
+
+ iommu = table->iommu[PCI_SLOT(devfn)];
+ if (!iommu) {
+ iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU));
+
+ char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x",
+ pci_bus_num(bus),
+ PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
+ char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x",
+ pci_bus_num(bus),
+ PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
+ memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX);
+ address_space_init(&iommu->as, &iommu->mr, as_name);
+ table->iommu[PCI_SLOT(devfn)] = iommu;
+
+ g_free(mr_name);
+ g_free(as_name);
+ }
+
+ return iommu;
+}
+
static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
S390pciState *s = opaque;
+ S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn);
- return &s->iommu[PCI_SLOT(devfn)]->as;
+ return &iommu->as;
}
static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
@@ -503,34 +517,38 @@ static const MemoryRegionOps s390_msi_ctrl_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-void s390_pci_iommu_enable(S390PCIBusDevice *pbdev)
+void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
{
- memory_region_init_iommu(&pbdev->iommu_mr, OBJECT(&pbdev->iommu->mr),
- &s390_iommu_ops, "iommu-s390", pbdev->pal + 1);
- memory_region_add_subregion(&pbdev->iommu->mr, 0, &pbdev->iommu_mr);
- pbdev->iommu_enabled = true;
+ char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);
+ memory_region_init_iommu(&iommu->iommu_mr, OBJECT(&iommu->mr),
+ &s390_iommu_ops, name, iommu->pal + 1);
+ iommu->enabled = true;
+ memory_region_add_subregion(&iommu->mr, 0, &iommu->iommu_mr);
+ g_free(name);
}
-void s390_pci_iommu_disable(S390PCIBusDevice *pbdev)
+void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
- memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->iommu_mr);
- object_unparent(OBJECT(&pbdev->iommu_mr));
- pbdev->iommu_enabled = false;
+ iommu->enabled = false;
+ memory_region_del_subregion(&iommu->mr, &iommu->iommu_mr);
+ object_unparent(OBJECT(&iommu->iommu_mr));
}
-static void s390_pcihost_init_as(S390pciState *s)
+static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
{
- int i;
- S390PCIIOMMU *iommu;
+ uint64_t key = (uintptr_t)bus;
+ S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
+ S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL;
- for (i = 0; i < PCI_SLOT_MAX; i++) {
- iommu = g_malloc0(sizeof(S390PCIIOMMU));
- memory_region_init(&iommu->mr, OBJECT(s),
- "iommu-root-s390", UINT64_MAX);
- address_space_init(&iommu->as, &iommu->mr, "iommu-pci");
-
- s->iommu[i] = iommu;
+ if (!table || !iommu) {
+ return;
}
+
+ table->iommu[PCI_SLOT(devfn)] = NULL;
+ address_space_destroy(&iommu->as);
+ object_unparent(OBJECT(&iommu->mr));
+ object_unparent(OBJECT(iommu));
+ object_unref(OBJECT(iommu));
}
static int s390_pcihost_init(SysBusDevice *dev)
@@ -546,7 +564,6 @@ static int s390_pcihost_init(SysBusDevice *dev)
s390_pci_set_irq, s390_pci_map_irq, NULL,
get_system_memory(), get_system_io(), 0, 64,
TYPE_PCI_BUS);
- s390_pcihost_init_as(s);
pci_setup_iommu(b, s390_pci_dma_iommu, s);
bus = BUS(b);
@@ -556,12 +573,18 @@ static int s390_pcihost_init(SysBusDevice *dev)
s->bus = S390_PCI_BUS(qbus_create(TYPE_S390_PCI_BUS, DEVICE(s), NULL));
qbus_set_hotplug_handler(BUS(s->bus), DEVICE(s), NULL);
+ s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal,
+ NULL, g_free);
+ s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL);
+ s->bus_no = 0;
QTAILQ_INIT(&s->pending_sei);
+ QTAILQ_INIT(&s->zpci_devs);
return 0;
}
-static int s390_pci_setup_msix(S390PCIBusDevice *pbdev)
+static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
{
+ char *name;
uint8_t pos;
uint16_t ctrl;
uint32_t table, pba;
@@ -569,7 +592,7 @@ static int s390_pci_setup_msix(S390PCIBusDevice *pbdev)
pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
if (!pos) {
pbdev->msix.available = false;
- return 0;
+ return -1;
}
ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS,
@@ -585,21 +608,15 @@ static int s390_pci_setup_msix(S390PCIBusDevice *pbdev)
pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
pbdev->msix.available = true;
- return 0;
-}
-
-static void s390_pci_msix_init(S390PCIBusDevice *pbdev)
-{
- char *name;
name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
-
memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
&s390_msi_ctrl_ops, pbdev, name, PAGE_SIZE);
memory_region_add_subregion(&pbdev->iommu->mr, ZPCI_MSI_ADDR,
&pbdev->msix_notify_mr);
-
g_free(name);
+
+ return 0;
}
static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
@@ -608,10 +625,10 @@ static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
object_unparent(OBJECT(&pbdev->msix_notify_mr));
}
-static S390PCIBusDevice *s390_pci_device_new(const char *target)
+static S390PCIBusDevice *s390_pci_device_new(S390pciState *s,
+ const char *target)
{
DeviceState *dev = NULL;
- S390pciState *s = s390_get_phb();
dev = qdev_try_create(BUS(s->bus), TYPE_S390_PCI_DEVICE);
if (!dev) {
@@ -624,6 +641,24 @@ static S390PCIBusDevice *s390_pci_device_new(const char *target)
return S390_PCI_DEVICE(dev);
}
+static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev)
+{
+ uint32_t idx;
+
+ idx = s->next_idx;
+ while (s390_pci_find_dev_by_idx(s, idx)) {
+ idx = (idx + 1) & FH_MASK_INDEX;
+ if (idx == s->next_idx) {
+ return false;
+ }
+ }
+
+ pbdev->idx = idx;
+ s->next_idx = (idx + 1) & FH_MASK_INDEX;
+
+ return true;
+}
+
static void s390_pcihost_hot_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@@ -631,7 +666,28 @@ static void s390_pcihost_hot_plug(HotplugHandler *hotplug_dev,
S390PCIBusDevice *pbdev = NULL;
S390pciState *s = s390_get_phb();
- if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
+ BusState *bus;
+ PCIBridge *pb = PCI_BRIDGE(dev);
+ PCIDevice *pdev = PCI_DEVICE(dev);
+
+ pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq);
+ pci_setup_iommu(&pb->sec_bus, s390_pci_dma_iommu, s);
+
+ bus = BUS(&pb->sec_bus);
+ qbus_set_hotplug_handler(bus, DEVICE(s), errp);
+
+ if (dev->hotplugged) {
+ pci_default_write_config(pdev, PCI_PRIMARY_BUS, s->bus_no, 1);
+ s->bus_no += 1;
+ pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);
+ do {
+ pdev = pdev->bus->parent_dev;
+ pci_default_write_config(pdev, PCI_SUBORDINATE_BUS,
+ s->bus_no, 1);
+ } while (pdev->bus && pci_bus_num(pdev->bus));
+ }
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
pdev = PCI_DEVICE(dev);
if (!dev->id) {
@@ -643,9 +699,9 @@ static void s390_pcihost_hot_plug(HotplugHandler *hotplug_dev,
PCI_FUNC(pdev->devfn));
}
- pbdev = s390_pci_find_dev_by_target(dev->id);
+ pbdev = s390_pci_find_dev_by_target(s, dev->id);
if (!pbdev) {
- pbdev = s390_pci_device_new(dev->id);
+ pbdev = s390_pci_device_new(s, dev->id);
if (!pbdev) {
error_setg(errp, "create zpci device failed");
return;
@@ -659,29 +715,30 @@ static void s390_pcihost_hot_plug(HotplugHandler *hotplug_dev,
}
pbdev->pdev = pdev;
- pbdev->iommu = s->iommu[PCI_SLOT(pdev->devfn)];
+ pbdev->iommu = s390_pci_get_iommu(s, pdev->bus, pdev->devfn);
+ pbdev->iommu->pbdev = pbdev;
pbdev->state = ZPCI_FS_STANDBY;
- s390_pci_msix_init(pbdev);
- s390_pci_setup_msix(pbdev);
+ if (s390_pci_msix_init(pbdev)) {
+ error_setg(errp, "MSI-X support is mandatory "
+ "in the S390 architecture");
+ return;
+ }
if (dev->hotplugged) {
s390_pci_generate_plug_event(HP_EVENT_RESERVED_TO_STANDBY,
pbdev->fh, pbdev->fid);
}
} else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
- int idx;
-
pbdev = S390_PCI_DEVICE(dev);
- for (idx = 0; idx < PCI_SLOT_MAX; idx++) {
- if (!s->pbdev[idx]) {
- s->pbdev[idx] = pbdev;
- pbdev->fh = idx;
- return;
- }
- }
- error_setg(errp, "no slot for plugging zpci device");
+ if (!s390_pci_alloc_idx(s, pbdev)) {
+ error_setg(errp, "no slot for plugging zpci device");
+ return;
+ }
+ pbdev->fh = pbdev->idx;
+ QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link);
+ g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
}
}
@@ -692,8 +749,8 @@ static void s390_pcihost_timer_cb(void *opaque)
if (pbdev->summary_ind) {
pci_dereg_irqs(pbdev);
}
- if (pbdev->iommu_enabled) {
- pci_dereg_ioat(pbdev);
+ if (pbdev->iommu->enabled) {
+ pci_dereg_ioat(pbdev->iommu);
}
pbdev->state = ZPCI_FS_STANDBY;
@@ -705,17 +762,20 @@ static void s390_pcihost_timer_cb(void *opaque)
static void s390_pcihost_hot_unplug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
- int i;
PCIDevice *pci_dev = NULL;
+ PCIBus *bus;
+ int32_t devfn;
S390PCIBusDevice *pbdev = NULL;
S390pciState *s = s390_get_phb();
- if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
+ error_setg(errp, "PCI bridge hot unplug currently not supported");
+ return;
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
pci_dev = PCI_DEVICE(dev);
- for (i = 0 ; i < PCI_SLOT_MAX; i++) {
- if (s->pbdev[i] && s->pbdev[i]->pdev == pci_dev) {
- pbdev = s->pbdev[i];
+ QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
+ if (pbdev->pdev == pci_dev) {
break;
}
}
@@ -749,16 +809,58 @@ static void s390_pcihost_hot_unplug(HotplugHandler *hotplug_dev,
s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
pbdev->fh, pbdev->fid);
+ bus = pci_dev->bus;
+ devfn = pci_dev->devfn;
object_unparent(OBJECT(pci_dev));
s390_pci_msix_free(pbdev);
+ s390_pci_iommu_free(s, bus, devfn);
pbdev->pdev = NULL;
pbdev->state = ZPCI_FS_RESERVED;
out:
pbdev->fid = 0;
- s->pbdev[pbdev->fh & FH_MASK_INDEX] = NULL;
+ QTAILQ_REMOVE(&s->zpci_devs, pbdev, link);
+ g_hash_table_remove(s->zpci_table, &pbdev->idx);
object_unparent(OBJECT(pbdev));
}
+static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
+ void *opaque)
+{
+ S390pciState *s = opaque;
+ unsigned int primary = s->bus_no;
+ unsigned int subordinate = 0xff;
+ PCIBus *sec_bus = NULL;
+
+ if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
+ PCI_HEADER_TYPE_BRIDGE)) {
+ return;
+ }
+
+ (s->bus_no)++;
+ pci_default_write_config(pdev, PCI_PRIMARY_BUS, primary, 1);
+ pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);
+ pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
+
+ sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
+ if (!sec_bus) {
+ return;
+ }
+
+ pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, subordinate, 1);
+ pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
+ s390_pci_enumerate_bridge, s);
+ pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
+}
+
+static void s390_pcihost_reset(DeviceState *dev)
+{
+ S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
+ PCIBus *bus = s->parent_obj.bus;
+
+ s->bus_no = 0;
+ pci_for_each_device(bus, pci_bus_num(bus), s390_pci_enumerate_bridge, s);
+}
+
static void s390_pcihost_class_init(ObjectClass *klass, void *data)
{
SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
@@ -766,6 +868,7 @@ static void s390_pcihost_class_init(ObjectClass *klass, void *data)
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
dc->cannot_instantiate_with_device_add_yet = true;
+ dc->reset = s390_pcihost_reset;
k->init = s390_pcihost_init;
hc->plug = s390_pcihost_hot_plug;
hc->unplug = s390_pcihost_hot_unplug;
@@ -789,13 +892,13 @@ static const TypeInfo s390_pcibus_info = {
.instance_size = sizeof(S390PCIBus),
};
-static uint16_t s390_pci_generate_uid(void)
+static uint16_t s390_pci_generate_uid(S390pciState *s)
{
uint16_t uid = 0;
do {
uid++;
- if (!s390_pci_find_dev_by_uid(uid)) {
+ if (!s390_pci_find_dev_by_uid(s, uid)) {
return uid;
}
} while (uid < ZPCI_MAX_UID);
@@ -803,12 +906,12 @@ static uint16_t s390_pci_generate_uid(void)
return UID_UNDEFINED;
}
-static uint32_t s390_pci_generate_fid(Error **errp)
+static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp)
{
uint32_t fid = 0;
do {
- if (!s390_pci_find_dev_by_fid(fid)) {
+ if (!s390_pci_find_dev_by_fid(s, fid)) {
return fid;
}
} while (fid++ != ZPCI_MAX_FID);
@@ -820,25 +923,26 @@ static uint32_t s390_pci_generate_fid(Error **errp)
static void s390_pci_device_realize(DeviceState *dev, Error **errp)
{
S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev);
+ S390pciState *s = s390_get_phb();
if (!zpci->target) {
error_setg(errp, "target must be defined");
return;
}
- if (s390_pci_find_dev_by_target(zpci->target)) {
+ if (s390_pci_find_dev_by_target(s, zpci->target)) {
error_setg(errp, "target %s already has an associated zpci device",
zpci->target);
return;
}
if (zpci->uid == UID_UNDEFINED) {
- zpci->uid = s390_pci_generate_uid();
+ zpci->uid = s390_pci_generate_uid(s);
if (!zpci->uid) {
error_setg(errp, "no free uid could be found");
return;
}
- } else if (s390_pci_find_dev_by_uid(zpci->uid)) {
+ } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) {
error_setg(errp, "uid %u already in use", zpci->uid);
return;
}
@@ -846,12 +950,12 @@ static void s390_pci_device_realize(DeviceState *dev, Error **errp)
if (!zpci->fid_defined) {
Error *local_error = NULL;
- zpci->fid = s390_pci_generate_fid(&local_error);
+ zpci->fid = s390_pci_generate_fid(s, &local_error);
if (local_error) {
error_propagate(errp, local_error);
return;
}
- } else if (s390_pci_find_dev_by_fid(zpci->fid)) {
+ } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) {
error_setg(errp, "fid %u already in use", zpci->fid);
return;
}
@@ -877,8 +981,8 @@ static void s390_pci_device_reset(DeviceState *dev)
if (pbdev->summary_ind) {
pci_dereg_irqs(pbdev);
}
- if (pbdev->iommu_enabled) {
- pci_dereg_ioat(pbdev);
+ if (pbdev->iommu->enabled) {
+ pci_dereg_ioat(pbdev->iommu);
}
pbdev->fmb_addr = 0;
@@ -944,11 +1048,18 @@ static const TypeInfo s390_pci_device_info = {
.class_init = s390_pci_device_class_init,
};
+static TypeInfo s390_pci_iommu_info = {
+ .name = TYPE_S390_PCI_IOMMU,
+ .parent = TYPE_OBJECT,
+ .instance_size = sizeof(S390PCIIOMMU),
+};
+
static void s390_pci_register_types(void)
{
type_register_static(&s390_pcihost_info);
type_register_static(&s390_pcibus_info);
type_register_static(&s390_pci_device_info);
+ type_register_static(&s390_pci_iommu_info);
}
type_init(s390_pci_register_types)
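With the fixed per-slot arrays gone, lookups now go through two hash tables plus a list: iommu_table maps a bus pointer and slot to an S390PCIIOMMU, zpci_table maps the 16-bit index to its S390PCIBusDevice, and zpci_devs keeps the devices in plug order. A sketch of the lookup paths, where s, pdev and fh stand for values the caller already holds:

    S390PCIIOMMU *iommu = s390_pci_get_iommu(s, pdev->bus, pdev->devfn);
    AddressSpace *dma_as = &iommu->as;

    S390PCIBusDevice *pbdev =
        s390_pci_find_dev_by_idx(s, fh & FH_MASK_INDEX);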
diff --git a/hw/s390x/s390-pci-bus.h b/hw/s390x/s390-pci-bus.h
index 7f2701301e..0aad9cc272 100644
--- a/hw/s390x/s390-pci-bus.h
+++ b/hw/s390x/s390-pci-bus.h
@@ -23,10 +23,11 @@
#define TYPE_S390_PCI_HOST_BRIDGE "s390-pcihost"
#define TYPE_S390_PCI_BUS "s390-pcibus"
#define TYPE_S390_PCI_DEVICE "zpci"
+#define TYPE_S390_PCI_IOMMU "s390-pci-iommu"
#define FH_MASK_ENABLE 0x80000000
#define FH_MASK_INSTANCE 0x7f000000
#define FH_MASK_SHM 0x00ff0000
-#define FH_MASK_INDEX 0x0000001f
+#define FH_MASK_INDEX 0x0000ffff
#define FH_SHM_VFIO 0x00010000
#define FH_SHM_EMUL 0x00020000
#define S390_PCIPT_ADAPTER 2
@@ -42,6 +43,8 @@
OBJECT_CHECK(S390PCIBus, (obj), TYPE_S390_PCI_BUS)
#define S390_PCI_DEVICE(obj) \
OBJECT_CHECK(S390PCIBusDevice, (obj), TYPE_S390_PCI_DEVICE)
+#define S390_PCI_IOMMU(obj) \
+ OBJECT_CHECK(S390PCIIOMMU, (obj), TYPE_S390_PCI_IOMMU)
#define HP_EVENT_TO_CONFIGURED 0x0301
#define HP_EVENT_RESERVED_TO_STANDBY 0x0302
@@ -180,8 +183,8 @@ enum ZpciIoatDtype {
* may enter an error state
* blocked: ignore all DMA and interrupts; transition back to enabled or from
* error state via mpcifc
- * error: an error occured; transition back to enabled via mpcifc
- * permanent error: an unrecoverable error occured; transition to standby via
+ * error: an error occurred; transition back to enabled via mpcifc
+ * permanent error: an unrecoverable error occurred; transition to standby via
* sclp deconfigure
*/
typedef enum {
@@ -258,24 +261,34 @@ typedef struct S390MsixInfo {
uint32_t pba_offset;
} S390MsixInfo;
+typedef struct S390PCIBusDevice S390PCIBusDevice;
typedef struct S390PCIIOMMU {
+ Object parent_obj;
+ S390PCIBusDevice *pbdev;
AddressSpace as;
MemoryRegion mr;
+ MemoryRegion iommu_mr;
+ bool enabled;
+ uint64_t g_iota;
+ uint64_t pba;
+ uint64_t pal;
} S390PCIIOMMU;
+typedef struct S390PCIIOMMUTable {
+ uint64_t key;
+ S390PCIIOMMU *iommu[PCI_SLOT_MAX];
+} S390PCIIOMMUTable;
+
typedef struct S390PCIBusDevice {
DeviceState qdev;
PCIDevice *pdev;
ZpciState state;
- bool iommu_enabled;
char *target;
uint16_t uid;
+ uint32_t idx;
uint32_t fh;
uint32_t fid;
bool fid_defined;
- uint64_t g_iota;
- uint64_t pba;
- uint64_t pal;
uint64_t fmb_addr;
uint8_t isc;
uint16_t noi;
@@ -283,11 +296,11 @@ typedef struct S390PCIBusDevice {
S390MsixInfo msix;
AdapterRoutes routes;
S390PCIIOMMU *iommu;
- MemoryRegion iommu_mr;
MemoryRegion msix_notify_mr;
IndAddr *summary_ind;
IndAddr *indicator;
QEMUTimer *release_timer;
+ QTAILQ_ENTRY(S390PCIBusDevice) link;
} S390PCIBusDevice;
typedef struct S390PCIBus {
@@ -296,23 +309,28 @@ typedef struct S390PCIBus {
typedef struct S390pciState {
PCIHostState parent_obj;
+ uint32_t next_idx;
+ int bus_no;
S390PCIBus *bus;
- S390PCIBusDevice *pbdev[PCI_SLOT_MAX];
- S390PCIIOMMU *iommu[PCI_SLOT_MAX];
+ GHashTable *iommu_table;
+ GHashTable *zpci_table;
QTAILQ_HEAD(, SeiContainer) pending_sei;
+ QTAILQ_HEAD(, S390PCIBusDevice) zpci_devs;
} S390pciState;
+S390pciState *s390_get_phb(void);
int chsc_sei_nt2_get_event(void *res);
int chsc_sei_nt2_have_event(void);
void s390_pci_sclp_configure(SCCB *sccb);
void s390_pci_sclp_deconfigure(SCCB *sccb);
-void s390_pci_iommu_enable(S390PCIBusDevice *pbdev);
-void s390_pci_iommu_disable(S390PCIBusDevice *pbdev);
+void s390_pci_iommu_enable(S390PCIIOMMU *iommu);
+void s390_pci_iommu_disable(S390PCIIOMMU *iommu);
void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
uint64_t faddr, uint32_t e);
-S390PCIBusDevice *s390_pci_find_dev_by_idx(uint32_t idx);
-S390PCIBusDevice *s390_pci_find_dev_by_fh(uint32_t fh);
-S390PCIBusDevice *s390_pci_find_dev_by_fid(uint32_t fid);
-S390PCIBusDevice *s390_pci_find_next_avail_dev(S390PCIBusDevice *pbdev);
+S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx);
+S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh);
+S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid);
+S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
+ S390PCIBusDevice *pbdev);
#endif
diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
index 0864d9be12..d2a8c0a083 100644
--- a/hw/s390x/s390-pci-inst.c
+++ b/hw/s390x/s390-pci-inst.c
@@ -18,6 +18,7 @@
#include "s390-pci-bus.h"
#include "exec/memory-internal.h"
#include "qemu/error-report.h"
+#include "sysemu/hw_accel.h"
/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
@@ -38,6 +39,7 @@ static void s390_set_status_code(CPUS390XState *env,
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
S390PCIBusDevice *pbdev = NULL;
+ S390pciState *s = s390_get_phb();
uint32_t res_code, initial_l2, g_l2;
int rc, i;
uint64_t resume_token;
@@ -65,14 +67,14 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
resume_token = ldq_p(&rrb->request.resume_token);
if (resume_token) {
- pbdev = s390_pci_find_dev_by_idx(resume_token);
+ pbdev = s390_pci_find_dev_by_idx(s, resume_token);
if (!pbdev) {
res_code = CLP_RC_LISTPCI_BADRT;
rc = -EINVAL;
goto out;
}
} else {
- pbdev = s390_pci_find_next_avail_dev(NULL);
+ pbdev = s390_pci_find_next_avail_dev(s, NULL);
}
if (lduw_p(&rrb->response.hdr.len) < 48) {
@@ -118,7 +120,7 @@ static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
lduw_p(&rrb->response.fh_list[i].device_id),
ldl_p(&rrb->response.fh_list[i].fid),
ldl_p(&rrb->response.fh_list[i].fh));
- pbdev = s390_pci_find_next_avail_dev(pbdev);
+ pbdev = s390_pci_find_next_avail_dev(s, pbdev);
i++;
}
@@ -148,6 +150,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
uint8_t buffer[4096 * 2];
uint8_t cc = 0;
CPUS390XState *env = &cpu->env;
+ S390pciState *s = s390_get_phb();
int i;
cpu_synchronize_state(CPU(cpu));
@@ -202,7 +205,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;
- pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
+ pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh));
if (!pbdev) {
stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
goto out;
@@ -253,7 +256,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;
- pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
+ pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh));
if (!pbdev) {
DPRINTF("query pci no pci dev\n");
stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
@@ -338,7 +341,7 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
len = env->regs[r2] & 0xf;
offset = env->regs[r2 + 1];
- pbdev = s390_pci_find_dev_by_fh(fh);
+ pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
if (!pbdev) {
DPRINTF("pcilg no pci dev\n");
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
@@ -471,7 +474,7 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
len = env->regs[r2] & 0xf;
offset = env->regs[r2 + 1];
- pbdev = s390_pci_find_dev_by_fh(fh);
+ pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
if (!pbdev) {
DPRINTF("pcistg no pci dev\n");
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
@@ -555,6 +558,7 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
CPUS390XState *env = &cpu->env;
uint32_t fh;
S390PCIBusDevice *pbdev;
+ S390PCIIOMMU *iommu;
hwaddr start, end;
IOMMUTLBEntry entry;
MemoryRegion *mr;
@@ -575,7 +579,7 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
start = env->regs[r2];
end = start + env->regs[r2 + 1];
- pbdev = s390_pci_find_dev_by_fh(fh);
+ pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
if (!pbdev) {
DPRINTF("rpcit no pci dev\n");
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
@@ -597,7 +601,8 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
break;
}
- if (!pbdev->g_iota) {
+ iommu = pbdev->iommu;
+ if (!iommu->g_iota) {
pbdev->state = ZPCI_FS_ERROR;
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
@@ -606,7 +611,7 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
goto out;
}
- if (end < pbdev->pba || start > pbdev->pal) {
+ if (end < iommu->pba || start > iommu->pal) {
pbdev->state = ZPCI_FS_ERROR;
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
@@ -615,7 +620,7 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
goto out;
}
- mr = &pbdev->iommu_mr;
+ mr = &iommu->iommu_mr;
while (start < end) {
entry = mr->iommu_ops->translate(mr, start, 0);
@@ -677,7 +682,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
return 0;
}
- pbdev = s390_pci_find_dev_by_fh(fh);
+ pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
if (!pbdev) {
DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
@@ -783,7 +788,7 @@ int pci_dereg_irqs(S390PCIBusDevice *pbdev)
return 0;
}
-static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
+static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib)
{
uint64_t pba = ldq_p(&fib.pba);
uint64_t pal = ldq_p(&fib.pal);
@@ -803,21 +808,21 @@ static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
return -EINVAL;
}
- pbdev->pba = pba;
- pbdev->pal = pal;
- pbdev->g_iota = g_iota;
+ iommu->pba = pba;
+ iommu->pal = pal;
+ iommu->g_iota = g_iota;
- s390_pci_iommu_enable(pbdev);
+ s390_pci_iommu_enable(iommu);
return 0;
}
-void pci_dereg_ioat(S390PCIBusDevice *pbdev)
+void pci_dereg_ioat(S390PCIIOMMU *iommu)
{
- s390_pci_iommu_disable(pbdev);
- pbdev->pba = 0;
- pbdev->pal = 0;
- pbdev->g_iota = 0;
+ s390_pci_iommu_disable(iommu);
+ iommu->pba = 0;
+ iommu->pal = 0;
+ iommu->g_iota = 0;
}
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
@@ -843,7 +848,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
return 0;
}
- pbdev = s390_pci_find_dev_by_fh(fh);
+ pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
if (!pbdev) {
DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
@@ -892,10 +897,10 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
if (dmaas != 0) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
- } else if (pbdev->iommu_enabled) {
+ } else if (pbdev->iommu->enabled) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
- } else if (reg_ioat(env, pbdev, fib)) {
+ } else if (reg_ioat(env, pbdev->iommu, fib)) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
}
@@ -904,23 +909,23 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
if (dmaas != 0) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
- } else if (!pbdev->iommu_enabled) {
+ } else if (!pbdev->iommu->enabled) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
} else {
- pci_dereg_ioat(pbdev);
+ pci_dereg_ioat(pbdev->iommu);
}
break;
case ZPCI_MOD_FC_REREG_IOAT:
if (dmaas != 0) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
- } else if (!pbdev->iommu_enabled) {
+ } else if (!pbdev->iommu->enabled) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
} else {
- pci_dereg_ioat(pbdev);
- if (reg_ioat(env, pbdev, fib)) {
+ pci_dereg_ioat(pbdev->iommu);
+ if (reg_ioat(env, pbdev->iommu, fib)) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
}
@@ -988,7 +993,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
return 0;
}
- pbdev = s390_pci_find_dev_by_idx(fh & FH_MASK_INDEX);
+ pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
if (!pbdev) {
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
@@ -1015,7 +1020,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
fib.fc |= 0x40;
case ZPCI_FS_ENABLED:
fib.fc |= 0x80;
- if (pbdev->iommu_enabled) {
+ if (pbdev->iommu->enabled) {
fib.fc |= 0x10;
}
if (!(fh & FH_MASK_ENABLE)) {
@@ -1028,9 +1033,9 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
return 0;
}
- stq_p(&fib.pba, pbdev->pba);
- stq_p(&fib.pal, pbdev->pal);
- stq_p(&fib.iota, pbdev->g_iota);
+ stq_p(&fib.pba, pbdev->iommu->pba);
+ stq_p(&fib.pal, pbdev->iommu->pal);
+ stq_p(&fib.iota, pbdev->iommu->g_iota);
stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
stq_p(&fib.fmb_addr, pbdev->fmb_addr);
diff --git a/hw/s390x/s390-pci-inst.h b/hw/s390x/s390-pci-inst.h
index 23f4bfa0ed..94a959f91c 100644
--- a/hw/s390x/s390-pci-inst.h
+++ b/hw/s390x/s390-pci-inst.h
@@ -292,7 +292,7 @@ typedef struct ZpciFib {
} QEMU_PACKED ZpciFib;
int pci_dereg_irqs(S390PCIBusDevice *pbdev);
-void pci_dereg_ioat(S390PCIBusDevice *pbdev);
+void pci_dereg_ioat(S390PCIIOMMU *iommu);
int clp_service_call(S390CPU *cpu, uint8_t r2);
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index e340eab36b..e9a676797a 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -335,11 +335,13 @@ static const TypeInfo ccw_machine_info = {
} \
type_init(ccw_machine_register_##suffix)
+#define CCW_COMPAT_2_8 \
+ HW_COMPAT_2_8
+
#define CCW_COMPAT_2_7 \
HW_COMPAT_2_7
#define CCW_COMPAT_2_6 \
- CCW_COMPAT_2_7 \
HW_COMPAT_2_6 \
{\
.driver = TYPE_S390_IPL,\
@@ -352,7 +354,6 @@ static const TypeInfo ccw_machine_info = {
},
#define CCW_COMPAT_2_5 \
- CCW_COMPAT_2_6 \
HW_COMPAT_2_5
#define CCW_COMPAT_2_4 \
@@ -395,14 +396,26 @@ static const TypeInfo ccw_machine_info = {
.value = "0",\
},
+static void ccw_machine_2_9_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_2_9_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE(2_9, "2.9", true);
+
static void ccw_machine_2_8_instance_options(MachineState *machine)
{
+ ccw_machine_2_9_instance_options(machine);
}
static void ccw_machine_2_8_class_options(MachineClass *mc)
{
+ ccw_machine_2_9_class_options(mc);
+ SET_MACHINE_COMPAT(mc, CCW_COMPAT_2_8);
}
-DEFINE_CCW_MACHINE(2_8, "2.8", true);
+DEFINE_CCW_MACHINE(2_8, "2.8", false);
static void ccw_machine_2_7_instance_options(MachineState *machine)
{
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index f5c1d98192..63c46373fb 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -149,7 +149,7 @@ static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
} else {
if (info) {
/* virtio-1 allows changing the ring size. */
- if (virtio_queue_get_num(vdev, index) < num) {
+ if (virtio_queue_get_max_num(vdev, index) < num) {
/* Fail if we exceed the maximum number. */
return -EINVAL;
}
@@ -1098,7 +1098,7 @@ static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
* We do not support individual masking for channel devices, so we
* need to manually trigger any guest masking callbacks here.
*/
- if (k->guest_notifier_mask) {
+ if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
k->guest_notifier_mask(vdev, n, false);
}
/* get lost events and re-inject */
@@ -1107,7 +1107,7 @@ static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
event_notifier_set(notifier);
}
} else {
- if (k->guest_notifier_mask) {
+ if (k->guest_notifier_mask && vdev->use_guest_notifier_mask) {
k->guest_notifier_mask(vdev, n, true);
}
if (with_irqfd) {
diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c
index 67fc1e7893..6aad7c9a06 100644
--- a/hw/scsi/megasas.c
+++ b/hw/scsi/megasas.c
@@ -683,14 +683,14 @@ static int megasas_map_dcmd(MegasasState *s, MegasasCmd *cmd)
trace_megasas_dcmd_invalid_sge(cmd->index,
cmd->frame->header.sge_count);
cmd->iov_size = 0;
- return -1;
+ return -EINVAL;
}
iov_pa = megasas_sgl_get_addr(cmd, &cmd->frame->dcmd.sgl);
iov_size = megasas_sgl_get_len(cmd, &cmd->frame->dcmd.sgl);
pci_dma_sglist_init(&cmd->qsg, PCI_DEVICE(s), 1);
qemu_sglist_add(&cmd->qsg, iov_pa, iov_size);
cmd->iov_size = iov_size;
- return cmd->iov_size;
+ return 0;
}
static void megasas_finish_dcmd(MegasasCmd *cmd, uint32_t iov_size)
@@ -1559,19 +1559,20 @@ static const struct dcmd_cmd_tbl_t {
static int megasas_handle_dcmd(MegasasState *s, MegasasCmd *cmd)
{
- int opcode, len;
+ int opcode;
int retval = 0;
+ size_t len;
const struct dcmd_cmd_tbl_t *cmdptr = dcmd_cmd_tbl;
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
trace_megasas_handle_dcmd(cmd->index, opcode);
- len = megasas_map_dcmd(s, cmd);
- if (len < 0) {
+ if (megasas_map_dcmd(s, cmd) < 0) {
return MFI_STAT_MEMORY_NOT_AVAILABLE;
}
while (cmdptr->opcode != -1 && cmdptr->opcode != opcode) {
cmdptr++;
}
+ len = cmd->iov_size;
if (cmdptr->opcode == -1) {
trace_megasas_dcmd_unhandled(cmd->index, opcode, len);
retval = megasas_dcmd_dummy(s, cmd);
@@ -2288,7 +2289,7 @@ static const VMStateDescription vmstate_megasas_gen2 = {
.minimum_version_id = 0,
.minimum_version_id_old = 0,
.fields = (VMStateField[]) {
- VMSTATE_PCIE_DEVICE(parent_obj, MegasasState),
+ VMSTATE_PCI_DEVICE(parent_obj, MegasasState),
VMSTATE_MSIX(parent_obj, MegasasState),
VMSTATE_INT32(fw_state, MegasasState),
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index 297216dfcb..5940cb160c 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -1945,7 +1945,8 @@ SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
/* SCSI request list. For simplicity, pv points to the whole device */
-static void put_scsi_requests(QEMUFile *f, void *pv, size_t size)
+static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
SCSIDevice *s = pv;
SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
@@ -1968,9 +1969,12 @@ static void put_scsi_requests(QEMUFile *f, void *pv, size_t size)
}
}
qemu_put_sbyte(f, 0);
+
+ return 0;
}
-static int get_scsi_requests(QEMUFile *f, void *pv, size_t size)
+static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
SCSIDevice *s = pv;
SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index bdd1e5f86c..cc06fe5f6c 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -2157,6 +2157,7 @@ static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
(command & 0xe) == 0xe ? "And Verify " : "",
r->req.cmd.lba, len);
+ /* fall through */
case VERIFY_10:
case VERIFY_12:
case VERIFY_16:
@@ -2701,7 +2702,7 @@ static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
* for the number of logical blocks specified in the length
* field). For other modes, do not use scatter/gather operation.
*/
- if ((buf[1] & 6) != 2) {
+ if ((buf[1] & 6) == 2) {
return false;
}
break;
diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
index 5b2694615f..c491ece1f2 100644
--- a/hw/scsi/vhost-scsi.c
+++ b/hw/scsi/vhost-scsi.c
@@ -238,8 +238,16 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
vhost_dummy_handle_output);
if (err != NULL) {
error_propagate(errp, err);
- close(vhostfd);
- return;
+ goto close_fd;
+ }
+
+ error_setg(&s->migration_blocker,
+ "vhost-scsi does not support migration");
+ migrate_add_blocker(s->migration_blocker, &err);
+ if (err) {
+ error_propagate(errp, err);
+ error_free(s->migration_blocker);
+ goto close_fd;
}
s->dev.nvqs = VHOST_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
@@ -252,7 +260,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
if (ret < 0) {
error_setg(errp, "vhost-scsi: vhost initialization failed: %s",
strerror(-ret));
- return;
+ goto free_vqs;
}
/* At present, channel and lun both are 0 for bootable vhost-scsi disk */
@@ -261,9 +269,14 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
/* Note: we can also get the minimum tpgt from kernel */
s->target = vs->conf.boot_tpgt;
- error_setg(&s->migration_blocker,
- "vhost-scsi does not support migration");
- migrate_add_blocker(s->migration_blocker);
+ return;
+
+ free_vqs:
+ migrate_del_blocker(s->migration_blocker);
+ g_free(s->dev.vqs);
+ close_fd:
+ close(vhostfd);
+ return;
}
static void vhost_scsi_unrealize(DeviceState *dev, Error **errp)
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 34bba35d83..ce19efffc8 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -198,12 +198,14 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
SCSIBus *bus = sreq->bus;
VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
+ VirtIODevice *vdev = VIRTIO_DEVICE(s);
VirtIOSCSIReq *req;
uint32_t n;
qemu_get_be32s(f, &n);
assert(n < vs->conf.num_queues);
- req = qemu_get_virtqueue_element(f, sizeof(VirtIOSCSIReq) + vs->cdb_size);
+ req = qemu_get_virtqueue_element(vdev, f,
+ sizeof(VirtIOSCSIReq) + vs->cdb_size);
virtio_scsi_init_req(s, vs->cmd_vqs[n], req);
if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
@@ -592,26 +594,32 @@ static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
VirtIOSCSIReq *req, *next;
- int ret;
+ int ret = 0;
QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
- while ((req = virtio_scsi_pop_req(s, vq))) {
- ret = virtio_scsi_handle_cmd_req_prepare(s, req);
- if (!ret) {
- QTAILQ_INSERT_TAIL(&reqs, req, next);
- } else if (ret == -EINVAL) {
- /* The device is broken and shouldn't process any request */
- while (!QTAILQ_EMPTY(&reqs)) {
- req = QTAILQ_FIRST(&reqs);
- QTAILQ_REMOVE(&reqs, req, next);
- blk_io_unplug(req->sreq->dev->conf.blk);
- scsi_req_unref(req->sreq);
- virtqueue_detach_element(req->vq, &req->elem, 0);
- virtio_scsi_free_req(req);
+ do {
+ virtio_queue_set_notification(vq, 0);
+
+ while ((req = virtio_scsi_pop_req(s, vq))) {
+ ret = virtio_scsi_handle_cmd_req_prepare(s, req);
+ if (!ret) {
+ QTAILQ_INSERT_TAIL(&reqs, req, next);
+ } else if (ret == -EINVAL) {
+ /* The device is broken and shouldn't process any request */
+ while (!QTAILQ_EMPTY(&reqs)) {
+ req = QTAILQ_FIRST(&reqs);
+ QTAILQ_REMOVE(&reqs, req, next);
+ blk_io_unplug(req->sreq->dev->conf.blk);
+ scsi_req_unref(req->sreq);
+ virtqueue_detach_element(req->vq, &req->elem, 0);
+ virtio_scsi_free_req(req);
+ }
}
}
- }
+
+ virtio_queue_set_notification(vq, 1);
+ } while (ret != -EINVAL && !virtio_queue_empty(vq));
QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
virtio_scsi_handle_cmd_req_submit(s, req);
diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c
index a5ce7dea8e..75575461e2 100644
--- a/hw/scsi/vmw_pvscsi.c
+++ b/hw/scsi/vmw_pvscsi.c
@@ -1207,7 +1207,7 @@ static const VMStateDescription vmstate_pvscsi_pcie_device = {
.name = "pvscsi/pcie",
.needed = pvscsi_vmstate_need_pcie_device,
.fields = (VMStateField[]) {
- VMSTATE_PCIE_DEVICE(parent_obj, PVSCSIState),
+ VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState),
VMSTATE_END_OF_LIST()
}
};
diff --git a/hw/sh4/sh7750.c b/hw/sh4/sh7750.c
index 3132d559d7..166e4bd947 100644
--- a/hw/sh4/sh7750.c
+++ b/hw/sh4/sh7750.c
@@ -417,7 +417,7 @@ static void sh7750_mem_writel(void *opaque, hwaddr addr,
case SH7750_PTEH_A7:
/* If asid changes, clear all registered tlb entries. */
if ((s->cpu->env.pteh & 0xff) != (mem_value & 0xff)) {
- tlb_flush(CPU(s->cpu), 1);
+ tlb_flush(CPU(s->cpu));
}
s->cpu->env.pteh = mem_value;
return;
diff --git a/hw/smbios/Makefile.objs b/hw/smbios/Makefile.objs
index c3d3753602..23bb2bac07 100644
--- a/hw/smbios/Makefile.objs
+++ b/hw/smbios/Makefile.objs
@@ -1,2 +1,10 @@
-common-obj-$(CONFIG_SMBIOS) += smbios.o
-common-obj-$(call land,$(CONFIG_SMBIOS),$(CONFIG_IPMI)) += smbios_type_38.o
+ifeq ($(CONFIG_SMBIOS),y)
+common-obj-y += smbios.o
+common-obj-$(CONFIG_IPMI) += smbios_type_38.o
+common-obj-$(call lnot,$(CONFIG_IPMI)) += smbios_type_38-stub.o
+else
+common-obj-y += smbios-stub.o
+endif
+
+common-obj-$(CONFIG_ALL) += smbios-stub.o
+common-obj-$(CONFIG_ALL) += smbios_type_38-stub.o
diff --git a/hw/smbios/smbios-stub.c b/hw/smbios/smbios-stub.c
new file mode 100644
index 0000000000..308739410f
--- /dev/null
+++ b/hw/smbios/smbios-stub.c
@@ -0,0 +1,31 @@
+/*
+ * SMBIOS stubs for platforms that don't support SMBIOS.
+ *
+ * Copyright (c) 2010 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ * Copyright (c) 2016 Leif Lindholm <leif.lindholm@linaro.org>
+ * Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/qmp/qerror.h"
+#include "qmp-commands.h"
+#include "hw/smbios/smbios.h"
+
+void smbios_entry_add(QemuOpts *opts, Error **errp)
+{
+ error_setg(errp, QERR_UNSUPPORTED);
+}
diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c
index 3a96cededd..1a5437a07d 100644
--- a/hw/smbios/smbios.c
+++ b/hw/smbios/smbios.c
@@ -882,7 +882,7 @@ static void save_opt(const char **dest, QemuOpts *opts, const char *name)
}
}
-void smbios_entry_add(QemuOpts *opts)
+void smbios_entry_add(QemuOpts *opts, Error **errp)
{
const char *val;
diff --git a/stubs/smbios_type_38.c b/hw/smbios/smbios_type_38-stub.c
index 9528c2c28e..9528c2c28e 100644
--- a/stubs/smbios_type_38.c
+++ b/hw/smbios/smbios_type_38-stub.c
diff --git a/hw/sparc64/Makefile.objs b/hw/sparc64/Makefile.objs
index a84cfe3ec7..cf9de21133 100644
--- a/hw/sparc64/Makefile.objs
+++ b/hw/sparc64/Makefile.objs
@@ -1 +1,3 @@
+obj-y += sparc64.o
obj-y += sun4u.o
+obj-y += niagara.o
\ No newline at end of file
diff --git a/hw/sparc64/niagara.c b/hw/sparc64/niagara.c
new file mode 100644
index 0000000000..b55d4bb8d3
--- /dev/null
+++ b/hw/sparc64/niagara.c
@@ -0,0 +1,177 @@
+/*
+ * QEMU Sun4v/Niagara System Emulator
+ *
+ * Copyright (c) 2016 Artyom Tarasenko
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "hw/char/serial.h"
+#include "hw/empty_slot.h"
+#include "hw/loader.h"
+#include "hw/sparc/sparc64.h"
+#include "hw/timer/sun4v-rtc.h"
+#include "exec/address-spaces.h"
+#include "sysemu/block-backend.h"
+
+
+typedef struct NiagaraBoardState {
+ MemoryRegion hv_ram;
+ MemoryRegion partition_ram;
+ MemoryRegion nvram;
+ MemoryRegion md_rom;
+ MemoryRegion hv_rom;
+ MemoryRegion vdisk_ram;
+ MemoryRegion prom;
+} NiagaraBoardState;
+
+#define NIAGARA_HV_RAM_BASE 0x100000ULL
+#define NIAGARA_HV_RAM_SIZE 0x3f00000ULL /* 63 MiB */
+
+#define NIAGARA_PARTITION_RAM_BASE 0x80000000ULL
+
+#define NIAGARA_UART_BASE 0x1f10000000ULL
+
+#define NIAGARA_NVRAM_BASE 0x1f11000000ULL
+#define NIAGARA_NVRAM_SIZE 0x2000
+
+#define NIAGARA_MD_ROM_BASE 0x1f12000000ULL
+#define NIAGARA_MD_ROM_SIZE 0x2000
+
+#define NIAGARA_HV_ROM_BASE 0x1f12080000ULL
+#define NIAGARA_HV_ROM_SIZE 0x2000
+
+#define NIAGARA_IOBBASE 0x9800000000ULL
+#define NIAGARA_IOBSIZE 0x0100000000ULL
+
+#define NIAGARA_VDISK_BASE 0x1f40000000ULL
+#define NIAGARA_RTC_BASE 0xfff0c1fff8ULL
+#define NIAGARA_UART_BASE 0x1f10000000ULL
+
+/* Firmware layout
+ *
+ * |------------------|
+ * | openboot.bin |
+ * |------------------| PROM_ADDR + OBP_OFFSET
+ * | q.bin |
+ * |------------------| PROM_ADDR + Q_OFFSET
+ * | reset.bin |
+ * |------------------| PROM_ADDR
+ */
+#define NIAGARA_PROM_BASE 0xfff0000000ULL
+#define NIAGARA_Q_OFFSET 0x10000ULL
+#define NIAGARA_OBP_OFFSET 0x80000ULL
+#define PROM_SIZE_MAX (4 * 1024 * 1024)
+
+/* Niagara hardware initialisation */
+static void niagara_init(MachineState *machine)
+{
+ NiagaraBoardState *s = g_new(NiagaraBoardState, 1);
+ DriveInfo *dinfo = drive_get_next(IF_PFLASH);
+ MemoryRegion *sysmem = get_system_memory();
+
+ /* init CPUs */
+ sparc64_cpu_devinit(machine->cpu_model, "Sun UltraSparc T1",
+ NIAGARA_PROM_BASE);
+ /* set up devices */
+ memory_region_allocate_system_memory(&s->hv_ram, NULL, "sun4v-hv.ram",
+ NIAGARA_HV_RAM_SIZE);
+ memory_region_add_subregion(sysmem, NIAGARA_HV_RAM_BASE, &s->hv_ram);
+
+ memory_region_allocate_system_memory(&s->partition_ram, NULL,
+ "sun4v-partition.ram",
+ machine->ram_size);
+ memory_region_add_subregion(sysmem, NIAGARA_PARTITION_RAM_BASE,
+ &s->partition_ram);
+
+ memory_region_allocate_system_memory(&s->nvram, NULL,
+ "sun4v.nvram", NIAGARA_NVRAM_SIZE);
+ memory_region_add_subregion(sysmem, NIAGARA_NVRAM_BASE, &s->nvram);
+ memory_region_allocate_system_memory(&s->md_rom, NULL,
+ "sun4v-md.rom", NIAGARA_MD_ROM_SIZE);
+ memory_region_add_subregion(sysmem, NIAGARA_MD_ROM_BASE, &s->md_rom);
+ memory_region_allocate_system_memory(&s->hv_rom, NULL,
+ "sun4v-hv.rom", NIAGARA_HV_ROM_SIZE);
+ memory_region_add_subregion(sysmem, NIAGARA_HV_ROM_BASE, &s->hv_rom);
+ memory_region_allocate_system_memory(&s->prom, NULL,
+ "sun4v.prom", PROM_SIZE_MAX);
+ memory_region_add_subregion(sysmem, NIAGARA_PROM_BASE, &s->prom);
+
+ rom_add_file_fixed("nvram1", NIAGARA_NVRAM_BASE, -1);
+ rom_add_file_fixed("1up-md.bin", NIAGARA_MD_ROM_BASE, -1);
+ rom_add_file_fixed("1up-hv.bin", NIAGARA_HV_ROM_BASE, -1);
+
+ rom_add_file_fixed("reset.bin", NIAGARA_PROM_BASE, -1);
+ rom_add_file_fixed("q.bin", NIAGARA_PROM_BASE + NIAGARA_Q_OFFSET, -1);
+ rom_add_file_fixed("openboot.bin", NIAGARA_PROM_BASE + NIAGARA_OBP_OFFSET,
+ -1);
+
+ /* the virtual ramdisk is kind of initrd, but it resides
+ outside of the partition RAM */
+ if (dinfo) {
+ BlockBackend *blk = blk_by_legacy_dinfo(dinfo);
+ int size = blk_getlength(blk);
+ if (size > 0) {
+ memory_region_allocate_system_memory(&s->vdisk_ram, NULL,
+ "sun4v_vdisk.ram", size);
+ memory_region_add_subregion(get_system_memory(),
+ NIAGARA_VDISK_BASE, &s->vdisk_ram);
+ dinfo->is_default = 1;
+ rom_add_file_fixed(blk_bs(blk)->filename, NIAGARA_VDISK_BASE, -1);
+ } else {
+ fprintf(stderr, "qemu: could not load ram disk '%s'\n",
+ blk_bs(blk)->filename);
+ exit(1);
+ }
+ }
+ serial_mm_init(sysmem, NIAGARA_UART_BASE, 0, NULL, 115200,
+ serial_hds[0], DEVICE_BIG_ENDIAN);
+
+ empty_slot_init(NIAGARA_IOBBASE, NIAGARA_IOBSIZE);
+ sun4v_rtc_init(NIAGARA_RTC_BASE);
+}
+
+static void niagara_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "Sun4v platform, Niagara";
+ mc->init = niagara_init;
+ mc->max_cpus = 1; /* XXX for now */
+ mc->default_boot_order = "c";
+}
+
+static const TypeInfo niagara_type = {
+ .name = MACHINE_TYPE_NAME("niagara"),
+ .parent = TYPE_MACHINE,
+ .class_init = niagara_class_init,
+};
+
+static void niagara_register_types(void)
+{
+ type_register_static(&niagara_type);
+}
+
+type_init(niagara_register_types)
diff --git a/hw/sparc64/sparc64.c b/hw/sparc64/sparc64.c
new file mode 100644
index 0000000000..b3d219c769
--- /dev/null
+++ b/hw/sparc64/sparc64.c
@@ -0,0 +1,378 @@
+/*
+ * QEMU Sun4u/Sun4v System Emulator common routines
+ *
+ * Copyright (c) 2005 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "hw/char/serial.h"
+#include "hw/sparc/sparc64.h"
+#include "qemu/timer.h"
+
+
+//#define DEBUG_IRQ
+//#define DEBUG_TIMER
+
+#ifdef DEBUG_IRQ
+#define CPUIRQ_DPRINTF(fmt, ...) \
+ do { printf("CPUIRQ: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define CPUIRQ_DPRINTF(fmt, ...)
+#endif
+
+#ifdef DEBUG_TIMER
+#define TIMER_DPRINTF(fmt, ...) \
+ do { printf("TIMER: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define TIMER_DPRINTF(fmt, ...)
+#endif
+
+#define TICK_MAX 0x7fffffffffffffffULL
+
+void cpu_check_irqs(CPUSPARCState *env)
+{
+ CPUState *cs;
+ uint32_t pil = env->pil_in |
+ (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER));
+
+ /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */
+ if (env->ivec_status & 0x20) {
+ return;
+ }
+ cs = CPU(sparc_env_get_cpu(env));
+ /* check if TM or SM in SOFTINT are set
+ setting these also causes interrupt 14 */
+ if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) {
+ pil |= 1 << 14;
+ }
+
+ /* The bit corresponding to psrpil is (1<< psrpil), the next bit
+ is (2 << psrpil). */
+ if (pil < (2 << env->psrpil)) {
+ if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ CPUIRQ_DPRINTF("Reset CPU IRQ (current interrupt %x)\n",
+ env->interrupt_index);
+ env->interrupt_index = 0;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ return;
+ }
+
+ if (cpu_interrupts_enabled(env)) {
+
+ unsigned int i;
+
+ for (i = 15; i > env->psrpil; i--) {
+ if (pil & (1 << i)) {
+ int old_interrupt = env->interrupt_index;
+ int new_interrupt = TT_EXTINT | i;
+
+ if (unlikely(env->tl > 0 && cpu_tsptr(env)->tt > new_interrupt
+ && ((cpu_tsptr(env)->tt & 0x1f0) == TT_EXTINT))) {
+ CPUIRQ_DPRINTF("Not setting CPU IRQ: TL=%d "
+ "current %x >= pending %x\n",
+ env->tl, cpu_tsptr(env)->tt, new_interrupt);
+ } else if (old_interrupt != new_interrupt) {
+ env->interrupt_index = new_interrupt;
+ CPUIRQ_DPRINTF("Set CPU IRQ %d old=%x new=%x\n", i,
+ old_interrupt, new_interrupt);
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ break;
+ }
+ }
+ } else if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ CPUIRQ_DPRINTF("Interrupts disabled, pil=%08x pil_in=%08x softint=%08x "
+ "current interrupt %x\n",
+ pil, env->pil_in, env->softint, env->interrupt_index);
+ env->interrupt_index = 0;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+static void cpu_kick_irq(SPARCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUSPARCState *env = &cpu->env;
+
+ cs->halted = 0;
+ cpu_check_irqs(env);
+ qemu_cpu_kick(cs);
+}
+
+void sparc64_cpu_set_ivec_irq(void *opaque, int irq, int level)
+{
+ SPARCCPU *cpu = opaque;
+ CPUSPARCState *env = &cpu->env;
+ CPUState *cs;
+
+ if (level) {
+ if (!(env->ivec_status & 0x20)) {
+ CPUIRQ_DPRINTF("Raise IVEC IRQ %d\n", irq);
+ cs = CPU(cpu);
+ cs->halted = 0;
+ env->interrupt_index = TT_IVEC;
+ env->ivec_status |= 0x20;
+ env->ivec_data[0] = (0x1f << 6) | irq;
+ env->ivec_data[1] = 0;
+ env->ivec_data[2] = 0;
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ } else {
+ if (env->ivec_status & 0x20) {
+ CPUIRQ_DPRINTF("Lower IVEC IRQ %d\n", irq);
+ cs = CPU(cpu);
+ env->ivec_status &= ~0x20;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ }
+}
+
+typedef struct ResetData {
+ SPARCCPU *cpu;
+ uint64_t prom_addr;
+} ResetData;
+
+static CPUTimer *cpu_timer_create(const char *name, SPARCCPU *cpu,
+ QEMUBHFunc *cb, uint32_t frequency,
+ uint64_t disabled_mask, uint64_t npt_mask)
+{
+ CPUTimer *timer = g_malloc0(sizeof(CPUTimer));
+
+ timer->name = name;
+ timer->frequency = frequency;
+ timer->disabled_mask = disabled_mask;
+ timer->npt_mask = npt_mask;
+
+ timer->disabled = 1;
+ timer->npt = 1;
+ timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+
+ timer->qtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cb, cpu);
+
+ return timer;
+}
+
+static void cpu_timer_reset(CPUTimer *timer)
+{
+ timer->disabled = 1;
+ timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+
+ timer_del(timer->qtimer);
+}
+
+static void main_cpu_reset(void *opaque)
+{
+ ResetData *s = (ResetData *)opaque;
+ CPUSPARCState *env = &s->cpu->env;
+ static unsigned int nr_resets;
+
+ cpu_reset(CPU(s->cpu));
+
+ cpu_timer_reset(env->tick);
+ cpu_timer_reset(env->stick);
+ cpu_timer_reset(env->hstick);
+
+ env->gregs[1] = 0; /* Memory start */
+ env->gregs[2] = ram_size; /* Memory size */
+ env->gregs[3] = 0; /* Machine description XXX */
+ if (nr_resets++ == 0) {
+ /* Power on reset */
+ env->pc = s->prom_addr + 0x20ULL;
+ } else {
+ env->pc = s->prom_addr + 0x40ULL;
+ }
+ env->npc = env->pc + 4;
+}
+
+static void tick_irq(void *opaque)
+{
+ SPARCCPU *cpu = opaque;
+ CPUSPARCState *env = &cpu->env;
+
+ CPUTimer *timer = env->tick;
+
+ if (timer->disabled) {
+ CPUIRQ_DPRINTF("tick_irq: softint disabled\n");
+ return;
+ } else {
+ CPUIRQ_DPRINTF("tick: fire\n");
+ }
+
+ env->softint |= SOFTINT_TIMER;
+ cpu_kick_irq(cpu);
+}
+
+static void stick_irq(void *opaque)
+{
+ SPARCCPU *cpu = opaque;
+ CPUSPARCState *env = &cpu->env;
+
+ CPUTimer *timer = env->stick;
+
+ if (timer->disabled) {
+ CPUIRQ_DPRINTF("stick_irq: softint disabled\n");
+ return;
+ } else {
+ CPUIRQ_DPRINTF("stick: fire\n");
+ }
+
+ env->softint |= SOFTINT_STIMER;
+ cpu_kick_irq(cpu);
+}
+
+static void hstick_irq(void *opaque)
+{
+ SPARCCPU *cpu = opaque;
+ CPUSPARCState *env = &cpu->env;
+
+ CPUTimer *timer = env->hstick;
+
+ if (timer->disabled) {
+ CPUIRQ_DPRINTF("hstick_irq: softint disabled\n");
+ return;
+ } else {
+ CPUIRQ_DPRINTF("hstick: fire\n");
+ }
+
+ env->softint |= SOFTINT_STIMER;
+ cpu_kick_irq(cpu);
+}
+
+static int64_t cpu_to_timer_ticks(int64_t cpu_ticks, uint32_t frequency)
+{
+ return muldiv64(cpu_ticks, NANOSECONDS_PER_SECOND, frequency);
+}
+
+static uint64_t timer_to_cpu_ticks(int64_t timer_ticks, uint32_t frequency)
+{
+ return muldiv64(timer_ticks, frequency, NANOSECONDS_PER_SECOND);
+}
+
+void cpu_tick_set_count(CPUTimer *timer, uint64_t count)
+{
+ uint64_t real_count = count & ~timer->npt_mask;
+ uint64_t npt_bit = count & timer->npt_mask;
+
+ int64_t vm_clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
+ cpu_to_timer_ticks(real_count, timer->frequency);
+
+ TIMER_DPRINTF("%s set_count count=0x%016lx (npt %s) p=%p\n",
+ timer->name, real_count,
+ timer->npt ? "disabled" : "enabled", timer);
+
+ timer->npt = npt_bit ? 1 : 0;
+ timer->clock_offset = vm_clock_offset;
+}
+
+uint64_t cpu_tick_get_count(CPUTimer *timer)
+{
+ uint64_t real_count = timer_to_cpu_ticks(
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->clock_offset,
+ timer->frequency);
+
+ TIMER_DPRINTF("%s get_count count=0x%016lx (npt %s) p=%p\n",
+ timer->name, real_count,
+ timer->npt ? "disabled" : "enabled", timer);
+
+ if (timer->npt) {
+ real_count |= timer->npt_mask;
+ }
+
+ return real_count;
+}
+
+void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit)
+{
+ int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+
+ uint64_t real_limit = limit & ~timer->disabled_mask;
+ timer->disabled = (limit & timer->disabled_mask) ? 1 : 0;
+
+ int64_t expires = cpu_to_timer_ticks(real_limit, timer->frequency) +
+ timer->clock_offset;
+
+ if (expires < now) {
+ expires = now + 1;
+ }
+
+ TIMER_DPRINTF("%s set_limit limit=0x%016lx (%s) p=%p "
+ "called with limit=0x%016lx at 0x%016lx (delta=0x%016lx)\n",
+ timer->name, real_limit,
+ timer->disabled ? "disabled" : "enabled",
+ timer, limit,
+ timer_to_cpu_ticks(now - timer->clock_offset,
+ timer->frequency),
+ timer_to_cpu_ticks(expires - now, timer->frequency));
+
+ if (!real_limit) {
+ TIMER_DPRINTF("%s set_limit limit=ZERO - not starting timer\n",
+ timer->name);
+ timer_del(timer->qtimer);
+ } else if (timer->disabled) {
+ timer_del(timer->qtimer);
+ } else {
+ timer_mod(timer->qtimer, expires);
+ }
+}
+
+SPARCCPU *sparc64_cpu_devinit(const char *cpu_model,
+ const char *default_cpu_model, uint64_t prom_addr)
+{
+ SPARCCPU *cpu;
+ CPUSPARCState *env;
+ ResetData *reset_info;
+
+ uint32_t tick_frequency = 100 * 1000000;
+ uint32_t stick_frequency = 100 * 1000000;
+ uint32_t hstick_frequency = 100 * 1000000;
+
+ if (cpu_model == NULL) {
+ cpu_model = default_cpu_model;
+ }
+ cpu = cpu_sparc_init(cpu_model);
+ if (cpu == NULL) {
+ fprintf(stderr, "Unable to find Sparc CPU definition\n");
+ exit(1);
+ }
+ env = &cpu->env;
+
+ env->tick = cpu_timer_create("tick", cpu, tick_irq,
+ tick_frequency, TICK_INT_DIS,
+ TICK_NPT_MASK);
+
+ env->stick = cpu_timer_create("stick", cpu, stick_irq,
+ stick_frequency, TICK_INT_DIS,
+ TICK_NPT_MASK);
+
+ env->hstick = cpu_timer_create("hstick", cpu, hstick_irq,
+ hstick_frequency, TICK_INT_DIS,
+ TICK_NPT_MASK);
+
+ reset_info = g_malloc0(sizeof(ResetData));
+ reset_info->cpu = cpu;
+ reset_info->prom_addr = prom_addr;
+ qemu_register_reset(main_cpu_reset, reset_info);
+
+ return cpu;
+}
diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c
index 466331535b..d1a6bca873 100644
--- a/hw/sparc64/sun4u.c
+++ b/hw/sparc64/sun4u.c
@@ -38,25 +38,15 @@
#include "hw/boards.h"
#include "hw/nvram/sun_nvram.h"
#include "hw/nvram/chrp_nvram.h"
+#include "hw/sparc/sparc64.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/sysbus.h"
#include "hw/ide.h"
#include "hw/loader.h"
#include "elf.h"
-#include "sysemu/block-backend.h"
-#include "exec/address-spaces.h"
#include "qemu/cutils.h"
-//#define DEBUG_IRQ
//#define DEBUG_EBUS
-//#define DEBUG_TIMER
-
-#ifdef DEBUG_IRQ
-#define CPUIRQ_DPRINTF(fmt, ...) \
- do { printf("CPUIRQ: " fmt , ## __VA_ARGS__); } while (0)
-#else
-#define CPUIRQ_DPRINTF(fmt, ...)
-#endif
#ifdef DEBUG_EBUS
#define EBUS_DPRINTF(fmt, ...) \
@@ -65,13 +55,6 @@
#define EBUS_DPRINTF(fmt, ...)
#endif
-#ifdef DEBUG_TIMER
-#define TIMER_DPRINTF(fmt, ...) \
- do { printf("TIMER: " fmt , ## __VA_ARGS__); } while (0)
-#else
-#define TIMER_DPRINTF(fmt, ...)
-#endif
-
#define KERNEL_LOAD_ADDR 0x00404000
#define CMDLINE_ADDR 0x003ff000
#define PROM_SIZE_MAX (4 * 1024 * 1024)
@@ -89,8 +72,6 @@
#define IVEC_MAX 0x40
-#define TICK_MAX 0x7fffffffffffffffULL
-
struct hwdef {
const char * const default_cpu_model;
uint16_t machine_id;
@@ -216,293 +197,11 @@ static uint64_t sun4u_load_kernel(const char *kernel_filename,
return kernel_size;
}
-void cpu_check_irqs(CPUSPARCState *env)
-{
- CPUState *cs;
- uint32_t pil = env->pil_in |
- (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER));
-
- /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */
- if (env->ivec_status & 0x20) {
- return;
- }
- cs = CPU(sparc_env_get_cpu(env));
- /* check if TM or SM in SOFTINT are set
- setting these also causes interrupt 14 */
- if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) {
- pil |= 1 << 14;
- }
-
- /* The bit corresponding to psrpil is (1<< psrpil), the next bit
- is (2 << psrpil). */
- if (pil < (2 << env->psrpil)){
- if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
- CPUIRQ_DPRINTF("Reset CPU IRQ (current interrupt %x)\n",
- env->interrupt_index);
- env->interrupt_index = 0;
- cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
- }
- return;
- }
-
- if (cpu_interrupts_enabled(env)) {
-
- unsigned int i;
-
- for (i = 15; i > env->psrpil; i--) {
- if (pil & (1 << i)) {
- int old_interrupt = env->interrupt_index;
- int new_interrupt = TT_EXTINT | i;
-
- if (unlikely(env->tl > 0 && cpu_tsptr(env)->tt > new_interrupt
- && ((cpu_tsptr(env)->tt & 0x1f0) == TT_EXTINT))) {
- CPUIRQ_DPRINTF("Not setting CPU IRQ: TL=%d "
- "current %x >= pending %x\n",
- env->tl, cpu_tsptr(env)->tt, new_interrupt);
- } else if (old_interrupt != new_interrupt) {
- env->interrupt_index = new_interrupt;
- CPUIRQ_DPRINTF("Set CPU IRQ %d old=%x new=%x\n", i,
- old_interrupt, new_interrupt);
- cpu_interrupt(cs, CPU_INTERRUPT_HARD);
- }
- break;
- }
- }
- } else if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
- CPUIRQ_DPRINTF("Interrupts disabled, pil=%08x pil_in=%08x softint=%08x "
- "current interrupt %x\n",
- pil, env->pil_in, env->softint, env->interrupt_index);
- env->interrupt_index = 0;
- cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
- }
-}
-
-static void cpu_kick_irq(SPARCCPU *cpu)
-{
- CPUState *cs = CPU(cpu);
- CPUSPARCState *env = &cpu->env;
-
- cs->halted = 0;
- cpu_check_irqs(env);
- qemu_cpu_kick(cs);
-}
-
-static void cpu_set_ivec_irq(void *opaque, int irq, int level)
-{
- SPARCCPU *cpu = opaque;
- CPUSPARCState *env = &cpu->env;
- CPUState *cs;
-
- if (level) {
- if (!(env->ivec_status & 0x20)) {
- CPUIRQ_DPRINTF("Raise IVEC IRQ %d\n", irq);
- cs = CPU(cpu);
- cs->halted = 0;
- env->interrupt_index = TT_IVEC;
- env->ivec_status |= 0x20;
- env->ivec_data[0] = (0x1f << 6) | irq;
- env->ivec_data[1] = 0;
- env->ivec_data[2] = 0;
- cpu_interrupt(cs, CPU_INTERRUPT_HARD);
- }
- } else {
- if (env->ivec_status & 0x20) {
- CPUIRQ_DPRINTF("Lower IVEC IRQ %d\n", irq);
- cs = CPU(cpu);
- env->ivec_status &= ~0x20;
- cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
- }
- }
-}
-
typedef struct ResetData {
SPARCCPU *cpu;
uint64_t prom_addr;
} ResetData;
-static CPUTimer *cpu_timer_create(const char *name, SPARCCPU *cpu,
- QEMUBHFunc *cb, uint32_t frequency,
- uint64_t disabled_mask, uint64_t npt_mask)
-{
- CPUTimer *timer = g_malloc0(sizeof (CPUTimer));
-
- timer->name = name;
- timer->frequency = frequency;
- timer->disabled_mask = disabled_mask;
- timer->npt_mask = npt_mask;
-
- timer->disabled = 1;
- timer->npt = 1;
- timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-
- timer->qtimer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cb, cpu);
-
- return timer;
-}
-
-static void cpu_timer_reset(CPUTimer *timer)
-{
- timer->disabled = 1;
- timer->clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-
- timer_del(timer->qtimer);
-}
-
-static void main_cpu_reset(void *opaque)
-{
- ResetData *s = (ResetData *)opaque;
- CPUSPARCState *env = &s->cpu->env;
- static unsigned int nr_resets;
-
- cpu_reset(CPU(s->cpu));
-
- cpu_timer_reset(env->tick);
- cpu_timer_reset(env->stick);
- cpu_timer_reset(env->hstick);
-
- env->gregs[1] = 0; // Memory start
- env->gregs[2] = ram_size; // Memory size
- env->gregs[3] = 0; // Machine description XXX
- if (nr_resets++ == 0) {
- /* Power on reset */
- env->pc = s->prom_addr + 0x20ULL;
- } else {
- env->pc = s->prom_addr + 0x40ULL;
- }
- env->npc = env->pc + 4;
-}
-
-static void tick_irq(void *opaque)
-{
- SPARCCPU *cpu = opaque;
- CPUSPARCState *env = &cpu->env;
-
- CPUTimer* timer = env->tick;
-
- if (timer->disabled) {
- CPUIRQ_DPRINTF("tick_irq: softint disabled\n");
- return;
- } else {
- CPUIRQ_DPRINTF("tick: fire\n");
- }
-
- env->softint |= SOFTINT_TIMER;
- cpu_kick_irq(cpu);
-}
-
-static void stick_irq(void *opaque)
-{
- SPARCCPU *cpu = opaque;
- CPUSPARCState *env = &cpu->env;
-
- CPUTimer* timer = env->stick;
-
- if (timer->disabled) {
- CPUIRQ_DPRINTF("stick_irq: softint disabled\n");
- return;
- } else {
- CPUIRQ_DPRINTF("stick: fire\n");
- }
-
- env->softint |= SOFTINT_STIMER;
- cpu_kick_irq(cpu);
-}
-
-static void hstick_irq(void *opaque)
-{
- SPARCCPU *cpu = opaque;
- CPUSPARCState *env = &cpu->env;
-
- CPUTimer* timer = env->hstick;
-
- if (timer->disabled) {
- CPUIRQ_DPRINTF("hstick_irq: softint disabled\n");
- return;
- } else {
- CPUIRQ_DPRINTF("hstick: fire\n");
- }
-
- env->softint |= SOFTINT_STIMER;
- cpu_kick_irq(cpu);
-}
-
-static int64_t cpu_to_timer_ticks(int64_t cpu_ticks, uint32_t frequency)
-{
- return muldiv64(cpu_ticks, NANOSECONDS_PER_SECOND, frequency);
-}
-
-static uint64_t timer_to_cpu_ticks(int64_t timer_ticks, uint32_t frequency)
-{
- return muldiv64(timer_ticks, frequency, NANOSECONDS_PER_SECOND);
-}
-
-void cpu_tick_set_count(CPUTimer *timer, uint64_t count)
-{
- uint64_t real_count = count & ~timer->npt_mask;
- uint64_t npt_bit = count & timer->npt_mask;
-
- int64_t vm_clock_offset = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
- cpu_to_timer_ticks(real_count, timer->frequency);
-
- TIMER_DPRINTF("%s set_count count=0x%016lx (npt %s) p=%p\n",
- timer->name, real_count,
- timer->npt ? "disabled" : "enabled", timer);
-
- timer->npt = npt_bit ? 1 : 0;
- timer->clock_offset = vm_clock_offset;
-}
-
-uint64_t cpu_tick_get_count(CPUTimer *timer)
-{
- uint64_t real_count = timer_to_cpu_ticks(
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - timer->clock_offset,
- timer->frequency);
-
- TIMER_DPRINTF("%s get_count count=0x%016lx (npt %s) p=%p\n",
- timer->name, real_count,
- timer->npt ? "disabled" : "enabled", timer);
-
- if (timer->npt) {
- real_count |= timer->npt_mask;
- }
-
- return real_count;
-}
-
-void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit)
-{
- int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-
- uint64_t real_limit = limit & ~timer->disabled_mask;
- timer->disabled = (limit & timer->disabled_mask) ? 1 : 0;
-
- int64_t expires = cpu_to_timer_ticks(real_limit, timer->frequency) +
- timer->clock_offset;
-
- if (expires < now) {
- expires = now + 1;
- }
-
- TIMER_DPRINTF("%s set_limit limit=0x%016lx (%s) p=%p "
- "called with limit=0x%016lx at 0x%016lx (delta=0x%016lx)\n",
- timer->name, real_limit,
- timer->disabled?"disabled":"enabled",
- timer, limit,
- timer_to_cpu_ticks(now - timer->clock_offset,
- timer->frequency),
- timer_to_cpu_ticks(expires - now, timer->frequency));
-
- if (!real_limit) {
- TIMER_DPRINTF("%s set_limit limit=ZERO - not starting timer\n",
- timer->name);
- timer_del(timer->qtimer);
- } else if (timer->disabled) {
- timer_del(timer->qtimer);
- } else {
- timer_mod(timer->qtimer, expires);
- }
-}
-
static void isa_irq_handler(void *opaque, int n, int level)
{
static const int isa_irq_to_ivec[16] = {
@@ -723,46 +422,6 @@ static const TypeInfo ram_info = {
.class_init = ram_class_init,
};
-static SPARCCPU *cpu_devinit(const char *cpu_model, const struct hwdef *hwdef)
-{
- SPARCCPU *cpu;
- CPUSPARCState *env;
- ResetData *reset_info;
-
- uint32_t tick_frequency = 100*1000000;
- uint32_t stick_frequency = 100*1000000;
- uint32_t hstick_frequency = 100*1000000;
-
- if (cpu_model == NULL) {
- cpu_model = hwdef->default_cpu_model;
- }
- cpu = cpu_sparc_init(cpu_model);
- if (cpu == NULL) {
- fprintf(stderr, "Unable to find Sparc CPU definition\n");
- exit(1);
- }
- env = &cpu->env;
-
- env->tick = cpu_timer_create("tick", cpu, tick_irq,
- tick_frequency, TICK_INT_DIS,
- TICK_NPT_MASK);
-
- env->stick = cpu_timer_create("stick", cpu, stick_irq,
- stick_frequency, TICK_INT_DIS,
- TICK_NPT_MASK);
-
- env->hstick = cpu_timer_create("hstick", cpu, hstick_irq,
- hstick_frequency, TICK_INT_DIS,
- TICK_NPT_MASK);
-
- reset_info = g_malloc0(sizeof(ResetData));
- reset_info->cpu = cpu;
- reset_info->prom_addr = hwdef->prom_addr;
- qemu_register_reset(main_cpu_reset, reset_info);
-
- return cpu;
-}
-
static void sun4uv_init(MemoryRegion *address_space_mem,
MachineState *machine,
const struct hwdef *hwdef)
@@ -781,14 +440,15 @@ static void sun4uv_init(MemoryRegion *address_space_mem,
FWCfgState *fw_cfg;
/* init CPUs */
- cpu = cpu_devinit(machine->cpu_model, hwdef);
+ cpu = sparc64_cpu_devinit(machine->cpu_model, hwdef->default_cpu_model,
+ hwdef->prom_addr);
/* set up devices */
ram_init(0, machine->ram_size);
prom_init(hwdef->prom_addr, bios_name);
- ivec_irqs = qemu_allocate_irqs(cpu_set_ivec_irq, cpu, IVEC_MAX);
+ ivec_irqs = qemu_allocate_irqs(sparc64_cpu_set_ivec_irq, cpu, IVEC_MAX);
pci_bus = pci_apb_init(APB_SPECIAL_BASE, APB_MEM_BASE, ivec_irqs, &pci_bus2,
&pci_bus3, &pbm_irqs);
pci_vga_init(pci_bus);
@@ -882,7 +542,6 @@ static void sun4uv_init(MemoryRegion *address_space_mem,
enum {
sun4u_id = 0,
sun4v_id = 64,
- niagara_id,
};
static const struct hwdef hwdefs[] = {
@@ -900,13 +559,6 @@ static const struct hwdef hwdefs[] = {
.prom_addr = 0x1fff0000000ULL,
.console_serial_base = 0,
},
- /* Sun4v generic Niagara machine */
- {
- .default_cpu_model = "Sun UltraSparc T1",
- .machine_id = niagara_id,
- .prom_addr = 0xfff0000000ULL,
- .console_serial_base = 0xfff0c2c000ULL,
- },
};
/* Sun4u hardware initialisation */
@@ -921,12 +573,6 @@ static void sun4v_init(MachineState *machine)
sun4uv_init(get_system_memory(), machine, &hwdefs[1]);
}
-/* Niagara hardware initialisation */
-static void niagara_init(MachineState *machine)
-{
- sun4uv_init(get_system_memory(), machine, &hwdefs[2]);
-}
-
static void sun4u_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -960,22 +606,6 @@ static const TypeInfo sun4v_type = {
.class_init = sun4v_class_init,
};
-static void niagara_class_init(ObjectClass *oc, void *data)
-{
- MachineClass *mc = MACHINE_CLASS(oc);
-
- mc->desc = "Sun4v platform, Niagara";
- mc->init = niagara_init;
- mc->max_cpus = 1; /* XXX for now */
- mc->default_boot_order = "c";
-}
-
-static const TypeInfo niagara_type = {
- .name = MACHINE_TYPE_NAME("Niagara"),
- .parent = TYPE_MACHINE,
- .class_init = niagara_class_init,
-};
-
static void sun4u_register_types(void)
{
type_register_static(&ebus_info);
@@ -984,7 +614,6 @@ static void sun4u_register_types(void)
type_register_static(&sun4u_type);
type_register_static(&sun4v_type);
- type_register_static(&niagara_type);
}
type_init(sun4u_register_types)
diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c
index 6e8403ebc2..ae1ad2dba6 100644
--- a/hw/ssi/aspeed_smc.c
+++ b/hw/ssi/aspeed_smc.c
@@ -39,11 +39,14 @@
#define CONF_ENABLE_W2 18
#define CONF_ENABLE_W1 17
#define CONF_ENABLE_W0 16
-#define CONF_FLASH_TYPE4 9
-#define CONF_FLASH_TYPE3 7
-#define CONF_FLASH_TYPE2 5
-#define CONF_FLASH_TYPE1 3
-#define CONF_FLASH_TYPE0 1
+#define CONF_FLASH_TYPE4 8
+#define CONF_FLASH_TYPE3 6
+#define CONF_FLASH_TYPE2 4
+#define CONF_FLASH_TYPE1 2
+#define CONF_FLASH_TYPE0 0
+#define CONF_FLASH_TYPE_NOR 0x0
+#define CONF_FLASH_TYPE_NAND 0x1
+#define CONF_FLASH_TYPE_SPI 0x2
/* CE Control Register */
#define R_CE_CTRL (0x04 / 4)
@@ -66,6 +69,7 @@
#define R_CTRL0 (0x10 / 4)
#define CTRL_CMD_SHIFT 16
#define CTRL_CMD_MASK 0xff
+#define CTRL_AST2400_SPI_4BYTE (1 << 13)
#define CTRL_CE_STOP_ACTIVE (1 << 2)
#define CTRL_CMD_MODE_MASK 0x3
#define CTRL_READMODE 0x0
@@ -127,11 +131,17 @@
#define R_SPI_MISC_CTRL (0x10 / 4)
#define R_SPI_TIMINGS (0x14 / 4)
+#define ASPEED_SMC_R_SPI_MAX (0x20 / 4)
+#define ASPEED_SMC_R_SMC_MAX (0x20 / 4)
+
#define ASPEED_SOC_SMC_FLASH_BASE 0x10000000
#define ASPEED_SOC_FMC_FLASH_BASE 0x20000000
#define ASPEED_SOC_SPI_FLASH_BASE 0x30000000
#define ASPEED_SOC_SPI2_FLASH_BASE 0x38000000
+/* Flash opcodes. */
+#define SPI_OP_READ 0x03 /* Read data bytes (low frequency) */
+
/*
* Default segments mapping addresses and size for each slave per
* controller. These can be changed when board is initialized with the
@@ -170,24 +180,85 @@ static const AspeedSegments aspeed_segments_ast2500_spi2[] = {
};
static const AspeedSMCController controllers[] = {
- { "aspeed.smc.smc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 5, aspeed_segments_legacy,
- ASPEED_SOC_SMC_FLASH_BASE, 0x6000000 },
- { "aspeed.smc.fmc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 5, aspeed_segments_fmc,
- ASPEED_SOC_FMC_FLASH_BASE, 0x10000000 },
- { "aspeed.smc.spi", R_SPI_CONF, 0xff, R_SPI_CTRL0, R_SPI_TIMINGS,
- SPI_CONF_ENABLE_W0, 1, aspeed_segments_spi,
- ASPEED_SOC_SPI_FLASH_BASE, 0x10000000 },
- { "aspeed.smc.ast2500-fmc", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 3, aspeed_segments_ast2500_fmc,
- ASPEED_SOC_FMC_FLASH_BASE, 0x10000000 },
- { "aspeed.smc.ast2500-spi1", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 2, aspeed_segments_ast2500_spi1,
- ASPEED_SOC_SPI_FLASH_BASE, 0x8000000 },
- { "aspeed.smc.ast2500-spi2", R_CONF, R_CE_CTRL, R_CTRL0, R_TIMINGS,
- CONF_ENABLE_W0, 2, aspeed_segments_ast2500_spi2,
- ASPEED_SOC_SPI2_FLASH_BASE, 0x8000000 },
+ {
+ .name = "aspeed.smc.smc",
+ .r_conf = R_CONF,
+ .r_ce_ctrl = R_CE_CTRL,
+ .r_ctrl0 = R_CTRL0,
+ .r_timings = R_TIMINGS,
+ .conf_enable_w0 = CONF_ENABLE_W0,
+ .max_slaves = 5,
+ .segments = aspeed_segments_legacy,
+ .flash_window_base = ASPEED_SOC_SMC_FLASH_BASE,
+ .flash_window_size = 0x6000000,
+ .has_dma = false,
+ .nregs = ASPEED_SMC_R_SMC_MAX,
+ }, {
+ .name = "aspeed.smc.fmc",
+ .r_conf = R_CONF,
+ .r_ce_ctrl = R_CE_CTRL,
+ .r_ctrl0 = R_CTRL0,
+ .r_timings = R_TIMINGS,
+ .conf_enable_w0 = CONF_ENABLE_W0,
+ .max_slaves = 5,
+ .segments = aspeed_segments_fmc,
+ .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE,
+ .flash_window_size = 0x10000000,
+ .has_dma = true,
+ .nregs = ASPEED_SMC_R_MAX,
+ }, {
+ .name = "aspeed.smc.spi",
+ .r_conf = R_SPI_CONF,
+ .r_ce_ctrl = 0xff,
+ .r_ctrl0 = R_SPI_CTRL0,
+ .r_timings = R_SPI_TIMINGS,
+ .conf_enable_w0 = SPI_CONF_ENABLE_W0,
+ .max_slaves = 1,
+ .segments = aspeed_segments_spi,
+ .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE,
+ .flash_window_size = 0x10000000,
+ .has_dma = false,
+ .nregs = ASPEED_SMC_R_SPI_MAX,
+ }, {
+ .name = "aspeed.smc.ast2500-fmc",
+ .r_conf = R_CONF,
+ .r_ce_ctrl = R_CE_CTRL,
+ .r_ctrl0 = R_CTRL0,
+ .r_timings = R_TIMINGS,
+ .conf_enable_w0 = CONF_ENABLE_W0,
+ .max_slaves = 3,
+ .segments = aspeed_segments_ast2500_fmc,
+ .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE,
+ .flash_window_size = 0x10000000,
+ .has_dma = true,
+ .nregs = ASPEED_SMC_R_MAX,
+ }, {
+ .name = "aspeed.smc.ast2500-spi1",
+ .r_conf = R_CONF,
+ .r_ce_ctrl = R_CE_CTRL,
+ .r_ctrl0 = R_CTRL0,
+ .r_timings = R_TIMINGS,
+ .conf_enable_w0 = CONF_ENABLE_W0,
+ .max_slaves = 2,
+ .segments = aspeed_segments_ast2500_spi1,
+ .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE,
+ .flash_window_size = 0x8000000,
+ .has_dma = false,
+ .nregs = ASPEED_SMC_R_MAX,
+ }, {
+ .name = "aspeed.smc.ast2500-spi2",
+ .r_conf = R_CONF,
+ .r_ce_ctrl = R_CE_CTRL,
+ .r_ctrl0 = R_CTRL0,
+ .r_timings = R_TIMINGS,
+ .conf_enable_w0 = CONF_ENABLE_W0,
+ .max_slaves = 2,
+ .segments = aspeed_segments_ast2500_spi2,
+ .flash_window_base = ASPEED_SOC_SPI2_FLASH_BASE,
+ .flash_window_size = 0x8000000,
+ .has_dma = false,
+ .nregs = ASPEED_SMC_R_MAX,
+ },
};
/*
@@ -253,7 +324,8 @@ static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs,
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Tried to change CS0 start address to 0x%"
HWADDR_PRIx "\n", s->ctrl->name, seg.addr);
- return;
+ seg.addr = s->ctrl->flash_window_base;
+ new = aspeed_smc_segment_to_reg(&seg);
}
/*
@@ -267,8 +339,10 @@ static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs,
s->ctrl->segments[cs].size) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Tried to change CS%d end address to 0x%"
- HWADDR_PRIx "\n", s->ctrl->name, cs, seg.addr);
- return;
+ HWADDR_PRIx "\n", s->ctrl->name, cs, seg.addr + seg.size);
+ seg.size = s->ctrl->segments[cs].addr + s->ctrl->segments[cs].size -
+ seg.addr;
+ new = aspeed_smc_segment_to_reg(&seg);
}
/* Keep the segment in the overall flash window */
@@ -281,16 +355,14 @@ static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs,
}
/* Check start address vs. alignment */
- if (seg.addr % seg.size) {
+ if (seg.size && !QEMU_IS_ALIGNED(seg.addr, seg.size)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is not "
"aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
s->ctrl->name, cs, seg.addr, seg.addr + seg.size);
}
- /* And segments should not overlap */
- if (aspeed_smc_flash_overlap(s, &seg, cs)) {
- return;
- }
+ /* And segments should not overlap (in the specs) */
+ aspeed_smc_flash_overlap(s, &seg, cs);
/* All should be fine now to move the region */
memory_region_transaction_begin();
@@ -327,36 +399,137 @@ static const MemoryRegionOps aspeed_smc_flash_default_ops = {
},
};
-static inline int aspeed_smc_flash_mode(const AspeedSMCState *s, int cs)
+static inline int aspeed_smc_flash_mode(const AspeedSMCFlash *fl)
+{
+ const AspeedSMCState *s = fl->controller;
+
+ return s->regs[s->r_ctrl0 + fl->id] & CTRL_CMD_MODE_MASK;
+}
+
+static inline bool aspeed_smc_is_writable(const AspeedSMCFlash *fl)
{
- return s->regs[s->r_ctrl0 + cs] & CTRL_CMD_MODE_MASK;
+ const AspeedSMCState *s = fl->controller;
+
+ return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + fl->id));
}
-static inline bool aspeed_smc_is_usermode(const AspeedSMCState *s, int cs)
+static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl)
{
- return aspeed_smc_flash_mode(s, cs) == CTRL_USERMODE;
+ const AspeedSMCState *s = fl->controller;
+ int cmd = (s->regs[s->r_ctrl0 + fl->id] >> CTRL_CMD_SHIFT) & CTRL_CMD_MASK;
+
+ /* In read mode, the default SPI command is READ (0x3). In other
+ * modes, the command should necessarily be defined */
+ if (aspeed_smc_flash_mode(fl) == CTRL_READMODE) {
+ cmd = SPI_OP_READ;
+ }
+
+ if (!cmd) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: no command defined for mode %d\n",
+ __func__, aspeed_smc_flash_mode(fl));
+ }
+
+ return cmd;
}
-static inline bool aspeed_smc_is_writable(const AspeedSMCState *s, int cs)
+static inline int aspeed_smc_flash_is_4byte(const AspeedSMCFlash *fl)
{
- return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + cs));
+ const AspeedSMCState *s = fl->controller;
+
+ if (s->ctrl->segments == aspeed_segments_spi) {
+ return s->regs[s->r_ctrl0] & CTRL_AST2400_SPI_4BYTE;
+ } else {
+ return s->regs[s->r_ce_ctrl] & (1 << (CTRL_EXTENDED0 + fl->id));
+ }
+}
+
+static inline bool aspeed_smc_is_ce_stop_active(const AspeedSMCFlash *fl)
+{
+ const AspeedSMCState *s = fl->controller;
+
+ return s->regs[s->r_ctrl0 + fl->id] & CTRL_CE_STOP_ACTIVE;
+}
+
+static void aspeed_smc_flash_select(AspeedSMCFlash *fl)
+{
+ AspeedSMCState *s = fl->controller;
+
+ s->regs[s->r_ctrl0 + fl->id] &= ~CTRL_CE_STOP_ACTIVE;
+ qemu_set_irq(s->cs_lines[fl->id], aspeed_smc_is_ce_stop_active(fl));
+}
+
+static void aspeed_smc_flash_unselect(AspeedSMCFlash *fl)
+{
+ AspeedSMCState *s = fl->controller;
+
+ s->regs[s->r_ctrl0 + fl->id] |= CTRL_CE_STOP_ACTIVE;
+ qemu_set_irq(s->cs_lines[fl->id], aspeed_smc_is_ce_stop_active(fl));
+}
+
+static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl,
+ uint32_t addr)
+{
+ const AspeedSMCState *s = fl->controller;
+ AspeedSegments seg;
+
+ aspeed_smc_reg_to_segment(s->regs[R_SEG_ADDR0 + fl->id], &seg);
+ if ((addr & (seg.size - 1)) != addr) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid address 0x%08x for CS%d segment : "
+ "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n",
+ s->ctrl->name, addr, fl->id, seg.addr,
+ seg.addr + seg.size);
+ }
+
+ addr &= seg.size - 1;
+ return addr;
+}
+
+static void aspeed_smc_flash_send_addr(AspeedSMCFlash *fl, uint32_t addr)
+{
+ const AspeedSMCState *s = fl->controller;
+ uint8_t cmd = aspeed_smc_flash_cmd(fl);
+
+ /* Flash access can not exceed CS segment */
+ addr = aspeed_smc_check_segment_addr(fl, addr);
+
+ ssi_transfer(s->spi, cmd);
+
+ if (aspeed_smc_flash_is_4byte(fl)) {
+ ssi_transfer(s->spi, (addr >> 24) & 0xff);
+ }
+ ssi_transfer(s->spi, (addr >> 16) & 0xff);
+ ssi_transfer(s->spi, (addr >> 8) & 0xff);
+ ssi_transfer(s->spi, (addr & 0xff));
}
static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size)
{
AspeedSMCFlash *fl = opaque;
- const AspeedSMCState *s = fl->controller;
+ AspeedSMCState *s = fl->controller;
uint64_t ret = 0;
int i;
- if (aspeed_smc_is_usermode(s, fl->id)) {
+ switch (aspeed_smc_flash_mode(fl)) {
+ case CTRL_USERMODE:
for (i = 0; i < size; i++) {
ret |= ssi_transfer(s->spi, 0x0) << (8 * i);
}
- } else {
- qemu_log_mask(LOG_UNIMP, "%s: usermode not implemented\n",
- __func__);
- ret = -1;
+ break;
+ case CTRL_READMODE:
+ case CTRL_FREADMODE:
+ aspeed_smc_flash_select(fl);
+ aspeed_smc_flash_send_addr(fl, addr);
+
+ for (i = 0; i < size; i++) {
+ ret |= ssi_transfer(s->spi, 0x0) << (8 * i);
+ }
+
+ aspeed_smc_flash_unselect(fl);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n",
+ __func__, aspeed_smc_flash_mode(fl));
}
return ret;
@@ -366,23 +539,34 @@ static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data,
unsigned size)
{
AspeedSMCFlash *fl = opaque;
- const AspeedSMCState *s = fl->controller;
+ AspeedSMCState *s = fl->controller;
int i;
- if (!aspeed_smc_is_writable(s, fl->id)) {
+ if (!aspeed_smc_is_writable(fl)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: flash is not writable at 0x%"
HWADDR_PRIx "\n", __func__, addr);
return;
}
- if (!aspeed_smc_is_usermode(s, fl->id)) {
- qemu_log_mask(LOG_UNIMP, "%s: usermode not implemented\n",
- __func__);
- return;
- }
+ switch (aspeed_smc_flash_mode(fl)) {
+ case CTRL_USERMODE:
+ for (i = 0; i < size; i++) {
+ ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
+ }
+ break;
+ case CTRL_WRITEMODE:
+ aspeed_smc_flash_select(fl);
+ aspeed_smc_flash_send_addr(fl, addr);
+
+ for (i = 0; i < size; i++) {
+ ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
+ }
- for (i = 0; i < size; i++) {
- ssi_transfer(s->spi, (data >> (8 * i)) & 0xff);
+ aspeed_smc_flash_unselect(fl);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n",
+ __func__, aspeed_smc_flash_mode(fl));
}
}
@@ -396,18 +580,11 @@ static const MemoryRegionOps aspeed_smc_flash_ops = {
},
};
-static bool aspeed_smc_is_ce_stop_active(const AspeedSMCState *s, int cs)
+static void aspeed_smc_flash_update_cs(AspeedSMCFlash *fl)
{
- return s->regs[s->r_ctrl0 + cs] & CTRL_CE_STOP_ACTIVE;
-}
-
-static void aspeed_smc_update_cs(const AspeedSMCState *s)
-{
- int i;
+ const AspeedSMCState *s = fl->controller;
- for (i = 0; i < s->num_cs; ++i) {
- qemu_set_irq(s->cs_lines[i], aspeed_smc_is_ce_stop_active(s, i));
- }
+ qemu_set_irq(s->cs_lines[fl->id], aspeed_smc_is_ce_stop_active(fl));
}
static void aspeed_smc_reset(DeviceState *d)
@@ -423,6 +600,7 @@ static void aspeed_smc_reset(DeviceState *d)
/* Unselect all slaves */
for (i = 0; i < s->num_cs; ++i) {
s->regs[s->r_ctrl0 + i] |= CTRL_CE_STOP_ACTIVE;
+ qemu_set_irq(s->cs_lines[i], true);
}
/* setup default segment register values for all */
@@ -431,7 +609,24 @@ static void aspeed_smc_reset(DeviceState *d)
aspeed_smc_segment_to_reg(&s->ctrl->segments[i]);
}
- aspeed_smc_update_cs(s);
+ /* HW strapping for AST2500 FMC controllers */
+ if (s->ctrl->segments == aspeed_segments_ast2500_fmc) {
+ /* flash type is fixed to SPI for CE0 and CE1 */
+ s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
+ s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1);
+
+ /* 4BYTE mode is autodetected for CE0. Let's force it to 1 for
+ * now */
+ s->regs[s->r_ce_ctrl] |= (1 << (CTRL_EXTENDED0));
+ }
+
+ /* HW strapping for AST2400 FMC controllers (SCU70). Let's use the
+ * configuration of the palmetto-bmc machine */
+ if (s->ctrl->segments == aspeed_segments_fmc) {
+ s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0);
+
+ s->regs[s->r_ce_ctrl] |= (1 << (CTRL_EXTENDED0));
+ }
}
static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size)
@@ -440,13 +635,6 @@ static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size)
addr >>= 2;
- if (addr >= ARRAY_SIZE(s->regs)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Out-of-bounds read at 0x%" HWADDR_PRIx "\n",
- __func__, addr);
- return 0;
- }
-
if (addr == s->r_conf ||
addr == s->r_timings ||
addr == s->r_ce_ctrl ||
@@ -469,20 +657,14 @@ static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data,
addr >>= 2;
- if (addr >= ARRAY_SIZE(s->regs)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Out-of-bounds write at 0x%" HWADDR_PRIx "\n",
- __func__, addr);
- return;
- }
-
if (addr == s->r_conf ||
addr == s->r_timings ||
addr == s->r_ce_ctrl) {
s->regs[addr] = value;
} else if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) {
+ int cs = addr - s->r_ctrl0;
s->regs[addr] = value;
- aspeed_smc_update_cs(s);
+ aspeed_smc_flash_update_cs(&s->flashes[cs]);
} else if (addr >= R_SEG_ADDR0 &&
addr < R_SEG_ADDR0 + s->ctrl->max_slaves) {
int cs = addr - R_SEG_ADDR0;
@@ -540,11 +722,9 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp)
sysbus_init_irq(sbd, &s->cs_lines[i]);
}
- aspeed_smc_reset(dev);
-
/* The memory region for the controller registers */
memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s,
- s->ctrl->name, ASPEED_SMC_R_MAX * 4);
+ s->ctrl->name, s->ctrl->nregs * 4);
sysbus_init_mmio(sbd, &s->mmio);
/*
diff --git a/hw/ssi/imx_spi.c b/hw/ssi/imx_spi.c
index e4e395fa67..b66505ca49 100644
--- a/hw/ssi/imx_spi.c
+++ b/hw/ssi/imx_spi.c
@@ -320,9 +320,6 @@ static void imx_spi_write(void *opaque, hwaddr offset, uint64_t value,
TYPE_IMX_SPI, __func__);
break;
case ECSPI_TXDATA:
- case ECSPI_MSGDATA:
- /* Is there any difference between TXDATA and MSGDATA ? */
- /* I'll have to look in the linux driver */
if (!imx_spi_is_enabled(s)) {
/* Ignore writes if device is disabled */
break;
@@ -380,6 +377,14 @@ static void imx_spi_write(void *opaque, hwaddr offset, uint64_t value,
}
break;
+ case ECSPI_MSGDATA:
+ /* It is not clear from the spec what MSGDATA is for, and the Linux
+ * driver does not use it, so just ignore writes to it for now. */
+ qemu_log_mask(LOG_UNIMP,
+ "[%s]%s: Trying to write to MSGDATA, ignoring\n",
+ TYPE_IMX_SPI, __func__);
+ break;
default:
s->regs[index] = value;
diff --git a/hw/timer/Makefile.objs b/hw/timer/Makefile.objs
index 7ba8c23c75..71994f2d88 100644
--- a/hw/timer/Makefile.objs
+++ b/hw/timer/Makefile.objs
@@ -18,6 +18,7 @@ common-obj-$(CONFIG_IMX) += imx_gpt.o
common-obj-$(CONFIG_LM32) += lm32_timer.o
common-obj-$(CONFIG_MILKYMIST) += milkymist-sysctl.o
+obj-$(CONFIG_ALTERA_TIMER) += altera_timer.o
obj-$(CONFIG_EXYNOS4) += exynos4210_mct.o
obj-$(CONFIG_EXYNOS4) += exynos4210_pwm.o
obj-$(CONFIG_EXYNOS4) += exynos4210_rtc.o
@@ -34,3 +35,5 @@ obj-$(CONFIG_ALLWINNER_A10_PIT) += allwinner-a10-pit.o
common-obj-$(CONFIG_STM32F2XX_TIMER) += stm32f2xx_timer.o
common-obj-$(CONFIG_ASPEED_SOC) += aspeed_timer.o
+
+common-obj-$(CONFIG_SUN4V_RTC) += sun4v-rtc.o
diff --git a/hw/timer/altera_timer.c b/hw/timer/altera_timer.c
new file mode 100644
index 0000000000..6d4862661d
--- /dev/null
+++ b/hw/timer/altera_timer.c
@@ -0,0 +1,237 @@
+/*
+ * QEMU model of the Altera timer.
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qapi/error.h"
+
+#include "hw/sysbus.h"
+#include "sysemu/sysemu.h"
+#include "hw/ptimer.h"
+
+#define R_STATUS 0
+#define R_CONTROL 1
+#define R_PERIODL 2
+#define R_PERIODH 3
+#define R_SNAPL 4
+#define R_SNAPH 5
+#define R_MAX 6
+
+#define STATUS_TO 0x0001
+#define STATUS_RUN 0x0002
+
+#define CONTROL_ITO 0x0001
+#define CONTROL_CONT 0x0002
+#define CONTROL_START 0x0004
+#define CONTROL_STOP 0x0008
+
+#define TYPE_ALTERA_TIMER "ALTR.timer"
+#define ALTERA_TIMER(obj) \
+ OBJECT_CHECK(AlteraTimer, (obj), TYPE_ALTERA_TIMER)
+
+typedef struct AlteraTimer {
+ SysBusDevice busdev;
+ MemoryRegion mmio;
+ qemu_irq irq;
+ uint32_t freq_hz;
+ QEMUBH *bh;
+ ptimer_state *ptimer;
+ uint32_t regs[R_MAX];
+} AlteraTimer;
+
+static int timer_irq_state(AlteraTimer *t)
+{
+ bool irq = (t->regs[R_STATUS] & STATUS_TO) &&
+ (t->regs[R_CONTROL] & CONTROL_ITO);
+ return irq;
+}
+
+static uint64_t timer_read(void *opaque, hwaddr addr,
+ unsigned int size)
+{
+ AlteraTimer *t = opaque;
+ uint64_t r = 0;
+
+ addr >>= 2;
+
+ switch (addr) {
+ case R_CONTROL:
+ r = t->regs[R_CONTROL] & (CONTROL_ITO | CONTROL_CONT);
+ break;
+
+ default:
+ if (addr < ARRAY_SIZE(t->regs)) {
+ r = t->regs[addr];
+ }
+ break;
+ }
+
+ return r;
+}
+
+static void timer_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned int size)
+{
+ AlteraTimer *t = opaque;
+ uint64_t tvalue;
+ uint32_t count = 0;
+ int irqState = timer_irq_state(t);
+
+ addr >>= 2;
+
+ switch (addr) {
+ case R_STATUS:
+ /* The timeout bit is cleared by writing the status register. */
+ t->regs[R_STATUS] &= ~STATUS_TO;
+ break;
+
+ case R_CONTROL:
+ t->regs[R_CONTROL] = value & (CONTROL_ITO | CONTROL_CONT);
+ if ((value & CONTROL_START) &&
+ !(t->regs[R_STATUS] & STATUS_RUN)) {
+ ptimer_run(t->ptimer, 1);
+ t->regs[R_STATUS] |= STATUS_RUN;
+ }
+ if ((value & CONTROL_STOP) && (t->regs[R_STATUS] & STATUS_RUN)) {
+ ptimer_stop(t->ptimer);
+ t->regs[R_STATUS] &= ~STATUS_RUN;
+ }
+ break;
+
+ case R_PERIODL:
+ case R_PERIODH:
+ t->regs[addr] = value & 0xFFFF;
+ if (t->regs[R_STATUS] & STATUS_RUN) {
+ ptimer_stop(t->ptimer);
+ t->regs[R_STATUS] &= ~STATUS_RUN;
+ }
+ tvalue = (t->regs[R_PERIODH] << 16) | t->regs[R_PERIODL];
+ ptimer_set_limit(t->ptimer, tvalue + 1, 1);
+ break;
+
+ case R_SNAPL:
+ case R_SNAPH:
+ count = ptimer_get_count(t->ptimer);
+ t->regs[R_SNAPL] = count & 0xFFFF;
+ t->regs[R_SNAPH] = count >> 16;
+ break;
+
+ default:
+ break;
+ }
+
+ if (irqState != timer_irq_state(t)) {
+ qemu_set_irq(t->irq, timer_irq_state(t));
+ }
+}
+
+static const MemoryRegionOps timer_ops = {
+ .read = timer_read,
+ .write = timer_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 4
+ }
+};
+
+static void timer_hit(void *opaque)
+{
+ AlteraTimer *t = opaque;
+ const uint64_t tvalue = (t->regs[R_PERIODH] << 16) | t->regs[R_PERIODL];
+
+ t->regs[R_STATUS] |= STATUS_TO;
+
+ ptimer_set_limit(t->ptimer, tvalue + 1, 1);
+
+ if (!(t->regs[R_CONTROL] & CONTROL_CONT)) {
+ t->regs[R_STATUS] &= ~STATUS_RUN;
+ ptimer_set_count(t->ptimer, tvalue);
+ } else {
+ ptimer_run(t->ptimer, 1);
+ }
+
+ qemu_set_irq(t->irq, timer_irq_state(t));
+}
+
+static void altera_timer_realize(DeviceState *dev, Error **errp)
+{
+ AlteraTimer *t = ALTERA_TIMER(dev);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+
+ if (t->freq_hz == 0) {
+ error_setg(errp, "\"clock-frequency\" property must be provided.");
+ return;
+ }
+
+ t->bh = qemu_bh_new(timer_hit, t);
+ t->ptimer = ptimer_init(t->bh, PTIMER_POLICY_DEFAULT);
+ ptimer_set_freq(t->ptimer, t->freq_hz);
+
+ memory_region_init_io(&t->mmio, OBJECT(t), &timer_ops, t,
+ TYPE_ALTERA_TIMER, R_MAX * sizeof(uint32_t));
+ sysbus_init_mmio(sbd, &t->mmio);
+}
+
+static void altera_timer_init(Object *obj)
+{
+ AlteraTimer *t = ALTERA_TIMER(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ sysbus_init_irq(sbd, &t->irq);
+}
+
+static void altera_timer_reset(DeviceState *dev)
+{
+ AlteraTimer *t = ALTERA_TIMER(dev);
+
+ ptimer_stop(t->ptimer);
+ ptimer_set_limit(t->ptimer, 0xffffffff, 1);
+ memset(t->regs, 0, sizeof(t->regs));
+}
+
+static Property altera_timer_properties[] = {
+ DEFINE_PROP_UINT32("clock-frequency", AlteraTimer, freq_hz, 0),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void altera_timer_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = altera_timer_realize;
+ dc->props = altera_timer_properties;
+ dc->reset = altera_timer_reset;
+}
+
+static const TypeInfo altera_timer_info = {
+ .name = TYPE_ALTERA_TIMER,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(AlteraTimer),
+ .instance_init = altera_timer_init,
+ .class_init = altera_timer_class_init,
+};
+
+static void altera_timer_register(void)
+{
+ type_register_static(&altera_timer_info);
+}
+
+type_init(altera_timer_register)
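For context: the model exposes a single MMIO register bank (R_MAX * 4 bytes) and one timeout IRQ, and the guest-programmed period makes the timer fire every (PERIODH:PERIODL + 1) input clock ticks (see the ptimer_set_limit() calls above). A board or SoC model would wire it up through the usual sysbus helpers, much like the sun4v_rtc_init() helper added later in this series. The sketch below is illustrative only; the base address, IRQ line and 75 MHz clock rate are assumptions, not part of this patch.

    #include "qemu/osdep.h"
    #include "hw/sysbus.h"
    #include "hw/qdev-properties.h"

    /* Hypothetical board glue, not part of the patch. */
    static void board_add_altera_timer(hwaddr base, qemu_irq irq)
    {
        DeviceState *dev = qdev_create(NULL, "ALTR.timer");

        /* "clock-frequency" must be set, or realize fails (see above). */
        qdev_prop_set_uint32(dev, "clock-frequency", 75 * 1000 * 1000);
        qdev_init_nofail(dev);

        sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);   /* register bank */
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq); /* timeout IRQ */
    }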
diff --git a/hw/timer/ds1338.c b/hw/timer/ds1338.c
index 0112949e23..3849b74a68 100644
--- a/hw/timer/ds1338.c
+++ b/hw/timer/ds1338.c
@@ -94,7 +94,7 @@ static void inc_regptr(DS1338State *s)
}
}
-static void ds1338_event(I2CSlave *i2c, enum i2c_event event)
+static int ds1338_event(I2CSlave *i2c, enum i2c_event event)
{
DS1338State *s = DS1338(i2c);
@@ -113,6 +113,8 @@ static void ds1338_event(I2CSlave *i2c, enum i2c_event event)
default:
break;
}
+
+ return 0;
}
static int ds1338_recv(I2CSlave *i2c)
@@ -198,11 +200,6 @@ static int ds1338_send(I2CSlave *i2c, uint8_t data)
return 0;
}
-static int ds1338_init(I2CSlave *i2c)
-{
- return 0;
-}
-
static void ds1338_reset(DeviceState *dev)
{
DS1338State *s = DS1338(dev);
@@ -220,7 +217,6 @@ static void ds1338_class_init(ObjectClass *klass, void *data)
DeviceClass *dc = DEVICE_CLASS(klass);
I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
- k->init = ds1338_init;
k->event = ds1338_event;
k->recv = ds1338_recv;
k->send = ds1338_send;
diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c
index da209d02f0..637f8722a7 100644
--- a/hw/timer/mc146818rtc.c
+++ b/hw/timer/mc146818rtc.c
@@ -946,11 +946,23 @@ static Property mc146818rtc_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
+static void rtc_resetdev(DeviceState *d)
+{
+ RTCState *s = MC146818_RTC(d);
+
+ /* A guest that suspends itself writes 0xfe here; preserve that value
+ * and reset anything else (the guest-suspend case). */
+ if (s->cmos_data[0x0f] != 0xfe) {
+ s->cmos_data[0x0f] = 0x00;
+ }
+}
+
static void rtc_class_initfn(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = rtc_realizefn;
+ dc->reset = rtc_resetdev;
dc->vmsd = &vmstate_rtc;
dc->props = mc146818rtc_properties;
/* Reason: needs to be wired up by rtc_init() */
diff --git a/hw/timer/sun4v-rtc.c b/hw/timer/sun4v-rtc.c
new file mode 100644
index 0000000000..310523225f
--- /dev/null
+++ b/hw/timer/sun4v-rtc.c
@@ -0,0 +1,102 @@
+/*
+ * QEMU sun4v Real Time Clock device
+ *
+ * The sun4v_rtc device (sun4v tod clock)
+ *
+ * Copyright (c) 2016 Artyom Tarasenko
+ *
+ * This code is licensed under the GNU GPL v3 or (at your option) any later
+ * version.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+#include "qemu/timer.h"
+#include "hw/timer/sun4v-rtc.h"
+
+//#define DEBUG_SUN4V_RTC
+
+#ifdef DEBUG_SUN4V_RTC
+#define DPRINTF(fmt, ...) \
+ do { printf("sun4v_rtc: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) do {} while (0)
+#endif
+
+#define TYPE_SUN4V_RTC "sun4v_rtc"
+#define SUN4V_RTC(obj) OBJECT_CHECK(Sun4vRtc, (obj), TYPE_SUN4V_RTC)
+
+typedef struct Sun4vRtc {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+} Sun4vRtc;
+
+static uint64_t sun4v_rtc_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ uint64_t val = get_clock_realtime() / NANOSECONDS_PER_SECOND;
+ if (!(addr & 4ULL)) {
+ /* accessing the high 32 bits */
+ val >>= 32;
+ }
+ DPRINTF("read from " TARGET_FMT_plx " val %lx\n", addr, val);
+ return val;
+}
+
+static void sun4v_rtc_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ DPRINTF("write 0x%x to " TARGET_FMT_plx "\n", (unsigned)val, addr);
+}
+
+static const MemoryRegionOps sun4v_rtc_ops = {
+ .read = sun4v_rtc_read,
+ .write = sun4v_rtc_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+void sun4v_rtc_init(hwaddr addr)
+{
+ DeviceState *dev;
+ SysBusDevice *s;
+
+ dev = qdev_create(NULL, TYPE_SUN4V_RTC);
+ s = SYS_BUS_DEVICE(dev);
+
+ qdev_init_nofail(dev);
+
+ sysbus_mmio_map(s, 0, addr);
+}
+
+static int sun4v_rtc_init1(SysBusDevice *dev)
+{
+ Sun4vRtc *s = SUN4V_RTC(dev);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &sun4v_rtc_ops, s,
+ "sun4v-rtc", 0x08ULL);
+ sysbus_init_mmio(dev, &s->iomem);
+ return 0;
+}
+
+static void sun4v_rtc_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = sun4v_rtc_init1;
+}
+
+static const TypeInfo sun4v_rtc_info = {
+ .name = TYPE_SUN4V_RTC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Sun4vRtc),
+ .class_init = sun4v_rtc_class_init,
+};
+
+static void sun4v_rtc_register_types(void)
+{
+ type_register_static(&sun4v_rtc_info);
+}
+
+type_init(sun4v_rtc_register_types)
diff --git a/hw/timer/twl92230.c b/hw/timer/twl92230.c
index 7ba4e9a7c9..c0aa8ae3de 100644
--- a/hw/timer/twl92230.c
+++ b/hw/timer/twl92230.c
@@ -713,12 +713,14 @@ static void menelaus_write(void *opaque, uint8_t addr, uint8_t value)
}
}
-static void menelaus_event(I2CSlave *i2c, enum i2c_event event)
+static int menelaus_event(I2CSlave *i2c, enum i2c_event event)
{
MenelausState *s = TWL92230(i2c);
if (event == I2C_START_SEND)
s->firstbyte = 1;
+
+ return 0;
}
static int menelaus_tx(I2CSlave *i2c, uint8_t data)
@@ -747,17 +749,21 @@ static int menelaus_rx(I2CSlave *i2c)
Or we broke compatibility in the state, or we can't use struct tm
*/
-static int get_int32_as_uint16(QEMUFile *f, void *pv, size_t size)
+static int get_int32_as_uint16(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
int *v = pv;
*v = qemu_get_be16(f);
return 0;
}
-static void put_int32_as_uint16(QEMUFile *f, void *pv, size_t size)
+static int put_int32_as_uint16(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
int *v = pv;
qemu_put_be16(f, *v);
+
+ return 0;
}
static const VMStateInfo vmstate_hack_int32_as_uint16 = {
diff --git a/hw/usb/bus.c b/hw/usb/bus.c
index 25913ad488..1dcc35c8f8 100644
--- a/hw/usb/bus.c
+++ b/hw/usb/bus.c
@@ -8,6 +8,7 @@
#include "monitor/monitor.h"
#include "trace.h"
#include "qemu/cutils.h"
+#include "migration/migration.h"
static void usb_bus_dev_print(Monitor *mon, DeviceState *qdev, int indent);
@@ -686,6 +687,8 @@ USBDevice *usbdevice_create(const char *cmdline)
const char *params;
int len;
USBDevice *dev;
+ ObjectClass *klass;
+ DeviceClass *dc;
params = strchr(cmdline,':');
if (params) {
@@ -720,6 +723,22 @@ USBDevice *usbdevice_create(const char *cmdline)
return NULL;
}
+ klass = object_class_by_name(f->name);
+ if (klass == NULL) {
+ error_report("Device '%s' not found", f->name);
+ return NULL;
+ }
+
+ dc = DEVICE_CLASS(klass);
+
+ if (only_migratable) {
+ if (dc->vmsd->unmigratable) {
+ error_report("Device %s is not migratable, but --only-migratable "
+ "was specified", f->name);
+ return NULL;
+ }
+ }
+
if (f->usbdevice_init) {
dev = f->usbdevice_init(bus, params);
} else {
diff --git a/hw/usb/ccid-card-emulated.c b/hw/usb/ccid-card-emulated.c
index eceb5f3ee2..99627860a3 100644
--- a/hw/usb/ccid-card-emulated.c
+++ b/hw/usb/ccid-card-emulated.c
@@ -407,7 +407,7 @@ static int init_event_notifier(EmulatedState *card)
DPRINTF(card, 2, "event notifier creation failed\n");
return -1;
}
- event_notifier_set_handler(&card->notifier, false, card_event_handler);
+ event_notifier_set_handler(&card->notifier, card_event_handler);
return 0;
}
diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c
index 9cb0f50750..94c2e94f10 100644
--- a/hw/usb/dev-mtp.c
+++ b/hw/usb/dev-mtp.c
@@ -1093,7 +1093,7 @@ static MTPData *usb_mtp_get_object_prop_value(MTPState *s, MTPControl *c,
}
break;
case PROP_PERSISTENT_UNIQUE_OBJECT_IDENTIFIER:
- /* Should be persistant between sessions,
+ /* Should be persistent between sessions,
* but using our object ID is "good enough"
* for now */
usb_mtp_add_u64(d, 0x0000000000000000);
@@ -1580,6 +1580,8 @@ static void usb_mtp_class_initfn(ObjectClass *klass, void *data)
uc->handle_reset = usb_mtp_handle_reset;
uc->handle_control = usb_mtp_handle_control;
uc->handle_data = usb_mtp_handle_data;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+ dc->desc = "USB Media Transfer Protocol device";
dc->fw_name = "mtp";
dc->vmsd = &vmstate_usb_mtp;
dc->props = mtp_properties;
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index 4acf0c6dd8..e0b516987f 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -3894,7 +3894,7 @@ static const VMStateDescription vmstate_xhci = {
.version_id = 1,
.post_load = usb_xhci_post_load,
.fields = (VMStateField[]) {
- VMSTATE_PCIE_DEVICE(parent_obj, XHCIState),
+ VMSTATE_PCI_DEVICE(parent_obj, XHCIState),
VMSTATE_MSIX(parent_obj, XHCIState),
VMSTATE_STRUCT_VARRAY_UINT32(ports, XHCIState, numports, 1,
diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
index a65723781e..4a0ebbfb32 100644
--- a/hw/usb/redirect.c
+++ b/hw/usb/redirect.c
@@ -2165,7 +2165,8 @@ static int usbredir_post_load(void *priv, int version_id)
}
/* For usbredirparser migration */
-static void usbredir_put_parser(QEMUFile *f, void *priv, size_t unused)
+static int usbredir_put_parser(QEMUFile *f, void *priv, size_t unused,
+ VMStateField *field, QJSON *vmdesc)
{
USBRedirDevice *dev = priv;
uint8_t *data;
@@ -2173,7 +2174,7 @@ static void usbredir_put_parser(QEMUFile *f, void *priv, size_t unused)
if (dev->parser == NULL) {
qemu_put_be32(f, 0);
- return;
+ return 0;
}
usbredirparser_serialize(dev->parser, &data, &len);
@@ -2183,9 +2184,12 @@ static void usbredir_put_parser(QEMUFile *f, void *priv, size_t unused)
qemu_put_buffer(f, data, len);
free(data);
+
+ return 0;
}
-static int usbredir_get_parser(QEMUFile *f, void *priv, size_t unused)
+static int usbredir_get_parser(QEMUFile *f, void *priv, size_t unused,
+ VMStateField *field)
{
USBRedirDevice *dev = priv;
uint8_t *data;
@@ -2228,7 +2232,8 @@ static const VMStateInfo usbredir_parser_vmstate_info = {
/* For buffered packets (iso/irq) queue migration */
-static void usbredir_put_bufpq(QEMUFile *f, void *priv, size_t unused)
+static int usbredir_put_bufpq(QEMUFile *f, void *priv, size_t unused,
+ VMStateField *field, QJSON *vmdesc)
{
struct endp_data *endp = priv;
USBRedirDevice *dev = endp->dev;
@@ -2246,9 +2251,12 @@ static void usbredir_put_bufpq(QEMUFile *f, void *priv, size_t unused)
i++;
}
assert(i == endp->bufpq_size);
+
+ return 0;
}
-static int usbredir_get_bufpq(QEMUFile *f, void *priv, size_t unused)
+static int usbredir_get_bufpq(QEMUFile *f, void *priv, size_t unused,
+ VMStateField *field)
{
struct endp_data *endp = priv;
USBRedirDevice *dev = endp->dev;
@@ -2351,7 +2359,8 @@ static const VMStateDescription usbredir_ep_vmstate = {
/* For PacketIdQueue migration */
-static void usbredir_put_packet_id_q(QEMUFile *f, void *priv, size_t unused)
+static int usbredir_put_packet_id_q(QEMUFile *f, void *priv, size_t unused,
+ VMStateField *field, QJSON *vmdesc)
{
struct PacketIdQueue *q = priv;
USBRedirDevice *dev = q->dev;
@@ -2365,9 +2374,12 @@ static void usbredir_put_packet_id_q(QEMUFile *f, void *priv, size_t unused)
remain--;
}
assert(remain == 0);
+
+ return 0;
}
-static int usbredir_get_packet_id_q(QEMUFile *f, void *priv, size_t unused)
+static int usbredir_get_packet_id_q(QEMUFile *f, void *priv, size_t unused,
+ VMStateField *field)
{
struct PacketIdQueue *q = priv;
USBRedirDevice *dev = q->dev;
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index 811eecd1b4..6c771f778b 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -1171,7 +1171,7 @@ static int vfio_pci_igd_host_init(VFIOPCIDevice *vdev,
* IGD LPC/ISA bridge support code. The vBIOS needs this, but we can't write
* arbitrary values into just any bridge, so we must create our own. We try
* to handle if the user has created it for us, which they might want to do
- * to enable multifuction so we don't occupy the whole PCI slot.
+ * to enable multifunction so we don't occupy the whole PCI slot.
*/
static void vfio_pci_igd_lpc_bridge_realize(PCIDevice *pdev, Error **errp)
{
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index d7dbe0e3e0..882d3a91b6 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -1881,8 +1881,8 @@ static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
* 0 is reserved for this since absence of capabilities is indicated by
* 0 for the ID, version, AND next pointer. However, pcie_add_capability()
* uses ID 0 as reserved for list management and will incorrectly match and
- * assert if we attempt to pre-load the head of the chain with with this
- * ID. Use ID 0xFFFF temporarily since it is also seems to be reserved in
+ * assert if we attempt to pre-load the head of the chain with this ID.
+ * Use ID 0xFFFF temporarily since it also seems to be reserved in
* part for identifying absence of capabilities in a root complex register
* block. If the ID still exists after adding capabilities, switch back to
* zero. We'll mark this entire first dword as emulated for this purpose.
diff --git a/hw/virtio/Makefile.objs b/hw/virtio/Makefile.objs
index 95c4c30ea1..765d363c1f 100644
--- a/hw/virtio/Makefile.objs
+++ b/hw/virtio/Makefile.objs
@@ -1,3 +1,4 @@
+ifeq ($(CONFIG_VIRTIO),y)
common-obj-y += virtio-rng.o
common-obj-$(CONFIG_VIRTIO_PCI) += virtio-pci.o
common-obj-y += virtio-bus.o
@@ -5,7 +6,10 @@ common-obj-y += virtio-mmio.o
obj-y += virtio.o virtio-balloon.o
obj-$(CONFIG_LINUX) += vhost.o vhost-backend.o vhost-user.o
-
obj-$(CONFIG_VHOST_VSOCK) += vhost-vsock.o
obj-y += virtio-crypto.o
obj-$(CONFIG_VIRTIO_PCI) += virtio-crypto-pci.o
+endif
+
+common-obj-$(call lnot,$(CONFIG_LINUX)) += vhost-stub.o
+common-obj-$(CONFIG_ALL) += vhost-stub.o
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 7b6f55e70e..6926eedd3f 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -15,6 +15,8 @@ virtio_rng_pushed(void *rng, size_t len) "rng %p: %zd bytes pushed"
virtio_rng_request(void *rng, size_t size, unsigned quota) "rng %p: %zd bytes requested, %u bytes quota left"
# hw/virtio/virtio-balloon.c
+#
+virtio_balloon_bad_addr(uint64_t gpa) "%"PRIx64
virtio_balloon_handle_output(const char *name, uint64_t gpa) "section name: %s gpa: %"PRIx64
virtio_balloon_get_config(uint32_t num_pages, uint32_t actual) "num_pages: %d actual: %d"
virtio_balloon_set_config(uint32_t actual, uint32_t oldactual) "actual: %d oldactual: %d"
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 272a5ec584..be927b891e 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -185,6 +185,102 @@ static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start)
}
#endif /* CONFIG_VHOST_VSOCK */
+static void vhost_kernel_iotlb_read(void *opaque)
+{
+ struct vhost_dev *dev = opaque;
+ struct vhost_msg msg;
+ ssize_t len;
+
+ while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
+ struct vhost_iotlb_msg *imsg = &msg.iotlb;
+ if (len < sizeof msg) {
+ error_report("Wrong vhost message len: %d", (int)len);
+ break;
+ }
+ if (msg.type != VHOST_IOTLB_MSG) {
+ error_report("Unknown vhost iotlb message type");
+ break;
+ }
+ switch (imsg->type) {
+ case VHOST_IOTLB_MISS:
+ vhost_device_iotlb_miss(dev, imsg->iova,
+ imsg->perm != VHOST_ACCESS_RO);
+ break;
+ case VHOST_IOTLB_UPDATE:
+ case VHOST_IOTLB_INVALIDATE:
+ error_report("Unexpected IOTLB message type");
+ break;
+ case VHOST_IOTLB_ACCESS_FAIL:
+ /* FIXME: report device iotlb error */
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int vhost_kernel_update_device_iotlb(struct vhost_dev *dev,
+ uint64_t iova, uint64_t uaddr,
+ uint64_t len,
+ IOMMUAccessFlags perm)
+{
+ struct vhost_msg msg;
+ msg.type = VHOST_IOTLB_MSG;
+ msg.iotlb.iova = iova;
+ msg.iotlb.uaddr = uaddr;
+ msg.iotlb.size = len;
+ msg.iotlb.type = VHOST_IOTLB_UPDATE;
+
+ switch (perm) {
+ case IOMMU_RO:
+ msg.iotlb.perm = VHOST_ACCESS_RO;
+ break;
+ case IOMMU_WO:
+ msg.iotlb.perm = VHOST_ACCESS_WO;
+ break;
+ case IOMMU_RW:
+ msg.iotlb.perm = VHOST_ACCESS_RW;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
+ error_report("Fail to update device iotlb");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int vhost_kernel_invalidate_device_iotlb(struct vhost_dev *dev,
+ uint64_t iova, uint64_t len)
+{
+ struct vhost_msg msg;
+
+ msg.type = VHOST_IOTLB_MSG;
+ msg.iotlb.iova = iova;
+ msg.iotlb.size = len;
+ msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
+
+ if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
+ error_report("Fail to invalidate device iotlb");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
+ int enabled)
+{
+ if (enabled) {
+ qemu_set_fd_handler((uintptr_t)dev->opaque,
+ vhost_kernel_iotlb_read, NULL, dev);
+ } else {
+ qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
+ }
+}
+
static const VhostOps kernel_ops = {
.backend_type = VHOST_BACKEND_TYPE_KERNEL,
.vhost_backend_init = vhost_kernel_init,
@@ -214,6 +310,9 @@ static const VhostOps kernel_ops = {
.vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
.vhost_vsock_set_running = vhost_kernel_vsock_set_running,
#endif /* CONFIG_VHOST_VSOCK */
+ .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
+ .vhost_update_device_iotlb = vhost_kernel_update_device_iotlb,
+ .vhost_invalidate_device_iotlb = vhost_kernel_invalidate_device_iotlb,
};
int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
diff --git a/stubs/vhost.c b/hw/virtio/vhost-stub.c
index 2d76cdebdc..2d76cdebdc 100644
--- a/stubs/vhost.c
+++ b/hw/virtio/vhost-stub.c
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 7ee92b32c5..9334a8ae22 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -32,6 +32,7 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
VHOST_USER_PROTOCOL_F_RARP = 2,
VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
+ VHOST_USER_PROTOCOL_F_NET_MTU = 4,
VHOST_USER_PROTOCOL_F_MAX
};
@@ -59,6 +60,7 @@ typedef enum VhostUserRequest {
VHOST_USER_GET_QUEUE_NUM = 17,
VHOST_USER_SET_VRING_ENABLE = 18,
VHOST_USER_SEND_RARP = 19,
+ VHOST_USER_NET_SET_MTU = 20,
VHOST_USER_MAX
} VhostUserRequest;
@@ -186,6 +188,7 @@ static bool vhost_user_one_time_request(VhostUserRequest request)
case VHOST_USER_RESET_OWNER:
case VHOST_USER_SET_MEM_TABLE:
case VHOST_USER_GET_QUEUE_NUM:
+ case VHOST_USER_NET_SET_MTU:
return true;
default:
return false;
@@ -685,6 +688,36 @@ static bool vhost_user_can_merge(struct vhost_dev *dev,
return mfd == rfd;
}
+static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
+{
+ VhostUserMsg msg;
+ bool reply_supported = virtio_has_feature(dev->protocol_features,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK);
+
+ if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
+ return 0;
+ }
+
+ msg.request = VHOST_USER_NET_SET_MTU;
+ msg.payload.u64 = mtu;
+ msg.size = sizeof(msg.payload.u64);
+ msg.flags = VHOST_USER_VERSION;
+ if (reply_supported) {
+ msg.flags |= VHOST_USER_NEED_REPLY_MASK;
+ }
+
+ if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+ return -1;
+ }
+
+ /* If reply_ack is supported, the slave has to ack that the specified MTU is valid */
+ if (reply_supported) {
+ return process_message_reply(dev, msg.request);
+ }
+
+ return 0;
+}
+
const VhostOps user_ops = {
.backend_type = VHOST_BACKEND_TYPE_USER,
.vhost_backend_init = vhost_user_init,
@@ -708,4 +741,5 @@ const VhostOps user_ops = {
.vhost_requires_shm_log = vhost_user_requires_shm_log,
.vhost_migration_done = vhost_user_migration_done,
.vhost_backend_can_merge = vhost_user_can_merge,
+ .vhost_net_set_mtu = vhost_user_net_set_mtu,
};
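The new op is reached through the generic VhostOps table, alongside the VHOST_USER_PROTOCOL_F_NET_MTU protocol feature declared above. A minimal sketch of a caller follows, assuming "dev" is an initialised vhost_dev whose backend is vhost-user; the helper name is illustrative and not part of this patch.

    #include "qemu/osdep.h"
    #include "hw/virtio/vhost.h"
    #include "hw/virtio/vhost-backend.h"

    /* Sketch only: push a host MTU to the backend if it has the hook. */
    static int sketch_set_backend_mtu(struct vhost_dev *dev, uint16_t mtu)
    {
        if (!dev->vhost_ops->vhost_net_set_mtu) {
            return 0;    /* backend does not implement the op */
        }
        /* Returns 0 when the slave accepts the MTU (or when F_NET_MTU was
         * not negotiated), negative on failure. */
        return dev->vhost_ops->vhost_net_set_mtu(dev, mtu);
    }

A caller would typically invoke such a helper before starting the device, and treat a negative return as a fatal setup error.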
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index f7f70237db..b124d97d7c 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -26,6 +26,7 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"
+#include "sysemu/dma.h"
/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1
@@ -421,8 +422,36 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
dev->log_size = size;
}
+static int vhost_dev_has_iommu(struct vhost_dev *dev)
+{
+ VirtIODevice *vdev = dev->vdev;
+ AddressSpace *dma_as = vdev->dma_as;
+
+ return memory_region_is_iommu(dma_as->root) &&
+ virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+}
+
+static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
+ hwaddr *plen, int is_write)
+{
+ if (!vhost_dev_has_iommu(dev)) {
+ return cpu_physical_memory_map(addr, plen, is_write);
+ } else {
+ return (void *)(uintptr_t)addr;
+ }
+}
+
+static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
+ hwaddr len, int is_write,
+ hwaddr access_len)
+{
+ if (!vhost_dev_has_iommu(dev)) {
+ cpu_physical_memory_unmap(buffer, len, is_write, access_len);
+ }
+}
-static int vhost_verify_ring_part_mapping(void *part,
+static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
+ void *part,
uint64_t part_addr,
uint64_t part_size,
uint64_t start_addr,
@@ -436,14 +465,14 @@ static int vhost_verify_ring_part_mapping(void *part,
return 0;
}
l = part_size;
- p = cpu_physical_memory_map(part_addr, &l, 1);
+ p = vhost_memory_map(dev, part_addr, &l, 1);
if (!p || l != part_size) {
r = -ENOMEM;
}
if (p != part) {
r = -EBUSY;
}
- cpu_physical_memory_unmap(p, l, 0, 0);
+ vhost_memory_unmap(dev, p, l, 0, 0);
return r;
}
@@ -463,21 +492,21 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
struct vhost_virtqueue *vq = dev->vqs + i;
j = 0;
- r = vhost_verify_ring_part_mapping(vq->desc, vq->desc_phys,
+ r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
vq->desc_size, start_addr, size);
if (!r) {
break;
}
j++;
- r = vhost_verify_ring_part_mapping(vq->avail, vq->avail_phys,
+ r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
vq->avail_size, start_addr, size);
if (!r) {
break;
}
j++;
- r = vhost_verify_ring_part_mapping(vq->used, vq->used_phys,
+ r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
vq->used_size, start_addr, size);
if (!r) {
break;
@@ -715,7 +744,8 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
return 0;
}
-static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
+static int vhost_dev_set_features(struct vhost_dev *dev,
+ bool enable_log)
{
uint64_t features = dev->acked_features;
int r;
@@ -858,6 +888,56 @@ static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
return -errno;
}
+static int vhost_memory_region_lookup(struct vhost_dev *hdev,
+ uint64_t gpa, uint64_t *uaddr,
+ uint64_t *len)
+{
+ int i;
+
+ for (i = 0; i < hdev->mem->nregions; i++) {
+ struct vhost_memory_region *reg = hdev->mem->regions + i;
+
+ if (gpa >= reg->guest_phys_addr &&
+ reg->guest_phys_addr + reg->memory_size > gpa) {
+ *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
+ *len = reg->guest_phys_addr + reg->memory_size - gpa;
+ return 0;
+ }
+ }
+
+ return -EFAULT;
+}
+
+void vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
+{
+ IOMMUTLBEntry iotlb;
+ uint64_t uaddr, len;
+
+ rcu_read_lock();
+
+ iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
+ iova, write);
+ if (iotlb.target_as != NULL) {
+ if (vhost_memory_region_lookup(dev, iotlb.translated_addr,
+ &uaddr, &len)) {
+ error_report("Fail to lookup the translated address "
+ "%"PRIx64, iotlb.translated_addr);
+ goto out;
+ }
+
+ len = MIN(iotlb.addr_mask + 1, len);
+ iova = iova & ~iotlb.addr_mask;
+
+ if (dev->vhost_ops->vhost_update_device_iotlb(dev, iova, uaddr,
+ len, iotlb.perm)) {
+ error_report("Fail to update device iotlb");
+ goto out;
+ }
+ }
+out:
+ rcu_read_unlock();
+}
+
static int vhost_virtqueue_start(struct vhost_dev *dev,
struct VirtIODevice *vdev,
struct vhost_virtqueue *vq,
@@ -903,21 +983,21 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
- vq->desc = cpu_physical_memory_map(a, &l, 0);
+ vq->desc = vhost_memory_map(dev, a, &l, 0);
if (!vq->desc || l != s) {
r = -ENOMEM;
goto fail_alloc_desc;
}
vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
- vq->avail = cpu_physical_memory_map(a, &l, 0);
+ vq->avail = vhost_memory_map(dev, a, &l, 0);
if (!vq->avail || l != s) {
r = -ENOMEM;
goto fail_alloc_avail;
}
vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
- vq->used = cpu_physical_memory_map(a, &l, 1);
+ vq->used = vhost_memory_map(dev, a, &l, 1);
if (!vq->used || l != s) {
r = -ENOMEM;
goto fail_alloc_used;
@@ -963,14 +1043,14 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
fail_vector:
fail_kick:
fail_alloc:
- cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
- 0, 0);
+ vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
+ 0, 0);
fail_alloc_used:
- cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
- 0, 0);
+ vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
+ 0, 0);
fail_alloc_avail:
- cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
- 0, 0);
+ vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
+ 0, 0);
fail_alloc_desc:
return r;
}
@@ -993,6 +1073,7 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
virtio_queue_set_last_avail_idx(vdev, idx, state.num);
}
virtio_queue_invalidate_signalled_used(vdev, idx);
+ virtio_queue_update_used_idx(vdev, idx);
/* In the cross-endian case, we need to reset the vring endianness to
* native as legacy devices expect so by default.
@@ -1003,12 +1084,12 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
vhost_vq_index);
}
- cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
- 1, virtio_queue_get_used_size(vdev, idx));
- cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
- 0, virtio_queue_get_avail_size(vdev, idx));
- cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
- 0, virtio_queue_get_desc_size(vdev, idx));
+ vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
+ 1, virtio_queue_get_used_size(vdev, idx));
+ vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
+ 0, virtio_queue_get_avail_size(vdev, idx));
+ vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
+ 0, virtio_queue_get_desc_size(vdev, idx));
}
static void vhost_eventfd_add(MemoryListener *listener,
@@ -1065,6 +1146,9 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
r = -errno;
goto fail_call;
}
+
+ vq->dev = dev;
+
return 0;
fail_call:
event_notifier_cleanup(&vq->masked_notifier);
@@ -1076,12 +1160,25 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
event_notifier_cleanup(&vq->masked_notifier);
}
+static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+ struct vhost_dev *hdev = container_of(n, struct vhost_dev, n);
+
+ if (hdev->vhost_ops->vhost_invalidate_device_iotlb(hdev,
+ iotlb->iova,
+ iotlb->addr_mask + 1)) {
+ error_report("Fail to invalidate device iotlb");
+ }
+}
+
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
VhostBackendType backend_type, uint32_t busyloop_timeout)
{
uint64_t features;
int i, r, n_initialized_vqs = 0;
+ Error *local_err = NULL;
+ hdev->vdev = NULL;
hdev->migration_blocker = NULL;
r = vhost_set_backend_type(hdev, backend_type);
@@ -1146,6 +1243,9 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
.priority = 10
};
+ hdev->n.notify = vhost_iommu_unmap_notify;
+ hdev->n.notifier_flags = IOMMU_NOTIFIER_UNMAP;
+
if (hdev->migration_blocker == NULL) {
if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
error_setg(&hdev->migration_blocker,
@@ -1157,7 +1257,12 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
}
if (hdev->migration_blocker != NULL) {
- migrate_add_blocker(hdev->migration_blocker);
+ r = migrate_add_blocker(hdev->migration_blocker, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ error_free(hdev->migration_blocker);
+ goto fail_busyloop;
+ }
}
hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
@@ -1341,11 +1446,18 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
assert(hdev->vhost_ops);
hdev->started = true;
+ hdev->vdev = vdev;
r = vhost_dev_set_features(hdev, hdev->log_enabled);
if (r < 0) {
goto fail_features;
}
+
+ if (vhost_dev_has_iommu(hdev)) {
+ memory_region_register_iommu_notifier(vdev->dma_as->root,
+ &hdev->n);
+ }
+
r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
if (r < 0) {
VHOST_OPS_DEBUG("vhost_set_mem_table failed");
@@ -1379,6 +1491,16 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
}
}
+ if (vhost_dev_has_iommu(hdev)) {
+ hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
+
+ /* Update used ring information so that the IOTLB works correctly;
+ * the vhost-kernel code requires this. */
+ for (i = 0; i < hdev->nvqs; ++i) {
+ struct vhost_virtqueue *vq = hdev->vqs + i;
+ vhost_device_iotlb_miss(hdev, vq->used_phys, true);
+ }
+ }
return 0;
fail_log:
vhost_log_put(hdev, false);
@@ -1390,6 +1512,7 @@ fail_vq:
hdev->vq_index + i);
}
i = hdev->nvqs;
+
fail_mem:
fail_features:
@@ -1412,8 +1535,14 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
hdev->vq_index + i);
}
+ if (vhost_dev_has_iommu(hdev)) {
+ hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
+ memory_region_unregister_iommu_notifier(vdev->dma_as->root,
+ &hdev->n);
+ }
vhost_log_put(hdev, true);
hdev->started = false;
+ hdev->vdev = NULL;
}
int vhost_net_set_backend(struct vhost_dev *hdev,
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index 884570a57d..a705e0ec55 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -228,8 +228,13 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
/* FIXME: remove get_system_memory(), but how? */
section = memory_region_find(get_system_memory(), pa, 1);
- if (!int128_nz(section.size) || !memory_region_is_ram(section.mr))
+ if (!int128_nz(section.size) ||
+ !memory_region_is_ram(section.mr) ||
+ memory_region_is_rom(section.mr) ||
+ memory_region_is_romd(section.mr)) {
+ trace_virtio_balloon_bad_addr(pa);
continue;
+ }
trace_virtio_balloon_handle_output(memory_region_name(section.mr),
pa);
diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c
index d6c0c72bd2..a886011e75 100644
--- a/hw/virtio/virtio-bus.c
+++ b/hw/virtio/virtio-bus.c
@@ -28,6 +28,7 @@
#include "hw/qdev.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio.h"
+#include "exec/address-spaces.h"
/* #define DEBUG_VIRTIO_BUS */
@@ -46,6 +47,7 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp)
VirtioBusState *bus = VIRTIO_BUS(qbus);
VirtioBusClass *klass = VIRTIO_BUS_GET_CLASS(bus);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ bool has_iommu = virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
DPRINTF("%s: plug device.\n", qbus->name);
@@ -61,6 +63,13 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp)
if (klass->device_plugged != NULL) {
klass->device_plugged(qbus->parent, errp);
}
+
+ if (klass->get_dma_as != NULL && has_iommu) {
+ virtio_add_feature(&vdev->host_features, VIRTIO_F_IOMMU_PLATFORM);
+ vdev->dma_as = klass->get_dma_as(qbus->parent);
+ } else {
+ vdev->dma_as = &address_space_memory;
+ }
}
/* Reset the virtio_bus */
diff --git a/hw/virtio/virtio-crypto-pci.c b/hw/virtio/virtio-crypto-pci.c
index a1b09064c0..422aca3a98 100644
--- a/hw/virtio/virtio-crypto-pci.c
+++ b/hw/virtio/virtio-crypto-pci.c
@@ -31,6 +31,11 @@ static void virtio_crypto_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
VirtIOCryptoPCI *vcrypto = VIRTIO_CRYPTO_PCI(vpci_dev);
DeviceState *vdev = DEVICE(&vcrypto->vdev);
+ if (vcrypto->vdev.conf.cryptodev == NULL) {
+ error_setg(errp, "'cryptodev' parameter expects a valid object");
+ return;
+ }
+
qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
virtio_pci_force_virtio_1(vpci_dev);
object_property_set_bool(OBJECT(vdev), true, "realized", errp);
@@ -48,7 +53,6 @@ static void virtio_crypto_pci_class_init(ObjectClass *klass, void *data)
k->realize = virtio_crypto_pci_realize;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->props = virtio_crypto_pci_properties;
- dc->hotpluggable = false;
pcidev_k->class_id = PCI_CLASS_OTHERS;
}
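As a usage note, the new check ties realize failure to the 'cryptodev' link that the device realize code below otherwise relies on (vcrypto->cryptodev is dereferenced there). In practice the device is paired with a backend object on the command line, along the lines of "-object cryptodev-backend-builtin,id=cryptodev0 -device virtio-crypto-pci,cryptodev=cryptodev0" (spelling shown for illustration; see the cryptodev backend documentation).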
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index 2f2467e859..0353eb6d5d 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -337,7 +337,18 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req)
{
if (req) {
if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
- g_free(req->u.sym_op_info);
+ size_t max_len;
+ CryptoDevBackendSymOpInfo *op_info = req->u.sym_op_info;
+
+ max_len = op_info->iv_len +
+ op_info->aad_len +
+ op_info->src_len +
+ op_info->dst_len +
+ op_info->digest_result_len;
+
+ /* Zeroize and free request data structure */
+ memset(op_info, 0, sizeof(*op_info) + max_len);
+ g_free(op_info);
}
g_free(req);
}
@@ -355,7 +366,7 @@ virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
return;
}
- len = sym_op_info->dst_len;
+ len = sym_op_info->src_len;
/* Save the cipher result */
s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
if (s != len) {
@@ -416,7 +427,7 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev,
uint32_t hash_start_src_offset = 0, len_to_hash = 0;
uint32_t cipher_start_src_offset = 0, len_to_cipher = 0;
- size_t max_len, curr_size = 0;
+ uint64_t max_len, curr_size = 0;
size_t s;
/* Plain cipher */
@@ -441,7 +452,7 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev,
return NULL;
}
- max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;
+ max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
if (unlikely(max_len > vcrypto->conf.max_size)) {
virtio_error(vdev, "virtio-crypto too big length");
return NULL;
@@ -732,7 +743,7 @@ static void virtio_crypto_reset(VirtIODevice *vdev)
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
/* multiqueue is disabled by default */
vcrypto->curr_queues = 1;
- if (!vcrypto->cryptodev->ready) {
+ if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
} else {
vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
@@ -775,7 +786,7 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
vcrypto->max_queues = MAX(vcrypto->cryptodev->conf.peers.queues, 1);
if (vcrypto->max_queues + 1 > VIRTIO_QUEUE_MAX) {
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
- "must be a postive integer less than %d.",
+ "must be a positive integer less than %d.",
vcrypto->max_queues, VIRTIO_QUEUE_MAX);
return;
}
@@ -792,13 +803,14 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
}
vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
- if (!vcrypto->cryptodev->ready) {
+ if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
} else {
vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
}
virtio_crypto_init_config(vdev);
+ cryptodev_backend_set_used(vcrypto->cryptodev, true);
}
static void virtio_crypto_device_unrealize(DeviceState *dev, Error **errp)
@@ -818,6 +830,7 @@ static void virtio_crypto_device_unrealize(DeviceState *dev, Error **errp)
g_free(vcrypto->vqs);
virtio_cleanup(vdev);
+ cryptodev_backend_set_used(vcrypto->cryptodev, false);
}
static const VMStateDescription vmstate_virtio_crypto = {
@@ -875,6 +888,20 @@ static void virtio_crypto_class_init(ObjectClass *klass, void *data)
vdc->reset = virtio_crypto_reset;
}
+static void
+virtio_crypto_check_cryptodev_is_used(Object *obj, const char *name,
+ Object *val, Error **errp)
+{
+ if (cryptodev_backend_is_used(CRYPTODEV_BACKEND(val))) {
+ char *path = object_get_canonical_path_component(val);
+ error_setg(errp,
+ "can't use already used cryptodev backend: %s", path);
+ g_free(path);
+ } else {
+ qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
+ }
+}
+
static void virtio_crypto_instance_init(Object *obj)
{
VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(obj);
@@ -888,7 +915,7 @@ static void virtio_crypto_instance_init(Object *obj)
object_property_add_link(obj, "cryptodev",
TYPE_CRYPTODEV_BACKEND,
(Object **)&vcrypto->conf.cryptodev,
- qdev_prop_allow_set_link_before_realize,
+ virtio_crypto_check_cryptodev_is_used,
OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
}
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index 17412cb7b5..5807aa87fe 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -20,6 +20,7 @@
*/
#include "qemu/osdep.h"
+#include "standard-headers/linux/virtio_mmio.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "qemu/host-utils.h"
@@ -52,28 +53,6 @@ do { printf("virtio_mmio: " fmt , ## __VA_ARGS__); } while (0)
#define VIRTIO_MMIO(obj) \
OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO)
-/* Memory mapped register offsets */
-#define VIRTIO_MMIO_MAGIC 0x0
-#define VIRTIO_MMIO_VERSION 0x4
-#define VIRTIO_MMIO_DEVICEID 0x8
-#define VIRTIO_MMIO_VENDORID 0xc
-#define VIRTIO_MMIO_HOSTFEATURES 0x10
-#define VIRTIO_MMIO_HOSTFEATURESSEL 0x14
-#define VIRTIO_MMIO_GUESTFEATURES 0x20
-#define VIRTIO_MMIO_GUESTFEATURESSEL 0x24
-#define VIRTIO_MMIO_GUESTPAGESIZE 0x28
-#define VIRTIO_MMIO_QUEUESEL 0x30
-#define VIRTIO_MMIO_QUEUENUMMAX 0x34
-#define VIRTIO_MMIO_QUEUENUM 0x38
-#define VIRTIO_MMIO_QUEUEALIGN 0x3c
-#define VIRTIO_MMIO_QUEUEPFN 0x40
-#define VIRTIO_MMIO_QUEUENOTIFY 0x50
-#define VIRTIO_MMIO_INTERRUPTSTATUS 0x60
-#define VIRTIO_MMIO_INTERRUPTACK 0x64
-#define VIRTIO_MMIO_STATUS 0x70
-/* Device specific config space starts here */
-#define VIRTIO_MMIO_CONFIG 0x100
-
#define VIRT_MAGIC 0x74726976 /* 'virt' */
#define VIRT_VERSION 1
#define VIRT_VENDOR 0x554D4551 /* 'QEMU' */
@@ -104,10 +83,10 @@ static int virtio_mmio_ioeventfd_assign(DeviceState *d,
VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
if (assign) {
- memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
+ memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
true, n, notifier);
} else {
- memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUENOTIFY, 4,
+ memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
true, n, notifier);
}
return 0;
@@ -140,11 +119,11 @@ static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
* device ID of zero means no backend will claim it.
*/
switch (offset) {
- case VIRTIO_MMIO_MAGIC:
+ case VIRTIO_MMIO_MAGIC_VALUE:
return VIRT_MAGIC;
case VIRTIO_MMIO_VERSION:
return VIRT_VERSION;
- case VIRTIO_MMIO_VENDORID:
+ case VIRTIO_MMIO_VENDOR_ID:
return VIRT_VENDOR;
default:
return 0;
@@ -169,40 +148,40 @@ static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
return 0;
}
switch (offset) {
- case VIRTIO_MMIO_MAGIC:
+ case VIRTIO_MMIO_MAGIC_VALUE:
return VIRT_MAGIC;
case VIRTIO_MMIO_VERSION:
return VIRT_VERSION;
- case VIRTIO_MMIO_DEVICEID:
+ case VIRTIO_MMIO_DEVICE_ID:
return vdev->device_id;
- case VIRTIO_MMIO_VENDORID:
+ case VIRTIO_MMIO_VENDOR_ID:
return VIRT_VENDOR;
- case VIRTIO_MMIO_HOSTFEATURES:
+ case VIRTIO_MMIO_DEVICE_FEATURES:
if (proxy->host_features_sel) {
return 0;
}
return vdev->host_features;
- case VIRTIO_MMIO_QUEUENUMMAX:
+ case VIRTIO_MMIO_QUEUE_NUM_MAX:
if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
return 0;
}
return VIRTQUEUE_MAX_SIZE;
- case VIRTIO_MMIO_QUEUEPFN:
+ case VIRTIO_MMIO_QUEUE_PFN:
return virtio_queue_get_addr(vdev, vdev->queue_sel)
>> proxy->guest_page_shift;
- case VIRTIO_MMIO_INTERRUPTSTATUS:
+ case VIRTIO_MMIO_INTERRUPT_STATUS:
return atomic_read(&vdev->isr);
case VIRTIO_MMIO_STATUS:
return vdev->status;
- case VIRTIO_MMIO_HOSTFEATURESSEL:
- case VIRTIO_MMIO_GUESTFEATURES:
- case VIRTIO_MMIO_GUESTFEATURESSEL:
- case VIRTIO_MMIO_GUESTPAGESIZE:
- case VIRTIO_MMIO_QUEUESEL:
- case VIRTIO_MMIO_QUEUENUM:
- case VIRTIO_MMIO_QUEUEALIGN:
- case VIRTIO_MMIO_QUEUENOTIFY:
- case VIRTIO_MMIO_INTERRUPTACK:
+ case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
+ case VIRTIO_MMIO_DRIVER_FEATURES:
+ case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
+ case VIRTIO_MMIO_GUEST_PAGE_SIZE:
+ case VIRTIO_MMIO_QUEUE_SEL:
+ case VIRTIO_MMIO_QUEUE_NUM:
+ case VIRTIO_MMIO_QUEUE_ALIGN:
+ case VIRTIO_MMIO_QUEUE_NOTIFY:
+ case VIRTIO_MMIO_INTERRUPT_ACK:
DPRINTF("read of write-only register\n");
return 0;
default:
@@ -251,18 +230,18 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
return;
}
switch (offset) {
- case VIRTIO_MMIO_HOSTFEATURESSEL:
+ case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
proxy->host_features_sel = value;
break;
- case VIRTIO_MMIO_GUESTFEATURES:
+ case VIRTIO_MMIO_DRIVER_FEATURES:
if (!proxy->guest_features_sel) {
virtio_set_features(vdev, value);
}
break;
- case VIRTIO_MMIO_GUESTFEATURESSEL:
+ case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
proxy->guest_features_sel = value;
break;
- case VIRTIO_MMIO_GUESTPAGESIZE:
+ case VIRTIO_MMIO_GUEST_PAGE_SIZE:
proxy->guest_page_shift = ctz32(value);
if (proxy->guest_page_shift > 31) {
proxy->guest_page_shift = 0;
@@ -270,22 +249,22 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
DPRINTF("guest page size %" PRIx64 " shift %d\n", value,
proxy->guest_page_shift);
break;
- case VIRTIO_MMIO_QUEUESEL:
+ case VIRTIO_MMIO_QUEUE_SEL:
if (value < VIRTIO_QUEUE_MAX) {
vdev->queue_sel = value;
}
break;
- case VIRTIO_MMIO_QUEUENUM:
+ case VIRTIO_MMIO_QUEUE_NUM:
DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
virtio_queue_set_num(vdev, vdev->queue_sel, value);
/* Note: only call this function for legacy devices */
virtio_queue_update_rings(vdev, vdev->queue_sel);
break;
- case VIRTIO_MMIO_QUEUEALIGN:
+ case VIRTIO_MMIO_QUEUE_ALIGN:
/* Note: this is only valid for legacy devices */
virtio_queue_set_align(vdev, vdev->queue_sel, value);
break;
- case VIRTIO_MMIO_QUEUEPFN:
+ case VIRTIO_MMIO_QUEUE_PFN:
if (value == 0) {
virtio_reset(vdev);
} else {
@@ -293,12 +272,12 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
value << proxy->guest_page_shift);
}
break;
- case VIRTIO_MMIO_QUEUENOTIFY:
+ case VIRTIO_MMIO_QUEUE_NOTIFY:
if (value < VIRTIO_QUEUE_MAX) {
virtio_queue_notify(vdev, value);
}
break;
- case VIRTIO_MMIO_INTERRUPTACK:
+ case VIRTIO_MMIO_INTERRUPT_ACK:
atomic_and(&vdev->isr, ~value);
virtio_update_irq(vdev);
break;
@@ -317,13 +296,13 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
virtio_reset(vdev);
}
break;
- case VIRTIO_MMIO_MAGIC:
+ case VIRTIO_MMIO_MAGIC_VALUE:
case VIRTIO_MMIO_VERSION:
- case VIRTIO_MMIO_DEVICEID:
- case VIRTIO_MMIO_VENDORID:
- case VIRTIO_MMIO_HOSTFEATURES:
- case VIRTIO_MMIO_QUEUENUMMAX:
- case VIRTIO_MMIO_INTERRUPTSTATUS:
+ case VIRTIO_MMIO_DEVICE_ID:
+ case VIRTIO_MMIO_VENDOR_ID:
+ case VIRTIO_MMIO_DEVICE_FEATURES:
+ case VIRTIO_MMIO_QUEUE_NUM_MAX:
+ case VIRTIO_MMIO_INTERRUPT_STATUS:
DPRINTF("write to readonly register\n");
break;
@@ -402,7 +381,7 @@ static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
event_notifier_cleanup(notifier);
}
- if (vdc->guest_notifier_mask) {
+ if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
vdc->guest_notifier_mask(vdev, n, !assign);
}
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 21c2b9dbfc..b5af2a00f3 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -108,7 +108,8 @@ static bool virtio_pci_has_extra_state(DeviceState *d)
return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}
-static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
+static int get_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
VirtIOPCIProxy *proxy = pv;
int i;
@@ -137,7 +138,8 @@ static void virtio_pci_save_modern_queue_state(VirtIOPCIQueue *vq,
qemu_put_be32(f, vq->used[1]);
}
-static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
+static int put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
VirtIOPCIProxy *proxy = pv;
int i;
@@ -149,6 +151,8 @@ static void put_virtio_pci_modern_state(QEMUFile *f, void *pv, size_t size)
for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
virtio_pci_save_modern_queue_state(&proxy->vqs[i], f);
}
+
+ return 0;
}
static const VMStateInfo vmstate_info_virtio_pci_modern_state = {
@@ -1144,6 +1148,14 @@ static int virtio_pci_query_nvectors(DeviceState *d)
return proxy->nvectors;
}
+static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
+{
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
+ PCIDevice *dev = &proxy->pci_dev;
+
+ return pci_get_address_space(dev);
+}
+
static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
struct virtio_pci_cap *cap)
{
@@ -1308,7 +1320,6 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr,
virtio_queue_set_vector(vdev, vdev->queue_sel, val);
break;
case VIRTIO_PCI_COMMON_Q_ENABLE:
- /* TODO: need a way to put num back on reset. */
virtio_queue_set_num(vdev, vdev->queue_sel,
proxy->vqs[vdev->queue_sel].num);
virtio_queue_set_rings(vdev, vdev->queue_sel,
@@ -1601,6 +1612,11 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
}
if (legacy) {
+ if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
+ error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
+ "neither legacy nor transitional device.");
+ return ;
+ }
/* legacy and transitional */
pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
pci_get_word(config + PCI_VENDOR_ID));
@@ -1802,6 +1818,11 @@ static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
* PCI Power Management Interface Specification.
*/
pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);
+
+ if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
+ pcie_ats_init(pci_dev, 256);
+ }
+
} else {
/*
* make future invocations of pci_is_express() return false
@@ -1855,6 +1876,8 @@ static Property virtio_pci_properties[] = {
VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
ignore_backend_features, false),
+ DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_ATS_BIT, false),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2258,7 +2281,7 @@ static const TypeInfo virtio_serial_pci_info = {
static Property virtio_net_properties[] = {
DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
- VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2520,6 +2543,7 @@ static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
k->query_nvectors = virtio_pci_query_nvectors;
k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
+ k->get_dma_as = virtio_pci_get_dma_as;
}
static const TypeInfo virtio_pci_bus_info = {
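Design-wise, the new "ats" flag defaults to off, so existing machine types keep their current configuration space layout; when enabled on a PCI Express proxy it only adds an ATS capability via pcie_ats_init(), which is what lets a vIOMMU that supports device IOTLBs cooperate with the vhost IOTLB plumbing added earlier in this series. It would normally be combined with the device's iommu_platform property when the device sits behind a vIOMMU; that pairing, and the exact IOMMU options, depend on the machine model and are an assumption rather than something spelled out in this patch.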
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index 5e078866c4..d00064cc0c 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -72,6 +72,7 @@ enum {
VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT,
VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT,
VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT,
+ VIRTIO_PCI_FLAG_ATS_BIT,
};
/* Need to activate work-arounds for buggy guests at vmstate load. */
@@ -96,6 +97,9 @@ enum {
#define VIRTIO_PCI_FLAG_PAGE_PER_VQ \
(1 << VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT)
+/* address space translation service */
+#define VIRTIO_PCI_FLAG_ATS (1 << VIRTIO_PCI_FLAG_ATS_BIT)
+
typedef struct {
MSIMessage msg;
int virq;
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 1af2de2714..f292a53940 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -23,6 +23,7 @@
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"
+#include "sysemu/dma.h"
/*
* The alignment to use between consumer and producer parts of vring.
@@ -92,7 +93,7 @@ struct VirtQueue
uint16_t queue_index;
- int inuse;
+ unsigned int inuse;
uint16_t vector;
VirtIOHandleOutput handle_output;
@@ -121,7 +122,7 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
hwaddr desc_pa, int i)
{
- address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc),
+ address_space_read(vdev->dma_as, desc_pa + i * sizeof(VRingDesc),
MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
virtio_tswap64s(vdev, &desc->addr);
virtio_tswap32s(vdev, &desc->len);
@@ -163,7 +164,7 @@ static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
virtio_tswap32s(vq->vdev, &uelem->id);
virtio_tswap32s(vq->vdev, &uelem->len);
pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
- address_space_write(&address_space_memory, pa, MEMTXATTRS_UNSPECIFIED,
+ address_space_write(vq->vdev->dma_as, pa, MEMTXATTRS_UNSPECIFIED,
(void *)uelem, sizeof(VRingUsedElem));
}
@@ -243,6 +244,7 @@ int virtio_queue_empty(VirtQueue *vq)
static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len)
{
+ AddressSpace *dma_as = vq->vdev->dma_as;
unsigned int offset;
int i;
@@ -250,17 +252,18 @@ static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
for (i = 0; i < elem->in_num; i++) {
size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
- cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
- elem->in_sg[i].iov_len,
- 1, size);
+ dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
+ elem->in_sg[i].iov_len,
+ DMA_DIRECTION_FROM_DEVICE, size);
offset += size;
}
for (i = 0; i < elem->out_num; i++)
- cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
- elem->out_sg[i].iov_len,
- 0, elem->out_sg[i].iov_len);
+ dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
+ elem->out_sg[i].iov_len,
+ DMA_DIRECTION_TO_DEVICE,
+ elem->out_sg[i].iov_len);
}
/* virtqueue_detach_element:
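Both hunks in this function translate virtio's is_write flag into a DMA direction before going through the device's DMA address space. A minimal sketch of that convention, assuming the QEMU tree headers touched by this series (the helper name virtio_dir is invented for illustration):

#include "qemu/osdep.h"
#include "sysemu/dma.h"

/* is_write comes from the guest descriptor: the *device* writes the buffer,
 * so data flows from the device into guest memory. */
static inline DMADirection virtio_dir(bool is_write)
{
    return is_write ? DMA_DIRECTION_FROM_DEVICE : DMA_DIRECTION_TO_DEVICE;
}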
@@ -554,7 +557,10 @@ static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
goto out;
}
- iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
+ iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
+ is_write ?
+ DMA_DIRECTION_FROM_DEVICE :
+ DMA_DIRECTION_TO_DEVICE);
if (!iov[num_sg].iov_base) {
virtio_error(vdev, "virtio: bogus descriptor or out of resources");
goto out;
@@ -591,28 +597,19 @@ static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
}
}
-static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
- unsigned int *num_sg, unsigned int max_size,
+static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
+ hwaddr *addr, unsigned int *num_sg,
int is_write)
{
unsigned int i;
hwaddr len;
- /* Note: this function MUST validate input, some callers
- * are passing in num_sg values received over the network.
- */
- /* TODO: teach all callers that this can fail, and return failure instead
- * of asserting here.
- * When we do, we might be able to re-enable NDEBUG below.
- */
-#ifdef NDEBUG
-#error building with NDEBUG is not supported
-#endif
- assert(*num_sg <= max_size);
-
for (i = 0; i < *num_sg; i++) {
len = sg[i].iov_len;
- sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
+ sg[i].iov_base = dma_memory_map(vdev->dma_as,
+ addr[i], &len, is_write ?
+ DMA_DIRECTION_FROM_DEVICE :
+ DMA_DIRECTION_TO_DEVICE);
if (!sg[i].iov_base) {
error_report("virtio: error trying to map MMIO memory");
exit(1);
@@ -624,12 +621,10 @@ static void virtqueue_map_iovec(struct iovec *sg, hwaddr *addr,
}
}
-void virtqueue_map(VirtQueueElement *elem)
+void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
- virtqueue_map_iovec(elem->in_sg, elem->in_addr, &elem->in_num,
- VIRTQUEUE_MAX_SIZE, 1);
- virtqueue_map_iovec(elem->out_sg, elem->out_addr, &elem->out_num,
- VIRTQUEUE_MAX_SIZE, 0);
+ virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, 1);
+ virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, 0);
}
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
@@ -765,6 +760,44 @@ err_undo_map:
return NULL;
}
+/* virtqueue_drop_all:
+ * @vq: The #VirtQueue
+ * Drops all queued buffers and indicates them to the guest
+ * as if they are done. Useful when buffers can not be
+ * processed but must be returned to the guest.
+ */
+unsigned int virtqueue_drop_all(VirtQueue *vq)
+{
+ unsigned int dropped = 0;
+ VirtQueueElement elem = {};
+ VirtIODevice *vdev = vq->vdev;
+ bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+
+ if (unlikely(vdev->broken)) {
+ return 0;
+ }
+
+ while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
+ /* works similarly to virtqueue_pop but does not map buffers
+ * and does not allocate any memory */
+ smp_rmb();
+ if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
+ break;
+ }
+ vq->inuse++;
+ vq->last_avail_idx++;
+ if (fEventIdx) {
+ vring_set_avail_event(vq, vq->last_avail_idx);
+ }
+ /* immediately push the element, nothing to unmap
+ * as both in_num and out_num are set to 0 */
+ virtqueue_push(vq, &elem, 0);
+ dropped++;
+ }
+
+ return dropped;
+}
+
/* Reading and writing a structure directly to QEMUFile is *awful*, but
* it is what QEMU has always done by mistake. We can change it sooner
* or later by bumping the version number of the affected vm states.
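virtqueue_drop_all() only advances the ring indexes and completes each buffer with length 0, without touching guest memory. Below is a self-contained toy model of that bookkeeping; the struct, field names and ring size are assumptions chosen only so the sketch compiles on its own, not QEMU's actual layout:

#include <stdint.h>
#include <stdio.h>

#define RING_NUM 8

struct toy_vq {
    uint16_t avail_idx;        /* written by the "guest" */
    uint16_t last_avail_idx;   /* next descriptor the "device" will take */
    uint16_t used_idx;         /* next used-ring slot the "device" fills */
    unsigned int inuse;        /* descriptors popped but not yet returned */
    uint16_t used_len[RING_NUM];
};

static unsigned int toy_drop_all(struct toy_vq *vq)
{
    unsigned int dropped = 0;

    while (vq->last_avail_idx != vq->avail_idx && vq->inuse < RING_NUM) {
        vq->inuse++;
        vq->last_avail_idx++;
        /* Return the buffer to the guest as "used" with zero bytes written. */
        vq->used_len[vq->used_idx % RING_NUM] = 0;
        vq->used_idx++;
        vq->inuse--;
        dropped++;
    }
    return dropped;
}

int main(void)
{
    struct toy_vq vq = { .avail_idx = 5 };

    printf("dropped %u buffers\n", toy_drop_all(&vq));  /* prints 5 */
    return 0;
}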
@@ -782,7 +815,7 @@ typedef struct VirtQueueElementOld {
struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;
-void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
+void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
VirtQueueElement *elem;
VirtQueueElementOld data;
@@ -790,6 +823,16 @@ void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
+ /* TODO: teach all callers that this can fail, and return failure instead
+ * of asserting here.
+ * When we do, we might be able to re-enable NDEBUG below.
+ */
+#ifdef NDEBUG
+#error building with NDEBUG is not supported
+#endif
+ assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
+ assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
+
elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
elem->index = data.index;
@@ -813,7 +856,7 @@ void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz)
elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
}
- virtqueue_map(elem);
+ virtqueue_map(vdev, elem);
return elem;
}
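With the extra VirtIODevice argument, a device's incoming-migration path has its elements re-mapped through vdev->dma_as instead of the global memory address space. A hedged usage sketch, assuming compilation inside the QEMU tree; MyReq and my_load_request are invented names, and the only contract relied on is that the VirtQueueElement sits at the start of the allocation:

#include "qemu/osdep.h"
#include "hw/virtio/virtio.h"

typedef struct MyReq {
    VirtQueueElement elem;     /* must be the first member */
    uint32_t status;
} MyReq;

static MyReq *my_load_request(VirtIODevice *vdev, QEMUFile *f)
{
    /* Allocates sizeof(MyReq), restores the element and re-maps its
     * scatter-gather lists through vdev->dma_as. */
    return qemu_get_virtqueue_element(vdev, f, sizeof(MyReq));
}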
@@ -872,6 +915,11 @@ static int virtio_validate_features(VirtIODevice *vdev)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
+ !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
+ return -EFAULT;
+ }
+
if (k->validate_features) {
return k->validate_features(vdev);
} else {
@@ -1201,6 +1249,11 @@ int virtio_queue_get_num(VirtIODevice *vdev, int n)
return vdev->vq[n].vring.num;
}
+int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
+{
+ return vdev->vq[n].vring.num_default;
+}
+
int virtio_get_num_queues(VirtIODevice *vdev)
{
int i;
@@ -1502,7 +1555,8 @@ static const VMStateDescription vmstate_virtio_ringsize = {
}
};
-static int get_extra_state(QEMUFile *f, void *pv, size_t size)
+static int get_extra_state(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
VirtIODevice *vdev = pv;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
@@ -1515,13 +1569,15 @@ static int get_extra_state(QEMUFile *f, void *pv, size_t size)
}
}
-static void put_extra_state(QEMUFile *f, void *pv, size_t size)
+static int put_extra_state(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
VirtIODevice *vdev = pv;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
k->save_extra_state(qbus->parent, f);
+ return 0;
}
static const VMStateInfo vmstate_info_extra_state = {
@@ -1656,13 +1712,17 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
}
/* A wrapper for use as a VMState .put function */
-static void virtio_device_put(QEMUFile *f, void *opaque, size_t size)
+static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
virtio_save(VIRTIO_DEVICE(opaque), f);
+
+ return 0;
}
/* A wrapper for use as a VMState .get function */
-static int virtio_device_get(QEMUFile *f, void *opaque, size_t size)
+static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
@@ -1855,9 +1915,11 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
/*
* Some devices migrate VirtQueueElements that have been popped
* from the avail ring but not yet returned to the used ring.
+ * Since the max ring size is smaller than UINT16_MAX, it is safe
+ * to use subtraction modulo UINT16_MAX + 1.
*/
- vdev->vq[i].inuse = vdev->vq[i].last_avail_idx -
- vdev->vq[i].used_idx;
+ vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
+ vdev->vq[i].used_idx);
if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
"used_idx 0x%x",
@@ -1995,6 +2057,11 @@ void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
vdev->vq[n].shadow_avail_idx = idx;
}
+void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
+{
+ vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
+}
+
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
vdev->vq[n].signalled_used_valid = false;
@@ -2022,10 +2089,10 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
bool with_irqfd)
{
if (assign && !with_irqfd) {
- event_notifier_set_handler(&vq->guest_notifier, false,
+ event_notifier_set_handler(&vq->guest_notifier,
virtio_queue_guest_notifier_read);
} else {
- event_notifier_set_handler(&vq->guest_notifier, false, NULL);
+ event_notifier_set_handler(&vq->guest_notifier, NULL);
}
if (!assign) {
/* Test and clear notifier before closing it,
@@ -2047,15 +2114,50 @@ static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
}
}
+static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
+{
+ VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+
+ virtio_queue_set_notification(vq, 0);
+}
+
+static bool virtio_queue_host_notifier_aio_poll(void *opaque)
+{
+ EventNotifier *n = opaque;
+ VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+
+ if (virtio_queue_empty(vq)) {
+ return false;
+ }
+
+ virtio_queue_notify_aio_vq(vq);
+
+ /* In case the handler function re-enabled notifications */
+ virtio_queue_set_notification(vq, 0);
+ return true;
+}
+
+static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
+{
+ VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+
+ /* Caller polls once more after this to catch requests that race with us */
+ virtio_queue_set_notification(vq, 1);
+}
+
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
VirtIOHandleOutput handle_output)
{
if (handle_output) {
vq->handle_aio_output = handle_output;
aio_set_event_notifier(ctx, &vq->host_notifier, true,
- virtio_queue_host_notifier_aio_read);
+ virtio_queue_host_notifier_aio_read,
+ virtio_queue_host_notifier_aio_poll);
+ aio_set_event_notifier_poll(ctx, &vq->host_notifier,
+ virtio_queue_host_notifier_aio_poll_begin,
+ virtio_queue_host_notifier_aio_poll_end);
} else {
- aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL);
+ aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
/* Test and clear notifier after disabling the event,
* in case poll callback didn't have time to run. */
virtio_queue_host_notifier_aio_read(&vq->host_notifier);
@@ -2162,7 +2264,7 @@ static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
err = r;
goto assign_error;
}
- event_notifier_set_handler(&vq->host_notifier, true,
+ event_notifier_set_handler(&vq->host_notifier,
virtio_queue_host_notifier_read);
}
@@ -2183,7 +2285,7 @@ assign_error:
continue;
}
- event_notifier_set_handler(&vq->host_notifier, true, NULL);
+ event_notifier_set_handler(&vq->host_notifier, NULL);
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
@@ -2209,7 +2311,7 @@ static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
- event_notifier_set_handler(&vq->host_notifier, true, NULL);
+ event_notifier_set_handler(&vq->host_notifier, NULL);
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
diff --git a/include/block/aio.h b/include/block/aio.h
index ca551e346f..7df271d2b9 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -44,6 +44,7 @@ void qemu_aio_ref(void *p);
typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
+typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);
struct ThreadPool;
@@ -52,18 +53,12 @@ struct LinuxAioState;
struct AioContext {
GSource source;
- /* Protects all fields from multi-threaded access */
+ /* Used by AioContext users to protect from multi-threaded access. */
QemuRecMutex lock;
- /* The list of registered AIO handlers */
+ /* The list of registered AIO handlers. Protected by ctx->list_lock. */
QLIST_HEAD(, AioHandler) aio_handlers;
- /* This is a simple lock used to protect the aio_handlers list.
- * Specifically, it's used to ensure that no callbacks are removed while
- * we're walking and dispatching callbacks.
- */
- int walking_handlers;
-
/* Used to avoid unnecessary event_notifier_set calls in aio_notify;
* accessed with atomic primitives. If this field is 0, everything
* (file descriptors, bottom halves, timers) will be re-evaluated
@@ -89,17 +84,15 @@ struct AioContext {
*/
uint32_t notify_me;
- /* lock to protect between bh's adders and deleter */
- QemuMutex bh_lock;
+ /* A lock to protect concurrent QEMUBH and AioHandler adders and deleters,
+ * and to ensure that no callbacks are removed while we're walking and
+ * dispatching them.
+ */
+ QemuLockCnt list_lock;
/* Anchor of the list of Bottom Halves belonging to the context */
struct QEMUBH *first_bh;
- /* A simple lock used to protect the first_bh list, and ensure that
- * no callbacks are removed while we're walking and dispatching callbacks.
- */
- int walking_bh;
-
/* Used by aio_notify.
*
* "notified" is used to avoid expensive event_notifier_test_and_clear
@@ -115,7 +108,9 @@ struct AioContext {
bool notified;
EventNotifier notifier;
- /* Thread pool for performing work and receiving completion callbacks */
+ /* Thread pool for performing work and receiving completion callbacks.
+ * Has its own locking.
+ */
struct ThreadPool *thread_pool;
#ifdef CONFIG_LINUX_AIO
@@ -125,11 +120,25 @@ struct AioContext {
struct LinuxAioState *linux_aio;
#endif
- /* TimerLists for calling timers - one per clock type */
+ /* TimerLists for calling timers - one per clock type. Has its own
+ * locking.
+ */
QEMUTimerListGroup tlg;
int external_disable_cnt;
+ /* Number of AioHandlers without .io_poll() */
+ int poll_disable_cnt;
+
+ /* Polling mode parameters */
+ int64_t poll_ns; /* current polling time in nanoseconds */
+ int64_t poll_max_ns; /* maximum polling time in nanoseconds */
+ int64_t poll_grow; /* polling time growth factor */
+ int64_t poll_shrink; /* polling time shrink factor */
+
+ /* Are we in polling mode or monitoring file descriptors? */
+ bool poll_started;
+
/* epoll(7) state used when built with CONFIG_EPOLL */
int epollfd;
bool epoll_enabled;
@@ -167,9 +176,11 @@ void aio_context_unref(AioContext *ctx);
* automatically takes care of calling aio_context_acquire and
* aio_context_release.
*
- * Access to timers and BHs from a thread that has not acquired AioContext
- * is possible. Access to callbacks for now must be done while the AioContext
- * is owned by the thread (FIXME).
+ * Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A
+ * thread still has to call those to avoid being interrupted by the guest.
+ *
+ * Bottom halves, timers and callbacks can be created or removed without
+ * acquiring the AioContext.
*/
void aio_context_acquire(AioContext *ctx);
@@ -295,8 +306,12 @@ bool aio_pending(AioContext *ctx);
/* Dispatch any pending callbacks from the GSource attached to the AioContext.
*
* This is used internally in the implementation of the GSource.
+ *
+ * @dispatch_fds: true to process fds, false to skip them
+ * (can be used as an optimization by callers that know there
+ * are no fds ready)
*/
-bool aio_dispatch(AioContext *ctx);
+bool aio_dispatch(AioContext *ctx, bool dispatch_fds);
/* Make progress in completing AIO work. This can issue new pending
* aio as a result of executing I/O completion or bh callbacks.
@@ -325,8 +340,17 @@ void aio_set_fd_handler(AioContext *ctx,
bool is_external,
IOHandler *io_read,
IOHandler *io_write,
+ AioPollFn *io_poll,
void *opaque);
+/* Set polling begin/end callbacks for a file descriptor that has already been
+ * registered with aio_set_fd_handler. Do nothing if the file descriptor is
+ * not registered.
+ */
+void aio_set_fd_poll(AioContext *ctx, int fd,
+ IOHandler *io_poll_begin,
+ IOHandler *io_poll_end);
+
/* Register an event notifier and associated callbacks. Behaves very similarly
* to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
* will be invoked when using aio_poll().
@@ -337,7 +361,17 @@ void aio_set_fd_handler(AioContext *ctx,
void aio_set_event_notifier(AioContext *ctx,
EventNotifier *notifier,
bool is_external,
- EventNotifierHandler *io_read);
+ EventNotifierHandler *io_read,
+ AioPollFn *io_poll);
+
+/* Set polling begin/end callbacks for an event notifier that has already been
+ * registered with aio_set_event_notifier. Do nothing if the event notifier is
+ * not registered.
+ */
+void aio_set_event_notifier_poll(AioContext *ctx,
+ EventNotifier *notifier,
+ EventNotifierHandler *io_poll_begin,
+ EventNotifierHandler *io_poll_end);
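A hedged sketch of how a backend would use the two new poll entry points together, mirroring the virtio.c host-notifier hunk earlier in this patch; it assumes the QEMU tree, and all my_* handler names are invented:

#include "qemu/osdep.h"
#include "block/aio.h"

static void my_notifier_read(EventNotifier *n)
{
    event_notifier_test_and_clear(n);
    /* ... process completed work ... */
}

static bool my_notifier_poll(void *opaque)
{
    /* Return true only if work was found and handled without blocking. */
    return false;
}

static void my_poll_begin(EventNotifier *n)
{
    /* e.g. suppress further notifications while busy polling */
}

static void my_poll_end(EventNotifier *n)
{
    /* re-enable notifications; caller polls once more after this */
}

static void my_attach(AioContext *ctx, EventNotifier *n)
{
    aio_set_event_notifier(ctx, n, true /* is_external */,
                           my_notifier_read, my_notifier_poll);
    aio_set_event_notifier_poll(ctx, n, my_poll_begin, my_poll_end);
}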
/* Return a GSource that lets the main loop poll the file descriptors attached
* to this AioContext.
@@ -474,4 +508,17 @@ static inline bool aio_context_in_iothread(AioContext *ctx)
*/
void aio_context_setup(AioContext *ctx);
+/**
+ * aio_context_set_poll_params:
+ * @ctx: the aio context
+ * @max_ns: how long to busy poll for, in nanoseconds
+ * @grow: polling time growth factor
+ * @shrink: polling time shrink factor
+ *
+ * Poll mode can be disabled by setting poll_max_ns to 0.
+ */
+void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
+ int64_t grow, int64_t shrink,
+ Error **errp);
+
#endif
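A standalone sketch of the kind of grow/shrink self-tuning these parameters suggest. This is one plausible policy written from the field comments above, not necessarily the exact algorithm in aio-posix.c, and the 32 us cap and factors of 2 are example values:

#include <stdint.h>
#include <stdio.h>

static int64_t poll_ns;                      /* current busy-poll budget */
static const int64_t poll_max_ns = 32000;    /* cap, e.g. 32 us */
static const int64_t poll_grow = 2;
static const int64_t poll_shrink = 2;

/* Called after each event-loop iteration with the time actually spent
 * blocked waiting for an event. */
static void adjust(int64_t blocked_ns)
{
    if (blocked_ns == 0) {
        /* Busy polling caught the event: try a smaller budget next time. */
        poll_ns = poll_shrink ? poll_ns / poll_shrink : 0;
    } else if (poll_ns < poll_max_ns) {
        /* We fell back to blocking: poll for longer next time. */
        poll_ns = poll_ns ? poll_ns * poll_grow : 1000;
        if (poll_ns > poll_max_ns) {
            poll_ns = poll_max_ns;
        }
    }
}

int main(void)
{
    adjust(5000);   /* missed: grow the budget */
    adjust(0);      /* hit: shrink it again */
    printf("poll_ns = %lld ns\n", (long long)poll_ns);
    return 0;
}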
diff --git a/include/block/block.h b/include/block/block.h
index 49bb0b239a..8b0dcdaa70 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -526,8 +526,6 @@ int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);
void bdrv_io_plug(BlockDriverState *bs);
void bdrv_io_unplug(BlockDriverState *bs);
-void bdrv_io_unplugged_begin(BlockDriverState *bs);
-void bdrv_io_unplugged_end(BlockDriverState *bs);
/**
* bdrv_drained_begin:
diff --git a/include/block/block_int.h b/include/block/block_int.h
index 83a423c580..2d92d7edfe 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -184,7 +184,7 @@ struct BlockDriver {
/*
* Flushes all data that was already written to the OS all the way down to
- * the disk (for example raw-posix calls fsync()).
+ * the disk (for example file-posix.c calls fsync()).
*/
int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);
@@ -526,9 +526,8 @@ struct BlockDriverState {
uint64_t write_threshold_offset;
NotifierWithReturn write_threshold_notifier;
- /* counters for nested bdrv_io_plug and bdrv_io_unplugged_begin */
+ /* counter for nested bdrv_io_plug */
unsigned io_plugged;
- unsigned io_plug_disabled;
int quiesce_counter;
};
diff --git a/include/disas/bfd.h b/include/disas/bfd.h
index 8a3488c2c5..0435b8c9f9 100644
--- a/include/disas/bfd.h
+++ b/include/disas/bfd.h
@@ -222,6 +222,10 @@ enum bfd_architecture
bfd_arch_ia64, /* HP/Intel ia64 */
#define bfd_mach_ia64_elf64 64
#define bfd_mach_ia64_elf32 32
+ bfd_arch_nios2, /* Nios II */
+#define bfd_mach_nios2 0
+#define bfd_mach_nios2r1 1
+#define bfd_mach_nios2r2 2
bfd_arch_lm32, /* Lattice Mico32 */
#define bfd_mach_lm32 1
bfd_arch_last
@@ -415,6 +419,8 @@ int print_insn_crisv10 (bfd_vma, disassemble_info*);
int print_insn_microblaze (bfd_vma, disassemble_info*);
int print_insn_ia64 (bfd_vma, disassemble_info*);
int print_insn_lm32 (bfd_vma, disassemble_info*);
+int print_insn_big_nios2 (bfd_vma, disassemble_info*);
+int print_insn_little_nios2 (bfd_vma, disassemble_info*);
#if 0
/* Fetch the disassembler for a given BFD, if that support is available. */
diff --git a/include/elf.h b/include/elf.h
index 1c2975dc82..0dbd3e968b 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -126,6 +126,8 @@ typedef int64_t Elf64_Sxword;
*/
#define EM_S390_OLD 0xA390
+#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
+
#define EM_MICROBLAZE 189
#define EM_MICROBLAZE_OLD 0xBAAB
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index a8c13cee66..bbc9478a50 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -95,15 +95,13 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
* tlb_flush:
* @cpu: CPU whose TLB should be flushed
- * @flush_global: ignored
*
- * Flush the entire TLB for the specified CPU.
- * The flush_global flag is in theory an indicator of whether the whole
- * TLB should be flushed, or only those entries not marked global.
- * In practice QEMU does not implement any global/not global flag for
- * TLB entries, and the argument is ignored.
+ * Flush the entire TLB for the specified CPU. Most CPU architectures
+ * allow the implementation to drop entries from the TLB at any time
+ * so this is generally safe. If more selective flushing is required
+ * use one of the other functions for efficiency.
*/
-void tlb_flush(CPUState *cpu, int flush_global);
+void tlb_flush(CPUState *cpu);
/**
* tlb_flush_page_by_mmuidx:
* @cpu: CPU whose TLB should be flushed
@@ -165,7 +163,7 @@ static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
-static inline void tlb_flush(CPUState *cpu, int flush_global)
+static inline void tlb_flush(CPUState *cpu)
{
}
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 64560f61b4..a10044f08f 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -16,16 +16,12 @@
#ifndef CONFIG_USER_ONLY
-#define DIRTY_MEMORY_VGA 0
-#define DIRTY_MEMORY_CODE 1
-#define DIRTY_MEMORY_MIGRATION 2
-#define DIRTY_MEMORY_NUM 3 /* num of dirty bits */
-
#include "exec/cpu-common.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "exec/memattrs.h"
+#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
@@ -628,6 +624,9 @@ static inline bool memory_region_is_romd(MemoryRegion *mr)
*/
static inline bool memory_region_is_iommu(MemoryRegion *mr)
{
+ if (mr->alias) {
+ return memory_region_is_iommu(mr->alias);
+ }
return mr->iommu_ops;
}
@@ -1537,6 +1536,11 @@ void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
+/* address_space_get_iotlb_entry: translate an address into an IOTLB
+ * entry. Should be called from an RCU critical section.
+ */
+IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
+ bool is_write);
/* address_space_translate: translate an address range into an address space
* into a MemoryRegion and an address range into that section. Should be
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 54d7108a9e..3e79466a44 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -21,6 +21,7 @@
#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
+#include "exec/ramlist.h"
struct RAMBlock {
struct rcu_head rcu;
@@ -35,6 +36,7 @@ struct RAMBlock {
char idstr[256];
/* RCU-enabled, writes protected by the ramlist lock */
QLIST_ENTRY(RAMBlock) next;
+ QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
int fd;
size_t page_size;
};
@@ -50,51 +52,7 @@ static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
return (char *)block->host + offset;
}
-/* The dirty memory bitmap is split into fixed-size blocks to allow growth
- * under RCU. The bitmap for a block can be accessed as follows:
- *
- * rcu_read_lock();
- *
- * DirtyMemoryBlocks *blocks =
- * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
- *
- * ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
- * unsigned long *block = blocks.blocks[idx];
- * ...access block bitmap...
- *
- * rcu_read_unlock();
- *
- * Remember to check for the end of the block when accessing a range of
- * addresses. Move on to the next block if you reach the end.
- *
- * Organization into blocks allows dirty memory to grow (but not shrink) under
- * RCU. When adding new RAMBlocks requires the dirty memory to grow, a new
- * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
- * the same. Other threads can safely access existing blocks while dirty
- * memory is being grown. When no threads are using the old DirtyMemoryBlocks
- * anymore it is freed by RCU (but the underlying blocks stay because they are
- * pointed to from the new DirtyMemoryBlocks).
- */
-#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
-typedef struct {
- struct rcu_head rcu;
- unsigned long *blocks[];
-} DirtyMemoryBlocks;
-
-typedef struct RAMList {
- QemuMutex mutex;
- RAMBlock *mru_block;
- /* RCU-enabled, writes protected by the ramlist lock. */
- QLIST_HEAD(, RAMBlock) blocks;
- DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
- uint32_t version;
-} RAMList;
-extern RAMList ram_list;
-
ram_addr_t last_ram_offset(void);
-void qemu_mutex_lock_ramlist(void);
-void qemu_mutex_unlock_ramlist(void);
-
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
bool share, const char *mem_path,
Error **errp);
diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h
new file mode 100644
index 0000000000..c59880de82
--- /dev/null
+++ b/include/exec/ramlist.h
@@ -0,0 +1,72 @@
+#ifndef RAMLIST_H
+#define RAMLIST_H
+
+#include "qemu/queue.h"
+#include "qemu/thread.h"
+#include "qemu/rcu.h"
+
+typedef struct RAMBlockNotifier RAMBlockNotifier;
+
+#define DIRTY_MEMORY_VGA 0
+#define DIRTY_MEMORY_CODE 1
+#define DIRTY_MEMORY_MIGRATION 2
+#define DIRTY_MEMORY_NUM 3 /* num of dirty bits */
+
+/* The dirty memory bitmap is split into fixed-size blocks to allow growth
+ * under RCU. The bitmap for a block can be accessed as follows:
+ *
+ * rcu_read_lock();
+ *
+ * DirtyMemoryBlocks *blocks =
+ * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ *
+ * ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
+ * unsigned long *block = blocks.blocks[idx];
+ * ...access block bitmap...
+ *
+ * rcu_read_unlock();
+ *
+ * Remember to check for the end of the block when accessing a range of
+ * addresses. Move on to the next block if you reach the end.
+ *
+ * Organization into blocks allows dirty memory to grow (but not shrink) under
+ * RCU. When adding new RAMBlocks requires the dirty memory to grow, a new
+ * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
+ * the same. Other threads can safely access existing blocks while dirty
+ * memory is being grown. When no threads are using the old DirtyMemoryBlocks
+ * anymore it is freed by RCU (but the underlying blocks stay because they are
+ * pointed to from the new DirtyMemoryBlocks).
+ */
+#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
+typedef struct {
+ struct rcu_head rcu;
+ unsigned long *blocks[];
+} DirtyMemoryBlocks;
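A standalone check of the block/bit arithmetic described in the comment above, assuming 4 KiB target pages (TARGET_PAGE_BITS is per-target in QEMU; 12 is just the common case used here):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12                            /* assume 4 KiB pages */
#define DIRTY_MEMORY_BLOCK_SIZE ((uint64_t)256 * 1024 * 8)

int main(void)
{
    uint64_t addr = (uint64_t)3 << 30;                 /* 3 GiB into guest RAM */
    uint64_t page = addr >> TARGET_PAGE_BITS;
    uint64_t idx = page / DIRTY_MEMORY_BLOCK_SIZE;     /* which blocks[idx] array */
    uint64_t bit = page % DIRTY_MEMORY_BLOCK_SIZE;     /* which bit in that block */

    /* Each block covers 2^21 pages, i.e. 8 GiB of RAM with 4 KiB pages. */
    printf("page %" PRIu64 " -> block %" PRIu64 ", bit %" PRIu64 "\n",
           page, idx, bit);
    return 0;
}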
+
+typedef struct RAMList {
+ QemuMutex mutex;
+ RAMBlock *mru_block;
+ /* RCU-enabled, writes protected by the ramlist lock. */
+ QLIST_HEAD(, RAMBlock) blocks;
+ DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
+ uint32_t version;
+ QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
+} RAMList;
+extern RAMList ram_list;
+
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
+
+struct RAMBlockNotifier {
+ void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size);
+ void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size);
+ QLIST_ENTRY(RAMBlockNotifier) next;
+};
+
+void ram_block_notifier_add(RAMBlockNotifier *n);
+void ram_block_notifier_remove(RAMBlockNotifier *n);
+void ram_block_notify_add(void *host, size_t size);
+void ram_block_notify_remove(void *host, size_t size);
+
+
+#endif /* RAMLIST_H */
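A minimal sketch of hooking into the new notifier list; it assumes compilation inside the QEMU tree, uses only the declarations above plus error_report(), and the trace_* callback names are invented:

#include "qemu/osdep.h"
#include "exec/ramlist.h"
#include "qemu/error-report.h"

static void trace_ram_added(RAMBlockNotifier *n, void *host, size_t size)
{
    error_report("RAM block added: host=%p size=%zu", host, size);
}

static void trace_ram_removed(RAMBlockNotifier *n, void *host, size_t size)
{
    error_report("RAM block removed: host=%p size=%zu", host, size);
}

static RAMBlockNotifier ram_trace_notifier = {
    .ram_block_added = trace_ram_added,
    .ram_block_removed = trace_ram_removed,
};

static void register_ram_trace_notifier(void)
{
    /* From now on every qemu_ram_alloc/free is reported to the notifier. */
    ram_block_notifier_add(&ram_trace_notifier);
}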
diff --git a/include/glib-compat.h b/include/glib-compat.h
index acf254d2a0..0cd24ffbe9 100644
--- a/include/glib-compat.h
+++ b/include/glib-compat.h
@@ -39,7 +39,7 @@ static inline gint64 qemu_g_get_monotonic_time(void)
#define g_get_monotonic_time() qemu_g_get_monotonic_time()
#endif
-#ifdef _WIN32
+#if defined(_WIN32) && !GLIB_CHECK_VERSION(2, 50, 0)
/*
* g_poll has a problem on Windows when using
* timeouts < 10ms, so use wrapper.
diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h
index 154f3b82f6..4cc3630e61 100644
--- a/include/hw/acpi/acpi-defs.h
+++ b/include/hw/acpi/acpi-defs.h
@@ -191,10 +191,8 @@ struct AcpiFadtDescriptorRev5_1 {
typedef struct AcpiFadtDescriptorRev5_1 AcpiFadtDescriptorRev5_1;
-enum {
- ACPI_FADT_ARM_USE_PSCI_G_0_2 = 0,
- ACPI_FADT_ARM_PSCI_USE_HVC = 1,
-};
+#define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0)
+#define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1)
/*
* Serial Port Console Redirection Table (SPCR), Rev. 1.02
@@ -290,7 +288,7 @@ typedef struct AcpiMultipleApicTable AcpiMultipleApicTable;
#define ACPI_APIC_XRUPT_SOURCE 8
#define ACPI_APIC_LOCAL_X2APIC 9
#define ACPI_APIC_LOCAL_X2APIC_NMI 10
-#define ACPI_APIC_GENERIC_INTERRUPT 11
+#define ACPI_APIC_GENERIC_CPU_INTERFACE 11
#define ACPI_APIC_GENERIC_DISTRIBUTOR 12
#define ACPI_APIC_GENERIC_MSI_FRAME 13
#define ACPI_APIC_GENERIC_REDISTRIBUTOR 14
@@ -361,7 +359,7 @@ struct AcpiMadtLocalX2ApicNmi {
} QEMU_PACKED;
typedef struct AcpiMadtLocalX2ApicNmi AcpiMadtLocalX2ApicNmi;
-struct AcpiMadtGenericInterrupt {
+struct AcpiMadtGenericCpuInterface {
ACPI_SUB_HEADER_DEF
uint16_t reserved;
uint32_t cpu_interface_number;
@@ -378,7 +376,10 @@ struct AcpiMadtGenericInterrupt {
uint64_t arm_mpidr;
} QEMU_PACKED;
-typedef struct AcpiMadtGenericInterrupt AcpiMadtGenericInterrupt;
+typedef struct AcpiMadtGenericCpuInterface AcpiMadtGenericCpuInterface;
+
+/* GICC CPU Interface Flags */
+#define ACPI_MADT_GICC_ENABLED 1
struct AcpiMadtGenericDistributor {
ACPI_SUB_HEADER_DEF
@@ -427,21 +428,9 @@ typedef struct AcpiMadtGenericTranslator AcpiMadtGenericTranslator;
/*
* Generic Timer Description Table (GTDT)
*/
-
-#define ACPI_GTDT_INTERRUPT_MODE (1 << 0)
-#define ACPI_GTDT_INTERRUPT_POLARITY (1 << 1)
-#define ACPI_GTDT_ALWAYS_ON (1 << 2)
-
-/* Triggering */
-
-#define ACPI_LEVEL_SENSITIVE ((uint8_t) 0x00)
-#define ACPI_EDGE_SENSITIVE ((uint8_t) 0x01)
-
-/* Polarity */
-
-#define ACPI_ACTIVE_HIGH ((uint8_t) 0x00)
-#define ACPI_ACTIVE_LOW ((uint8_t) 0x01)
-#define ACPI_ACTIVE_BOTH ((uint8_t) 0x02)
+#define ACPI_GTDT_INTERRUPT_MODE_LEVEL (0 << 0)
+#define ACPI_GTDT_INTERRUPT_MODE_EDGE (1 << 0)
+#define ACPI_GTDT_CAP_ALWAYS_ON (1 << 2)
struct AcpiGenericTimerTable {
ACPI_TABLE_HEADER_DEF
@@ -638,8 +627,20 @@ struct AcpiDmarHardwareUnit {
} QEMU_PACKED;
typedef struct AcpiDmarHardwareUnit AcpiDmarHardwareUnit;
+/* Type 2: Root Port ATS Capability Reporting Structure */
+struct AcpiDmarRootPortATS {
+ uint16_t type;
+ uint16_t length;
+ uint8_t flags;
+ uint8_t reserved;
+ uint16_t pci_segment;
+ AcpiDmarDeviceScope scope[0];
+} QEMU_PACKED;
+typedef struct AcpiDmarRootPortATS AcpiDmarRootPortATS;
+
/* Masks for Flags field above */
#define ACPI_DMAR_INCLUDE_PCI_ALL 1
+#define ACPI_DMAR_ATSR_ALL_PORTS 1
/*
* Input Output Remapping Table (IORT)
diff --git a/include/hw/acpi/acpi_dev_interface.h b/include/hw/acpi/acpi_dev_interface.h
index 901a4ae876..71d3c48e7d 100644
--- a/include/hw/acpi/acpi_dev_interface.h
+++ b/include/hw/acpi/acpi_dev_interface.h
@@ -57,6 +57,6 @@ typedef struct AcpiDeviceIfClass {
void (*ospm_status)(AcpiDeviceIf *adev, ACPIOSTInfoList ***list);
void (*send_event)(AcpiDeviceIf *adev, AcpiEventStatusBits ev);
void (*madt_cpu)(AcpiDeviceIf *adev, int uid,
- CPUArchIdList *apic_ids, GArray *entry);
+ const CPUArchIdList *apic_ids, GArray *entry);
} AcpiDeviceIfClass;
#endif
diff --git a/include/hw/acpi/memory_hotplug.h b/include/hw/acpi/memory_hotplug.h
index d2c7452397..db8ebc9cea 100644
--- a/include/hw/acpi/memory_hotplug.h
+++ b/include/hw/acpi/memory_hotplug.h
@@ -30,7 +30,7 @@ typedef struct MemHotplugState {
} MemHotplugState;
void acpi_memory_hotplug_init(MemoryRegion *as, Object *owner,
- MemHotplugState *state);
+ MemHotplugState *state, uint16_t io_base);
void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st,
DeviceState *dev, Error **errp);
@@ -47,11 +47,7 @@ extern const VMStateDescription vmstate_memory_hotplug;
void acpi_memory_ospm_status(MemHotplugState *mem_st, ACPIOSTInfoList ***list);
-#define MEMORY_HOTPLUG_DEVICE "MHPD"
-#define MEMORY_SLOT_SCAN_METHOD "MSCN"
-#define MEMORY_HOTPLUG_HANDLER_PATH "\\_SB.PCI0." \
- MEMORY_HOTPLUG_DEVICE "." MEMORY_SLOT_SCAN_METHOD
-
-void build_memory_hotplug_aml(Aml *ctx, uint32_t nr_mem,
- uint16_t io_base, uint16_t io_len);
+void build_memory_hotplug_aml(Aml *table, uint32_t nr_mem,
+ const char *res_root,
+ const char *event_handler_method);
#endif
diff --git a/include/hw/acpi/pc-hotplug.h b/include/hw/acpi/pc-hotplug.h
index 6a8d268f84..31bc9191c3 100644
--- a/include/hw/acpi/pc-hotplug.h
+++ b/include/hw/acpi/pc-hotplug.h
@@ -29,29 +29,6 @@
#define PIIX4_CPU_HOTPLUG_IO_BASE 0xaf00
#define CPU_HOTPLUG_RESOURCE_DEVICE PRES
-#define ACPI_MEMORY_HOTPLUG_IO_LEN 24
#define ACPI_MEMORY_HOTPLUG_BASE 0x0a00
-#define MEMORY_SLOTS_NUMBER "MDNR"
-#define MEMORY_HOTPLUG_IO_REGION "HPMR"
-#define MEMORY_SLOT_ADDR_LOW "MRBL"
-#define MEMORY_SLOT_ADDR_HIGH "MRBH"
-#define MEMORY_SLOT_SIZE_LOW "MRLL"
-#define MEMORY_SLOT_SIZE_HIGH "MRLH"
-#define MEMORY_SLOT_PROXIMITY "MPX"
-#define MEMORY_SLOT_ENABLED "MES"
-#define MEMORY_SLOT_INSERT_EVENT "MINS"
-#define MEMORY_SLOT_REMOVE_EVENT "MRMV"
-#define MEMORY_SLOT_EJECT "MEJ"
-#define MEMORY_SLOT_SLECTOR "MSEL"
-#define MEMORY_SLOT_OST_EVENT "MOEV"
-#define MEMORY_SLOT_OST_STATUS "MOSC"
-#define MEMORY_SLOT_LOCK "MLCK"
-#define MEMORY_SLOT_STATUS_METHOD "MRST"
-#define MEMORY_SLOT_CRS_METHOD "MCRS"
-#define MEMORY_SLOT_OST_METHOD "MOST"
-#define MEMORY_SLOT_PROXIMITY_METHOD "MPXM"
-#define MEMORY_SLOT_EJECT_METHOD "MEJ0"
-#define MEMORY_SLOT_NOTIFY_METHOD "MTFY"
-
#endif
diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h
index 5406b498d7..1ab5deaa08 100644
--- a/include/hw/arm/aspeed_soc.h
+++ b/include/hw/arm/aspeed_soc.h
@@ -27,8 +27,9 @@ typedef struct AspeedSoCState {
DeviceState parent;
/*< public >*/
- ARMCPU *cpu;
+ ARMCPU cpu;
MemoryRegion iomem;
+ MemoryRegion sram;
AspeedVICState vic;
AspeedTimerCtrlState timerctrl;
AspeedI2CState i2c;
@@ -46,6 +47,7 @@ typedef struct AspeedSoCInfo {
const char *cpu_model;
uint32_t silicon_rev;
hwaddr sdram_base;
+ uint64_t sram_size;
int spis_num;
const hwaddr *spi_bases;
const char *fmc_typename;
diff --git a/include/hw/arm/virt-acpi-build.h b/include/hw/arm/virt-acpi-build.h
deleted file mode 100644
index f5ec749b8f..0000000000
--- a/include/hw/arm/virt-acpi-build.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- *
- * Copyright (c) 2015 HUAWEI TECHNOLOGIES CO.,LTD.
- *
- * Author: Shannon Zhao <zhaoshenglong@huawei.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2 or later, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef QEMU_VIRT_ACPI_BUILD_H
-#define QEMU_VIRT_ACPI_BUILD_H
-
-#include "qemu-common.h"
-#include "hw/arm/virt.h"
-#include "qemu/notify.h"
-
-#define ACPI_GICC_ENABLED 1
-
-typedef struct VirtGuestInfo {
- int smp_cpus;
- FWCfgState *fw_cfg;
- const MemMapEntry *memmap;
- const int *irqmap;
- bool use_highmem;
- int gic_version;
- bool no_its;
-} VirtGuestInfo;
-
-
-typedef struct VirtGuestInfoState {
- VirtGuestInfo info;
- Notifier machine_done;
-} VirtGuestInfoState;
-
-void virt_acpi_setup(VirtGuestInfo *guest_info);
-
-#endif
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 9650193253..58ce74e0e5 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -32,10 +32,15 @@
#include "qemu-common.h"
#include "exec/hwaddr.h"
+#include "qemu/notify.h"
+#include "hw/boards.h"
+#include "hw/arm/arm.h"
#define NUM_GICV2M_SPIS 64
#define NUM_VIRTIO_TRANSPORTS 32
+#define ARCH_GICV3_MAINT_IRQ 9
+
#define ARCH_TIMER_VIRT_IRQ 11
#define ARCH_TIMER_S_EL1_IRQ 13
#define ARCH_TIMER_NS_EL1_IRQ 14
@@ -74,5 +79,42 @@ typedef struct MemMapEntry {
hwaddr size;
} MemMapEntry;
+typedef struct {
+ MachineClass parent;
+ bool disallow_affinity_adjustment;
+ bool no_its;
+ bool no_pmu;
+ bool claim_edge_triggered_timers;
+} VirtMachineClass;
+
+typedef struct {
+ MachineState parent;
+ Notifier machine_done;
+ FWCfgState *fw_cfg;
+ bool secure;
+ bool highmem;
+ bool virt;
+ int32_t gic_version;
+ struct arm_boot_info bootinfo;
+ const MemMapEntry *memmap;
+ const int *irqmap;
+ int smp_cpus;
+ void *fdt;
+ int fdt_size;
+ uint32_t clock_phandle;
+ uint32_t gic_phandle;
+ uint32_t msi_phandle;
+ int psci_conduit;
+} VirtMachineState;
+
+#define TYPE_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
+#define VIRT_MACHINE(obj) \
+ OBJECT_CHECK(VirtMachineState, (obj), TYPE_VIRT_MACHINE)
+#define VIRT_MACHINE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtMachineClass, obj, TYPE_VIRT_MACHINE)
+#define VIRT_MACHINE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtMachineClass, klass, TYPE_VIRT_MACHINE)
+
+void virt_acpi_setup(VirtMachineState *vms);
-#endif
+#endif /* QEMU_ARM_VIRT_H */
diff --git a/include/hw/boards.h b/include/hw/boards.h
index a51da9c440..ac891a828b 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -135,7 +135,7 @@ struct MachineClass {
HotplugHandler *(*get_hotplug_handler)(MachineState *machine,
DeviceState *dev);
unsigned (*cpu_index_to_socket_id)(unsigned cpu_index);
- CPUArchIdList *(*possible_cpu_arch_ids)(MachineState *machine);
+ const CPUArchIdList *(*possible_cpu_arch_ids)(MachineState *machine);
HotpluggableCPUList *(*query_hotpluggable_cpus)(MachineState *machine);
};
diff --git a/include/hw/compat.h b/include/hw/compat.h
index 8dfc7a38c0..34e9b4a660 100644
--- a/include/hw/compat.h
+++ b/include/hw/compat.h
@@ -1,6 +1,17 @@
#ifndef HW_COMPAT_H
#define HW_COMPAT_H
+#define HW_COMPAT_2_8 \
+ {\
+ .driver = "fw_cfg_mem",\
+ .property = "x-file-slots",\
+ .value = stringify(0x10),\
+ },{\
+ .driver = "fw_cfg_io",\
+ .property = "x-file-slots",\
+ .value = stringify(0x10),\
+ },
+
#define HW_COMPAT_2_7 \
{\
.driver = "virtio-pci",\
diff --git a/include/hw/dma/xlnx_dpdma.h b/include/hw/dma/xlnx_dpdma.h
index 664df28ae6..7a304a5bb4 100644
--- a/include/hw/dma/xlnx_dpdma.h
+++ b/include/hw/dma/xlnx_dpdma.h
@@ -53,7 +53,8 @@ typedef struct XlnxDPDMAState XlnxDPDMAState;
* data to the buffer specified by
* dpdma_set_host_data_location().
*
- * Returns The number of bytes transfered by the DPDMA or 0 if an error occured.
+ * Returns The number of bytes transferred by the DPDMA
+ * or 0 if an error occurred.
*
* @s The DPDMA state.
* @channel The channel to start.
diff --git a/include/hw/hw.h b/include/hw/hw.h
index 3669ebd916..e22d4ce5fa 100644
--- a/include/hw/hw.h
+++ b/include/hw/hw.h
@@ -12,11 +12,7 @@
#include "hw/irq.h"
#include "migration/vmstate.h"
#include "qemu/module.h"
-
-typedef void QEMUResetHandler(void *opaque);
-
-void qemu_register_reset(QEMUResetHandler *func, void *opaque);
-void qemu_unregister_reset(QEMUResetHandler *func, void *opaque);
+#include "sysemu/reset.h"
void QEMU_NORETURN hw_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
diff --git a/include/hw/i2c/i2c.h b/include/hw/i2c/i2c.h
index c4085aa366..2ce611d4c8 100644
--- a/include/hw/i2c/i2c.h
+++ b/include/hw/i2c/i2c.h
@@ -32,14 +32,22 @@ typedef struct I2CSlaveClass
/* Callbacks provided by the device. */
int (*init)(I2CSlave *dev);
- /* Master to slave. */
+ /* Master to slave. Returns non-zero for a NAK, 0 for success. */
int (*send)(I2CSlave *s, uint8_t data);
- /* Slave to master. */
+ /*
+ * Slave to master. This cannot fail; the device should always
+ * return something here. Negative values probably result in 0xff
+ * and a possible log from the driver, and shouldn't be used.
+ */
int (*recv)(I2CSlave *s);
- /* Notify the slave of a bus state change. */
- void (*event)(I2CSlave *s, enum i2c_event event);
+ /*
+ * Notify the slave of a bus state change. For a start event,
+ * returns non-zero to NAK the operation. For other events the
+ * return code is not used and should be zero.
+ */
+ int (*event)(I2CSlave *s, enum i2c_event event);
} I2CSlaveClass;
struct I2CSlave
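A hedged sketch of a slave following the revised contract: NAK a start event while busy, ACK writes, and always return a byte on reads. ToyI2CState and all toy_* names are invented, QOM type registration is omitted, and the direct cast relies on the I2CSlave being the first member:

#include "qemu/osdep.h"
#include "qom/object.h"
#include "hw/i2c/i2c.h"

typedef struct ToyI2CState {
    I2CSlave parent_obj;       /* first member, so a plain cast is valid */
    uint8_t reg;
    bool busy;
} ToyI2CState;

static int toy_i2c_event(I2CSlave *s, enum i2c_event event)
{
    ToyI2CState *t = (ToyI2CState *)s;

    if (event == I2C_START_SEND && t->busy) {
        return 1;              /* non-zero on a start event NAKs the transfer */
    }
    return 0;                  /* other events: return code unused, keep 0 */
}

static int toy_i2c_send(I2CSlave *s, uint8_t data)
{
    ((ToyI2CState *)s)->reg = data;
    return 0;                  /* 0 = ACK, non-zero = NAK */
}

static int toy_i2c_recv(I2CSlave *s)
{
    /* Must always produce a byte; negative values are discouraged. */
    return ((ToyI2CState *)s)->reg;
}

static void toy_i2c_class_init(ObjectClass *klass, void *data)
{
    I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);

    k->event = toy_i2c_event;
    k->send = toy_i2c_send;
    k->recv = toy_i2c_recv;
}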
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index b22e699c46..5a20c5e38e 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -361,7 +361,7 @@ uint16_t pvpanic_port(void);
/* acpi-build.c */
void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
- CPUArchIdList *apic_ids, GArray *entry);
+ const CPUArchIdList *apic_ids, GArray *entry);
/* e820 types */
#define E820_RAM 1
@@ -375,14 +375,15 @@ int e820_get_num_entries(void);
bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
#define PC_COMPAT_2_8 \
-
-#define PC_COMPAT_2_7 \
- HW_COMPAT_2_7 \
+ HW_COMPAT_2_8 \
{\
.driver = "kvmclock",\
.property = "x-mach-use-reliable-get-clock",\
.value = "off",\
},\
+
+#define PC_COMPAT_2_7 \
+ HW_COMPAT_2_7 \
{\
.driver = TYPE_X86_CPU,\
.property = "l3-cache",\
diff --git a/include/hw/i386/x86-iommu.h b/include/hw/i386/x86-iommu.h
index 0c89d9835b..361c07cdc6 100644
--- a/include/hw/i386/x86-iommu.h
+++ b/include/hw/i386/x86-iommu.h
@@ -73,6 +73,7 @@ typedef struct IEC_Notifier IEC_Notifier;
struct X86IOMMUState {
SysBusDevice busdev;
bool intr_supported; /* Whether vIOMMU supports IR */
+ bool dt_supported; /* Whether vIOMMU supports DT */
IommuType type; /* IOMMU type - AMD/Intel */
QLIST_HEAD(, IEC_Notifier) iec_notifiers; /* IEC notify list */
};
diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h
index f4c349a2ef..af3ca18e2f 100644
--- a/include/hw/intc/arm_gic_common.h
+++ b/include/hw/intc/arm_gic_common.h
@@ -55,6 +55,8 @@ typedef struct GICState {
qemu_irq parent_irq[GIC_NCPU];
qemu_irq parent_fiq[GIC_NCPU];
+ qemu_irq parent_virq[GIC_NCPU];
+ qemu_irq parent_vfiq[GIC_NCPU];
/* GICD_CTLR; for a GIC with the security extensions the NS banked version
* of this register is just an alias of bit 1 of the S banked version.
*/
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index 341a3118f0..4156051d98 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -38,6 +38,9 @@
/* Number of SGI target-list bits */
#define GICV3_TARGETLIST_BITS 16
+/* Maximum number of list registers (architectural limit) */
+#define GICV3_LR_MAX 16
+
/* Minimum BPR for Secure, or when security not enabled */
#define GIC_MIN_BPR 0
/* Minimum BPR for Nonsecure when security is enabled */
@@ -145,6 +148,9 @@ struct GICv3CPUState {
CPUState *cpu;
qemu_irq parent_irq;
qemu_irq parent_fiq;
+ qemu_irq parent_virq;
+ qemu_irq parent_vfiq;
+ qemu_irq maintenance_irq;
/* Redistributor */
uint32_t level; /* Current IRQ level */
@@ -173,6 +179,21 @@ struct GICv3CPUState {
uint64_t icc_igrpen[3];
uint64_t icc_ctlr_el3;
+ /* Virtualization control interface */
+ uint64_t ich_apr[3][4]; /* ich_apr[GICV3_G1][x] never used */
+ uint64_t ich_hcr_el2;
+ uint64_t ich_lr_el2[GICV3_LR_MAX];
+ uint64_t ich_vmcr_el2;
+
+ /* Properties of the CPU interface. These are initialized from
+ * the settings in the CPU proper.
+ * If the number of implemented list registers is 0 then the
+ * virtualization support is not implemented.
+ */
+ int num_list_regs;
+ int vpribits; /* number of virtual priority bits */
+ int vprebits; /* number of virtual preemption bits */
+
/* Current highest priority pending interrupt for this CPU.
* This is cached information that can be recalculated from the
* real state above; it doesn't need to be migrated.
diff --git a/include/hw/loader.h b/include/hw/loader.h
index 0c864cfd60..0dbd8d6bf3 100644
--- a/include/hw/loader.h
+++ b/include/hw/loader.h
@@ -180,7 +180,8 @@ MemoryRegion *rom_add_blob(const char *name, const void *blob, size_t len,
size_t max_len, hwaddr addr,
const char *fw_file_name,
FWCfgReadCallback fw_callback,
- void *callback_opaque, AddressSpace *as);
+ void *callback_opaque, AddressSpace *as,
+ bool read_only);
int rom_add_elf_program(const char *name, void *data, size_t datasize,
size_t romsize, hwaddr addr, AddressSpace *as);
int rom_check_and_register_reset(void);
@@ -194,7 +195,7 @@ void hmp_info_roms(Monitor *mon, const QDict *qdict);
#define rom_add_file_fixed(_f, _a, _i) \
rom_add_file(_f, NULL, _a, _i, false, NULL, NULL)
#define rom_add_blob_fixed(_f, _b, _l, _a) \
- rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, NULL)
+ rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, NULL, true)
#define rom_add_file_mr(_f, _mr, _i) \
rom_add_file(_f, NULL, 0, _i, false, _mr, NULL)
#define rom_add_file_as(_f, _as, _i) \
@@ -202,7 +203,7 @@ void hmp_info_roms(Monitor *mon, const QDict *qdict);
#define rom_add_file_fixed_as(_f, _a, _i, _as) \
rom_add_file(_f, NULL, _a, _i, false, NULL, _as)
#define rom_add_blob_fixed_as(_f, _b, _l, _a, _as) \
- rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, _as)
+ rom_add_blob(_f, _b, _l, _l, _a, NULL, NULL, NULL, _as, true)
#define PC_ROM_MIN_VGA 0xc0000
#define PC_ROM_MIN_OPTION 0xc8000
diff --git a/include/hw/m68k/mcf.h b/include/hw/m68k/mcf.h
index fdae229502..bf43998d9b 100644
--- a/include/hw/m68k/mcf.h
+++ b/include/hw/m68k/mcf.h
@@ -21,10 +21,6 @@ qemu_irq *mcf_intc_init(struct MemoryRegion *sysmem,
hwaddr base,
M68kCPU *cpu);
-/* mcf_fec.c */
-void mcf_fec_init(struct MemoryRegion *sysmem, NICInfo *nd,
- hwaddr base, qemu_irq *irq);
-
/* mcf5206.c */
qemu_irq *mcf5206_init(struct MemoryRegion *sysmem,
uint32_t base, M68kCPU *cpu);
diff --git a/include/hw/m68k/mcf_fec.h b/include/hw/m68k/mcf_fec.h
new file mode 100644
index 0000000000..7f029f7b59
--- /dev/null
+++ b/include/hw/m68k/mcf_fec.h
@@ -0,0 +1,13 @@
+/*
+ * Definitions for the ColdFire Fast Ethernet Controller emulation.
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define TYPE_MCF_FEC_NET "mcf-fec"
+#define MCF_FEC_NET(obj) OBJECT_CHECK(mcf_fec_state, (obj), TYPE_MCF_FEC_NET)
+
+#define FEC_NUM_IRQ 13
diff --git a/include/hw/misc/aspeed_scu.h b/include/hw/misc/aspeed_scu.h
index 14ffc43de8..bd4ac013f9 100644
--- a/include/hw/misc/aspeed_scu.h
+++ b/include/hw/misc/aspeed_scu.h
@@ -32,6 +32,7 @@ typedef struct AspeedSCUState {
} AspeedSCUState;
#define AST2400_A0_SILICON_REV 0x02000303U
+#define AST2400_A1_SILICON_REV 0x02010303U
#define AST2500_A0_SILICON_REV 0x04000303U
#define AST2500_A1_SILICON_REV 0x04010303U
diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h
index 5c27a1f0d5..b980cbaebf 100644
--- a/include/hw/nvram/fw_cfg.h
+++ b/include/hw/nvram/fw_cfg.h
@@ -136,6 +136,7 @@ void fw_cfg_add_file(FWCfgState *s, const char *filename, void *data,
* @callback_opaque: argument to be passed into callback function
* @data: pointer to start of item data
* @len: size of item data
+ * @read_only: is file read only
*
* Add a new NAMED fw_cfg item as a raw "blob" of the given size. The data
* referenced by the starting pointer is only linked, NOT copied, into the
@@ -151,7 +152,7 @@ void fw_cfg_add_file(FWCfgState *s, const char *filename, void *data,
*/
void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
FWCfgReadCallback callback, void *callback_opaque,
- void *data, size_t len);
+ void *data, size_t len, bool read_only);
/**
* fw_cfg_modify_file:
diff --git a/include/hw/nvram/fw_cfg_keys.h b/include/hw/nvram/fw_cfg_keys.h
index 0f3e871884..b6919451f5 100644
--- a/include/hw/nvram/fw_cfg_keys.h
+++ b/include/hw/nvram/fw_cfg_keys.h
@@ -29,8 +29,7 @@
#define FW_CFG_FILE_DIR 0x19
#define FW_CFG_FILE_FIRST 0x20
-#define FW_CFG_FILE_SLOTS 0x10
-#define FW_CFG_MAX_ENTRY (FW_CFG_FILE_FIRST + FW_CFG_FILE_SLOTS)
+#define FW_CFG_FILE_SLOTS_MIN 0x10
#define FW_CFG_WRITE_CHANNEL 0x4000
#define FW_CFG_ARCH_LOCAL 0x8000
diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h
index 94486fdd37..53b6760c16 100644
--- a/include/hw/pci-host/q35.h
+++ b/include/hw/pci-host/q35.h
@@ -180,7 +180,7 @@ typedef struct Q35PCIHost {
uint64_t mch_mcfg_base(void);
/*
- * Arbitary but unique BNF number for IOAPIC device.
+ * Arbitrary but unique BNF number for IOAPIC device.
*
* TODO: make sure there is no conflict with a real PCI bus
*/
diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h
index 056d25e53c..163c5195b6 100644
--- a/include/hw/pci/pcie.h
+++ b/include/hw/pci/pcie.h
@@ -74,6 +74,9 @@ struct PCIExpressDevice {
/* AER */
uint16_t aer_cap;
PCIEAERLog aer_log;
+
+ /* Offset of ATS capability in config space */
+ uint16_t ats_cap;
};
#define COMPAT_PROP_PCP "power_controller_present"
@@ -120,16 +123,7 @@ void pcie_add_capability(PCIDevice *dev,
void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn);
void pcie_dev_ser_num_init(PCIDevice *dev, uint16_t offset, uint64_t ser_num);
-
-extern const VMStateDescription vmstate_pcie_device;
-
-#define VMSTATE_PCIE_DEVICE(_field, _state) { \
- .name = (stringify(_field)), \
- .size = sizeof(PCIDevice), \
- .vmsd = &vmstate_pcie_device, \
- .flags = VMS_STRUCT, \
- .offset = vmstate_offset_value(_state, _field, PCIDevice), \
-}
+void pcie_ats_init(PCIDevice *dev, uint16_t offset);
void pcie_cap_slot_hotplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
diff --git a/include/hw/pci/pcie_aer.h b/include/hw/pci/pcie_aer.h
index c2ee4e2bdb..526802bd31 100644
--- a/include/hw/pci/pcie_aer.h
+++ b/include/hw/pci/pcie_aer.h
@@ -44,7 +44,6 @@ struct PCIEAERLog {
*/
#define PCIE_AER_LOG_MAX_DEFAULT 8
#define PCIE_AER_LOG_MAX_LIMIT 128
-#define PCIE_AER_LOG_MAX_UNSET 0xffff
uint16_t log_max;
/* Error log. log_max-sized array */
@@ -87,7 +86,8 @@ struct PCIEAERErr {
extern const VMStateDescription vmstate_pcie_aer_log;
-int pcie_aer_init(PCIDevice *dev, uint16_t offset, uint16_t size);
+int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset,
+ uint16_t size, Error **errp);
void pcie_aer_exit(PCIDevice *dev);
void pcie_aer_write_config(PCIDevice *dev,
uint32_t addr, uint32_t val, int len);
diff --git a/include/hw/register.h b/include/hw/register.h
index 8c12233b75..5b6dc32091 100644
--- a/include/hw/register.h
+++ b/include/hw/register.h
@@ -92,7 +92,7 @@ struct RegisterInfo {
* This structure is used to group all of the individual registers which are
* modeled using the RegisterInfo structure.
*
- * @r is an aray containing of all the relevent RegisterInfo structures.
+ * @r is an array containing all the relevant RegisterInfo structures.
*
* @num_elements is the number of elements in the array r
*
diff --git a/include/hw/smbios/smbios.h b/include/hw/smbios/smbios.h
index 1cd53cc58c..31e8d5f47e 100644
--- a/include/hw/smbios/smbios.h
+++ b/include/hw/smbios/smbios.h
@@ -257,7 +257,7 @@ struct smbios_type_127 {
struct smbios_structure_header header;
} QEMU_PACKED;
-void smbios_entry_add(QemuOpts *opts);
+void smbios_entry_add(QemuOpts *opts, Error **errp);
void smbios_set_cpuid(uint32_t version, uint32_t features);
void smbios_set_defaults(const char *manufacturer, const char *product,
const char *version, bool legacy_mode,
diff --git a/include/hw/sparc/sparc64.h b/include/hw/sparc/sparc64.h
new file mode 100644
index 0000000000..7748939a97
--- /dev/null
+++ b/include/hw/sparc/sparc64.h
@@ -0,0 +1,5 @@
+
+SPARCCPU *sparc64_cpu_devinit(const char *cpu_model,
+ const char *dflt_cpu_model, uint64_t prom_addr);
+
+void sparc64_cpu_set_ivec_irq(void *opaque, int irq, int level);
diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h
index bdfbcc0ffa..1f557313fa 100644
--- a/include/hw/ssi/aspeed_smc.h
+++ b/include/hw/ssi/aspeed_smc.h
@@ -44,10 +44,12 @@ typedef struct AspeedSMCController {
const AspeedSegments *segments;
hwaddr flash_window_base;
uint32_t flash_window_size;
+ bool has_dma;
+ uint32_t nregs;
} AspeedSMCController;
typedef struct AspeedSMCFlash {
- const struct AspeedSMCState *controller;
+ struct AspeedSMCState *controller;
uint8_t id;
uint32_t size;
diff --git a/include/hw/timer/sun4v-rtc.h b/include/hw/timer/sun4v-rtc.h
new file mode 100644
index 0000000000..407278f918
--- /dev/null
+++ b/include/hw/timer/sun4v-rtc.h
@@ -0,0 +1 @@
+void sun4v_rtc_init(hwaddr addr);
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 6e90703cad..c3cf4a72bc 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -11,6 +11,8 @@
#ifndef VHOST_BACKEND_H
#define VHOST_BACKEND_H
+#include "exec/memory.h"
+
typedef enum VhostBackendType {
VHOST_BACKEND_TYPE_NONE = 0,
VHOST_BACKEND_TYPE_KERNEL = 1,
@@ -32,6 +34,7 @@ typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev);
typedef int (*vhost_net_set_backend_op)(struct vhost_dev *dev,
struct vhost_vring_file *file);
+typedef int (*vhost_net_set_mtu_op)(struct vhost_dev *dev, uint16_t mtu);
typedef int (*vhost_scsi_set_endpoint_op)(struct vhost_dev *dev,
struct vhost_scsi_target *target);
typedef int (*vhost_scsi_clear_endpoint_op)(struct vhost_dev *dev,
@@ -76,6 +79,14 @@ typedef bool (*vhost_backend_can_merge_op)(struct vhost_dev *dev,
typedef int (*vhost_vsock_set_guest_cid_op)(struct vhost_dev *dev,
uint64_t guest_cid);
typedef int (*vhost_vsock_set_running_op)(struct vhost_dev *dev, int start);
+typedef void (*vhost_set_iotlb_callback_op)(struct vhost_dev *dev,
+ int enabled);
+typedef int (*vhost_update_device_iotlb_op)(struct vhost_dev *dev,
+ uint64_t iova, uint64_t uaddr,
+ uint64_t len,
+ IOMMUAccessFlags perm);
+typedef int (*vhost_invalidate_device_iotlb_op)(struct vhost_dev *dev,
+ uint64_t iova, uint64_t len);
typedef struct VhostOps {
VhostBackendType backend_type;
@@ -83,6 +94,7 @@ typedef struct VhostOps {
vhost_backend_cleanup vhost_backend_cleanup;
vhost_backend_memslots_limit vhost_backend_memslots_limit;
vhost_net_set_backend_op vhost_net_set_backend;
+ vhost_net_set_mtu_op vhost_net_set_mtu;
vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint;
vhost_scsi_clear_endpoint_op vhost_scsi_clear_endpoint;
vhost_scsi_get_abi_version_op vhost_scsi_get_abi_version;
@@ -107,6 +119,9 @@ typedef struct VhostOps {
vhost_backend_can_merge_op vhost_backend_can_merge;
vhost_vsock_set_guest_cid_op vhost_vsock_set_guest_cid;
vhost_vsock_set_running_op vhost_vsock_set_running;
+ vhost_set_iotlb_callback_op vhost_set_iotlb_callback;
+ vhost_update_device_iotlb_op vhost_update_device_iotlb;
+ vhost_invalidate_device_iotlb_op vhost_invalidate_device_iotlb;
} VhostOps;
extern const VhostOps user_ops;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 1fe5aadef5..52f633ec89 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -21,6 +21,7 @@ struct vhost_virtqueue {
unsigned long long used_phys;
unsigned used_size;
EventNotifier masked_notifier;
+ struct vhost_dev *dev;
};
typedef unsigned long vhost_log_chunk_t;
@@ -38,6 +39,7 @@ struct vhost_log {
struct vhost_memory;
struct vhost_dev {
+ VirtIODevice *vdev;
MemoryListener memory_listener;
struct vhost_memory *mem;
int n_mem_sections;
@@ -62,6 +64,7 @@ struct vhost_dev {
void *opaque;
struct vhost_log *log;
QLIST_ENTRY(vhost_dev) entry;
+ IOMMUNotifier n;
};
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
@@ -91,4 +94,5 @@ bool vhost_has_free_slot(void);
int vhost_net_set_backend(struct vhost_dev *hdev,
struct vhost_vring_file *file);
+void vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);
#endif
diff --git a/include/hw/virtio/virtio-access.h b/include/hw/virtio/virtio-access.h
index 440b4555ea..91ae14d254 100644
--- a/include/hw/virtio/virtio-access.h
+++ b/include/hw/virtio/virtio-access.h
@@ -17,6 +17,7 @@
#define QEMU_VIRTIO_ACCESS_H
#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-bus.h"
#include "exec/address-spaces.h"
#if defined(TARGET_PPC64) || defined(TARGET_ARM)
@@ -40,45 +41,55 @@ static inline bool virtio_access_is_big_endian(VirtIODevice *vdev)
static inline uint16_t virtio_lduw_phys(VirtIODevice *vdev, hwaddr pa)
{
+ AddressSpace *dma_as = vdev->dma_as;
+
if (virtio_access_is_big_endian(vdev)) {
- return lduw_be_phys(&address_space_memory, pa);
+ return lduw_be_phys(dma_as, pa);
}
- return lduw_le_phys(&address_space_memory, pa);
+ return lduw_le_phys(dma_as, pa);
}
static inline uint32_t virtio_ldl_phys(VirtIODevice *vdev, hwaddr pa)
{
+ AddressSpace *dma_as = vdev->dma_as;
+
if (virtio_access_is_big_endian(vdev)) {
- return ldl_be_phys(&address_space_memory, pa);
+ return ldl_be_phys(dma_as, pa);
}
- return ldl_le_phys(&address_space_memory, pa);
+ return ldl_le_phys(dma_as, pa);
}
static inline uint64_t virtio_ldq_phys(VirtIODevice *vdev, hwaddr pa)
{
+ AddressSpace *dma_as = vdev->dma_as;
+
if (virtio_access_is_big_endian(vdev)) {
- return ldq_be_phys(&address_space_memory, pa);
+ return ldq_be_phys(dma_as, pa);
}
- return ldq_le_phys(&address_space_memory, pa);
+ return ldq_le_phys(dma_as, pa);
}
static inline void virtio_stw_phys(VirtIODevice *vdev, hwaddr pa,
uint16_t value)
{
+ AddressSpace *dma_as = vdev->dma_as;
+
if (virtio_access_is_big_endian(vdev)) {
- stw_be_phys(&address_space_memory, pa, value);
+ stw_be_phys(dma_as, pa, value);
} else {
- stw_le_phys(&address_space_memory, pa, value);
+ stw_le_phys(dma_as, pa, value);
}
}
static inline void virtio_stl_phys(VirtIODevice *vdev, hwaddr pa,
uint32_t value)
{
+ AddressSpace *dma_as = vdev->dma_as;
+
if (virtio_access_is_big_endian(vdev)) {
- stl_be_phys(&address_space_memory, pa, value);
+ stl_be_phys(dma_as, pa, value);
} else {
- stl_le_phys(&address_space_memory, pa, value);
+ stl_le_phys(dma_as, pa, value);
}
}
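These helpers now route every access through the device's DMA address space (vdev->dma_as) rather than the global address_space_memory. A minimal, illustrative use from device code; bump_counter is invented:

    static uint16_t bump_counter(VirtIODevice *vdev, hwaddr pa)
    {
        uint16_t v = virtio_lduw_phys(vdev, pa);  /* read via vdev->dma_as */

        virtio_stw_phys(vdev, pa, v + 1);         /* write back, same path */
        return v;
    }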
diff --git a/include/hw/virtio/virtio-bus.h b/include/hw/virtio/virtio-bus.h
index 8a51e2c564..a63c1d216d 100644
--- a/include/hw/virtio/virtio-bus.h
+++ b/include/hw/virtio/virtio-bus.h
@@ -88,6 +88,7 @@ typedef struct VirtioBusClass {
* Note that changing this will break migration for this transport.
*/
bool has_variable_vring_alignment;
+ AddressSpace *(*get_dma_as)(DeviceState *d);
} VirtioBusClass;
struct VirtioBusState {
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 20d1cd683a..f3a98a3261 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -38,6 +38,7 @@ struct virtio_gpu_simple_resource {
unsigned int iov_cnt;
uint32_t scanout_bitmask;
pixman_image_t *image;
+ uint64_t hostmem;
QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};
@@ -68,6 +69,7 @@ enum virtio_gpu_conf_flags {
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_STATS_ENABLED))
struct virtio_gpu_conf {
+ uint64_t max_hostmem;
uint32_t max_outputs;
uint32_t flags;
};
@@ -103,6 +105,7 @@ typedef struct VirtIOGPU {
struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];
struct virtio_gpu_conf conf;
+ uint64_t hostmem;
int enabled_output_bitmask;
struct virtio_gpu_config virtio_config;
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 0ced975c57..8ea56a8f60 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -36,6 +36,7 @@ typedef struct virtio_net_conf
int32_t txburst;
char *tx;
uint16_t rx_queue_size;
+ uint16_t mtu;
} virtio_net_conf;
/* Maximum packet size we can receive from tap device: header + 64k */
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index ab0e030cc4..6523bacd2f 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -92,6 +92,7 @@ struct VirtIODevice
char *bus_name;
uint8_t device_endian;
bool use_guest_notifier_mask;
+ AddressSpace *dma_as;
QLIST_HEAD(, VirtQueue) *vector_queues;
};
@@ -170,9 +171,10 @@ bool virtqueue_rewind(VirtQueue *vq, unsigned int num);
void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
unsigned int len, unsigned int idx);
-void virtqueue_map(VirtQueueElement *elem);
+void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem);
void *virtqueue_pop(VirtQueue *vq, size_t sz);
-void *qemu_get_virtqueue_element(QEMUFile *f, size_t sz);
+unsigned int virtqueue_drop_all(VirtQueue *vq);
+void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz);
void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem);
int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
unsigned int out_bytes);
@@ -226,6 +228,7 @@ void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr);
hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n);
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num);
int virtio_queue_get_num(VirtIODevice *vdev, int n);
+int virtio_queue_get_max_num(VirtIODevice *vdev, int n);
int virtio_get_num_queues(VirtIODevice *vdev);
void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
hwaddr avail, hwaddr used);
@@ -255,7 +258,9 @@ typedef struct VirtIORNGConf VirtIORNGConf;
DEFINE_PROP_BIT64("notify_on_empty", _state, _field, \
VIRTIO_F_NOTIFY_ON_EMPTY, true), \
DEFINE_PROP_BIT64("any_layout", _state, _field, \
- VIRTIO_F_ANY_LAYOUT, true)
+ VIRTIO_F_ANY_LAYOUT, true), \
+ DEFINE_PROP_BIT64("iommu_platform", _state, _field, \
+ VIRTIO_F_IOMMU_PLATFORM, false)
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n);
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n);
@@ -266,6 +271,7 @@ hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n);
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n);
void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n);
+void virtio_queue_update_used_idx(VirtIODevice *vdev, int n);
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
uint16_t virtio_get_queue_index(VirtQueue *vq);
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
diff --git a/include/io/dns-resolver.h b/include/io/dns-resolver.h
new file mode 100644
index 0000000000..2f69c08c13
--- /dev/null
+++ b/include/io/dns-resolver.h
@@ -0,0 +1,228 @@
+/*
+ * QEMU DNS resolver
+ *
+ * Copyright (c) 2016-2017 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QIO_DNS_RESOLVER_H
+#define QIO_DNS_RESOLVER_H
+
+#include "qemu-common.h"
+#include "qom/object.h"
+#include "io/task.h"
+
+#define TYPE_QIO_DNS_RESOLVER "qio-dns-resolver"
+#define QIO_DNS_RESOLVER(obj) \
+ OBJECT_CHECK(QIODNSResolver, (obj), TYPE_QIO_DNS_RESOLVER)
+#define QIO_DNS_RESOLVER_CLASS(klass) \
+ OBJECT_CLASS_CHECK(QIODNSResolverClass, klass, TYPE_QIO_DNS_RESOLVER)
+#define QIO_DNS_RESOLVER_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(QIODNSResolverClass, obj, TYPE_QIO_DNS_RESOLVER)
+
+typedef struct QIODNSResolver QIODNSResolver;
+typedef struct QIODNSResolverClass QIODNSResolverClass;
+
+/**
+ * QIODNSResolver:
+ *
+ * The QIODNSResolver class provides a framework for doing
+ * DNS resolution on SocketAddress objects, independently
+ * of socket creation.
+ *
+ * <example>
+ * <title>Resolving addresses synchronously</title>
+ * <programlisting>
+ * int mylisten(SocketAddress *addr, Error **errp) {
+ * QIODNSResolver *resolver = qio_dns_resolver_get_instance();
+ * SocketAddress **rawaddrs = NULL;
+ * size_t nrawaddrs = 0;
+ * Error *err = NULL;
+ * QIOChannelSocket **socks = NULL;
+ * size_t nsocks = 0;
+ * size_t i;
+ *
+ * if (qio_dns_resolver_lookup_sync(resolver, addr, &nrawaddrs,
+ * &rawaddrs, errp) < 0) {
+ * return -1;
+ * }
+ *
+ * for (i = 0; i < nrawaddrs; i++) {
+ * QIOChannelSocket *sock = qio_channel_socket_new();
+ * Error *local_err = NULL;
+ * qio_channel_socket_listen_sync(sock, rawaddrs[i], &local_err);
+ * if (local_err) {
+ * error_propagate(&err, local_err);
+ * } else {
+ * socks = g_renew(QIOChannelSocket *, socks, nsocks + 1);
+ * socks[nsocks++] = sock;
+ * }
+ * qapi_free_SocketAddress(rawaddrs[i]);
+ * }
+ * g_free(rawaddrs);
+ *
+ * if (nsocks == 0) {
+ * error_propagate(errp, err);
+ * } else {
+ * error_free(err);
+ * }
+ * }
+ * </programlisting>
+ * </example>
+ *
+ * <example>
+ * <title>Resolving addresses asynchronously</title>
+ * <programlisting>
+ * typedef struct MyListenData {
+ * Error *err;
+ * QIOChannelSocket **socks;
+ * size_t nsocks;
+ * } MyListenData;
+ *
+ * void mylistenresult(QIOTask *task, void *opaque) {
+ * MyListenData *data = opaque;
+ * QIODNSResolver *resolver =
+ * QIO_DNS_RESOLVER(qio_task_get_source(task));
+ * SocketAddress **rawaddrs = NULL;
+ * size_t nrawaddrs = 0;
+ * size_t i;
+ * Error *err = NULL;
+ *
+ * if (qio_task_propagate_error(task, &data->err)) {
+ * return;
+ * }
+ *
+ * qio_dns_resolver_lookup_result(resolver, task,
+ * &nrawaddrs, &rawaddrs);
+ *
+ * for (i = 0; i < nrawaddrs; i++) {
+ * QIOChannelSocket *sock = qio_channel_socket_new();
+ * Error *local_err = NULL;
+ * qio_channel_socket_listen_sync(sock, rawaddrs[i], &local_err);
+ * if (local_err) {
+ * error_propagate(&err, local_err);
+ * } else {
+ * data->socks = g_renew(QIOChannelSocket *, data->socks,
+ * data->nsocks + 1);
+ * data->socks[data->nsocks++] = sock;
+ * }
+ * qapi_free_SocketAddress(rawaddrs[i]);
+ * }
+ * g_free(rawaddrs);
+ *
+ * if (data->nsocks == 0) {
+ * error_propagate(&data->err, err);
+ * } else {
+ * error_free(err);
+ * }
+ * }
+ *
+ * void mylisten(SocketAddress *addr, MyListenData *data) {
+ * QIODNSResolver *resolver = qio_dns_resolver_get_instance();
+ * qio_dns_resolver_lookup_async(resolver, addr,
+ * mylistenresult, data, NULL);
+ * }
+ * </programlisting>
+ * </example>
+ */
+struct QIODNSResolver {
+ Object parent;
+};
+
+struct QIODNSResolverClass {
+ ObjectClass parent;
+};
+
+
+/**
+ * qio_dns_resolver_get_instance:
+ *
+ * Get the singleton dns resolver instance. The caller
+ * does not own a reference on the returned object.
+ *
+ * Returns: the single dns resolver instance
+ */
+QIODNSResolver *qio_dns_resolver_get_instance(void);
+
+/**
+ * qio_dns_resolver_lookup_sync:
+ * @resolver: the DNS resolver instance
+ * @addr: the address to resolve
+ * @naddrs: pointer to hold number of resolved addresses
+ * @addrs: pointer to hold resolved addresses
+ * @errp: pointer to NULL initialized error object
+ *
+ * This will attempt to resolve the address provided
+ * in @addr. If resolution succeeds, @addrs will be filled
+ * with all the resolved addresses. @naddrs will specify
+ * the number of entries allocated in @addrs. The caller
+ * is responsible for freeing each entry in @addrs, as
+ * well as @addrs itself. @naddrs is guaranteed to be
+ * greater than zero on success.
+ *
+ * DNS resolution will be done synchronously so execution
+ * of the caller may be blocked for an arbitrary length
+ * of time.
+ *
+ * Returns: 0 if resolution was successful, -1 on error
+ */
+int qio_dns_resolver_lookup_sync(QIODNSResolver *resolver,
+ SocketAddress *addr,
+ size_t *naddrs,
+ SocketAddress ***addrs,
+ Error **errp);
+
+/**
+ * qio_dns_resolver_lookup_async:
+ * @resolver: the DNS resolver instance
+ * @addr: the address to resolve
+ * @func: the callback to invoke on lookup completion
+ * @opaque: data blob to pass to @func
+ * @notify: the callback to free @opaque, or NULL
+ *
+ * This will attempt to resolve the address provided
+ * in @addr. The callback @func will be invoked when
+ * resolution has either completed or failed. On
+ * success, the @func should call the method
+ * qio_dns_resolver_lookup_result() to obtain the
+ * results.
+ *
+ * DNS resolution will be done asynchronously so execution
+ * of the caller will not be blocked.
+ */
+void qio_dns_resolver_lookup_async(QIODNSResolver *resolver,
+ SocketAddress *addr,
+ QIOTaskFunc func,
+ gpointer opaque,
+ GDestroyNotify notify);
+
+/**
+ * qio_dns_resolver_lookup_result:
+ * @resolver: the DNS resolver instance
+ * @task: the task object to get results for
+ * @naddrs: pointer to hold number of resolved addresses
+ * @addrs: pointer to hold resolved addresses
+ *
+ * This method should be called from the callback passed
+ * to qio_dns_resolver_lookup_async() in order to obtain
+ * results. @addrs will be filled with all the resolved
+ * addresses. @naddrs will specify the number of entries
+ * allocated in @addrs. The caller is responsible for
+ * freeing each entry in @addrs, as well as @addrs itself.
+ */
+void qio_dns_resolver_lookup_result(QIODNSResolver *resolver,
+ QIOTask *task,
+ size_t *naddrs,
+ SocketAddress ***addrs);
+
+#endif /* QIO_DNS_RESOLVER_H */
diff --git a/include/io/task.h b/include/io/task.h
index 42028cb424..6021f51336 100644
--- a/include/io/task.h
+++ b/include/io/task.h
@@ -26,13 +26,11 @@
typedef struct QIOTask QIOTask;
-typedef void (*QIOTaskFunc)(Object *source,
- Error *err,
+typedef void (*QIOTaskFunc)(QIOTask *task,
gpointer opaque);
-typedef int (*QIOTaskWorker)(QIOTask *task,
- Error **errp,
- gpointer opaque);
+typedef void (*QIOTaskWorker)(QIOTask *task,
+ gpointer opaque);
/**
* QIOTask:
@@ -44,12 +42,12 @@ typedef int (*QIOTaskWorker)(QIOTask *task,
* a public API which accepts a task callback:
*
* <example>
- * <title>Task callback function signature</title>
+ * <title>Task function signature</title>
* <programlisting>
* void myobject_operation(QMyObject *obj,
* QIOTaskFunc *func,
* gpointer opaque,
- * GDestroyNotify *notify);
+ * GDestroyNotify notify);
* </programlisting>
* </example>
*
@@ -57,17 +55,41 @@ typedef int (*QIOTaskWorker)(QIOTask *task,
* is data to pass to it. The optional 'notify' function is used
* to free 'opaque' when no longer needed.
*
- * Now, lets say the implementation of this method wants to set
- * a timer to run once a second checking for completion of some
- * activity. It would do something like
+ * When the operation completes, the 'func' callback will be
+ * invoked, allowing the calling code to determine the result
+ * of the operation. An example QIOTaskFunc implementation may
+ * look like
*
* <example>
- * <title>Task callback function implementation</title>
+ * <title>Task callback implementation</title>
+ * <programlisting>
+ * static void myobject_operation_notify(QIOTask *task,
+ * gpointer opaque)
+ * {
+ * Error *err = NULL;
+ * if (qio_task_propagate_error(task, &err)) {
+ * ...deal with the failure...
+ * error_free(err);
+ * } else {
+ * QMyObject *src = QMY_OBJECT(qio_task_get_source(task));
+ * ...deal with the completion...
+ * }
+ * }
+ * </programlisting>
+ * </example>
+ *
+ * Now, let's say the implementation of the method using the
+ * task wants to set a timer to run once a second checking
+ * for completion of some activity. It would do something
+ * like
+ *
+ * <example>
+ * <title>Task function implementation</title>
* <programlisting>
* void myobject_operation(QMyObject *obj,
* QIOTaskFunc *func,
* gpointer opaque,
- * GDestroyNotify *notify)
+ * GDestroyNotify notify)
* {
* QIOTask *task;
*
@@ -102,8 +124,8 @@ typedef int (*QIOTaskWorker)(QIOTask *task,
*
* ...check something important...
* if (err) {
- * qio_task_abort(task, err);
- * error_free(task);
+ * qio_task_set_error(task, err);
+ * qio_task_complete(task);
* return FALSE;
* } else if (...work is completed ...) {
* qio_task_complete(task);
@@ -115,6 +137,10 @@ typedef int (*QIOTaskWorker)(QIOTask *task,
* </programlisting>
* </example>
*
+ * The 'qio_task_complete' call in this method will trigger
+ * the callback func 'myobject_operation_notify' shown
+ * earlier to deal with the results.
+ *
* Once this function returns false, object_unref will be called
* automatically on the task causing it to be released and the
* ref on QMyObject dropped too.
@@ -136,25 +162,23 @@ typedef int (*QIOTaskWorker)(QIOTask *task,
* socket listen using QIOTask would require:
*
* <example>
- * static int myobject_listen_worker(QIOTask *task,
- * Error **errp,
- * gpointer opaque)
+ * static void myobject_listen_worker(QIOTask *task,
+ * gpointer opaque)
* {
* QMyObject *obj = QMY_OBJECT(qio_task_get_source(task));
* SocketAddress *addr = opaque;
+ * Error *err = NULL;
*
- * obj->fd = socket_listen(addr, errp);
- * if (obj->fd < 0) {
- * return -1;
- * }
- * return 0;
+ * obj->fd = socket_listen(addr, &err);
+ *
+ * qio_task_set_error(task, err);
* }
*
* void myobject_listen_async(QMyObject *obj,
* SocketAddress *addr,
* QIOTaskFunc *func,
* gpointer opaque,
- * GDestroyNotify *notify)
+ * GDestroyNotify notify)
* {
* QIOTask *task;
* SocketAddress *addrCopy;
@@ -187,8 +211,8 @@ typedef int (*QIOTaskWorker)(QIOTask *task,
* 'err' attribute in the task object to determine if
* the operation was successful or not.
*
- * The returned task will be released when one of
- * qio_task_abort() or qio_task_complete() are invoked.
+ * The returned task will be released when qio_task_complete()
+ * is invoked.
*
* Returns: the task struct
*/
@@ -204,10 +228,8 @@ QIOTask *qio_task_new(Object *source,
* @opaque: opaque data to pass to @worker
* @destroy: function to free @opaque
*
- * Run a task in a background thread. If @worker
- * returns 0 it will call qio_task_complete() in
- * the main event thread context. If @worker
- * returns -1 it will call qio_task_abort() in
+ * Run a task in a background thread. When @worker
+ * returns it will call qio_task_complete() in
* the main event thread context.
*/
void qio_task_run_in_thread(QIOTask *task,
@@ -219,24 +241,69 @@ void qio_task_run_in_thread(QIOTask *task,
* qio_task_complete:
* @task: the task struct
*
- * Mark the operation as successfully completed
- * and free the memory for @task.
+ * Invoke the completion callback for @task and
+ * then free its memory.
*/
void qio_task_complete(QIOTask *task);
+
+/**
+ * qio_task_set_error:
+ * @task: the task struct
+ * @err: pointer to the error, or NULL
+ *
+ * Associate an error with the task, which can later
+ * be retrieved with the qio_task_propagate_error()
+ * method. This method takes ownership of @err, so
+ * it is not valid to access it after this call
+ * completes. If @err is NULL this is a no-op. If
+ * this is called multiple times, only the first
+ * provided @err will be recorded, later ones will
+ * be discarded and freed.
+ */
+void qio_task_set_error(QIOTask *task,
+ Error *err);
+
+
/**
- * qio_task_abort:
+ * qio_task_propagate_error:
* @task: the task struct
- * @err: the error to record for the operation
+ * @errp: pointer to a NULL-initialized error object
+ *
+ * Propagate the error associated with @task
+ * into @errp.
+ *
+ * Returns: true if an error was propagated, false otherwise
+ */
+bool qio_task_propagate_error(QIOTask *task,
+ Error **errp);
+
+
+/**
+ * qio_task_set_result_pointer:
+ * @task: the task struct
+ * @result: pointer to the result data
+ * @notify: the callback to free @result, or NULL
+ *
+ * Associate an opaque result with the task,
+ * which can later be retrieved with the
+ * qio_task_get_result_pointer() method
+ *
+ */
+void qio_task_set_result_pointer(QIOTask *task,
+ gpointer result,
+ GDestroyNotify notify);
+
+
+/**
+ * qio_task_get_result_pointer:
+ * @task: the task struct
+ *
+ * Retrieve the opaque result data associated
+ * with the task, if any.
*
- * Mark the operation as failed, with @err providing
- * details about the failure. The @err may be freed
- * afer the function returns, as the notification
- * callback is invoked synchronously. The @task will
- * be freed when this call completes.
+ * Returns: the task result, or NULL
*/
-void qio_task_abort(QIOTask *task,
- Error *err);
+gpointer qio_task_get_result_pointer(QIOTask *task);
/**
@@ -244,9 +311,10 @@ void qio_task_abort(QIOTask *task,
* @task: the task struct
*
* Get the source object associated with the background
- * task. This returns a new reference to the object,
- * which the caller must released with object_unref()
- * when no longer required.
+ * task. The caller does not own a reference on the
+ * returned Object, and so should call object_ref()
+ * if it wants to keep the object pointer outside the
+ * lifetime of the QIOTask object.
*
* Returns: the source object
*/
diff --git a/include/migration/migration.h b/include/migration/migration.h
index c309d23370..af9135f0a7 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -38,6 +38,9 @@
#define QEMU_VM_COMMAND 0x08
#define QEMU_VM_SECTION_FOOTER 0x7e
+/* for vl.c */
+extern int only_migratable;
+
struct MigrationParams {
bool blk;
bool shared;
@@ -177,6 +180,9 @@ struct MigrationState
/* Flag set once the migration thread is running (and needs joining) */
bool migration_thread_running;
+ /* Flag set once the migration thread called bdrv_inactivate_all */
+ bool block_inactive;
+
/* Queue of outstanding page requests from the destination */
QemuMutex src_page_req_mutex;
QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests;
@@ -240,6 +246,7 @@ void remove_migration_state_change_notifier(Notifier *notify);
MigrationState *migrate_init(const MigrationParams *params);
bool migration_is_blocked(Error **errp);
bool migration_in_setup(MigrationState *);
+bool migration_is_idle(MigrationState *s);
bool migration_has_finished(MigrationState *);
bool migration_has_failed(MigrationState *);
/* True if outgoing migration has entered postcopy phase */
@@ -284,8 +291,12 @@ int ram_postcopy_incoming_init(MigrationIncomingState *mis);
* @migrate_add_blocker - prevent migration from proceeding
*
* @reason - an error to be returned whenever migration is attempted
+ *
+ * @errp - [out] The reason (if any) we cannot block migration right now.
+ *
+ * @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
*/
-void migrate_add_blocker(Error *reason);
+int migrate_add_blocker(Error *reason, Error **errp);
/**
* @migrate_del_blocker - remove a blocking error from migration
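Because migrate_add_blocker() can now refuse to install a blocker, callers must check its return value and release the reason on failure. A sketch with an invented blocker and message:

    static Error *my_blocker;

    static int block_migration(Error **errp)
    {
        error_setg(&my_blocker, "device xyz does not support migration");
        if (migrate_add_blocker(my_blocker, errp) < 0) {
            error_free(my_blocker);
            my_blocker = NULL;
            return -1;
        }
        return 0;
    }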
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 1638ee57f7..3bbe3ed984 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -81,11 +81,20 @@ void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque);
typedef struct VMStateInfo VMStateInfo;
typedef struct VMStateDescription VMStateDescription;
-
+typedef struct VMStateField VMStateField;
+
+/* VMStateInfo allows customized migration of objects that don't fit into
+ * any category in VMStateFlags. Additional information is always passed
+ * to get and put via the field and vmdesc parameters. However, these two
+ * parameters should only be used when customized handling is needed, such
+ * as for QTAILQ. For primitive data types such as integers, the field and
+ * vmdesc parameters should be ignored inside get/put.
+ */
struct VMStateInfo {
const char *name;
- int (*get)(QEMUFile *f, void *pv, size_t size);
- void (*put)(QEMUFile *f, void *pv, size_t size);
+ int (*get)(QEMUFile *f, void *pv, size_t size, VMStateField *field);
+ int (*put)(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc);
};
enum VMStateFlags {
@@ -186,7 +195,13 @@ enum VMStateFlags {
VMS_MULTIPLY_ELEMENTS = 0x4000,
};
-typedef struct {
+typedef enum {
+ MIG_PRI_DEFAULT = 0,
+ MIG_PRI_IOMMU, /* Must happen before PCI devices */
+ MIG_PRI_MAX,
+} MigrationPriority;
+
+struct VMStateField {
const char *name;
size_t offset;
size_t size;
@@ -199,7 +214,7 @@ typedef struct {
const VMStateDescription *vmsd;
int version_id;
bool (*field_exists)(void *opaque, int version_id);
-} VMStateField;
+};
struct VMStateDescription {
const char *name;
@@ -207,6 +222,7 @@ struct VMStateDescription {
int version_id;
int minimum_version_id;
int minimum_version_id_old;
+ MigrationPriority priority;
LoadStateHandler *load_state_old;
int (*pre_load)(void *opaque);
int (*post_load)(void *opaque, int version_id);
@@ -244,6 +260,7 @@ extern const VMStateInfo vmstate_info_timer;
extern const VMStateInfo vmstate_info_buffer;
extern const VMStateInfo vmstate_info_unused_buffer;
extern const VMStateInfo vmstate_info_bitmap;
+extern const VMStateInfo vmstate_info_qtailq;
#define type_check_2darray(t1,t2,n,m) ((t1(*)[n][m])0 - (t2*)0)
#define type_check_array(t1,t2,n) ((t1(*)[n])0 - (t2*)0)
@@ -655,6 +672,25 @@ extern const VMStateInfo vmstate_info_bitmap;
.offset = offsetof(_state, _field), \
}
+/* For migrating a QTAILQ.
+ * Target QTAILQ needs to be properly initialized.
+ * _type: type of QTAILQ element
+ * _next: name of QTAILQ entry field in QTAILQ element
+ * _vmsd: VMSD for QTAILQ element
+ * size: size of QTAILQ element
+ * start: offset of QTAILQ entry in QTAILQ element
+ */
+#define VMSTATE_QTAILQ_V(_field, _state, _version, _vmsd, _type, _next) \
+{ \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type), \
+ .info = &vmstate_info_qtailq, \
+ .offset = offsetof(_state, _field), \
+ .start = offsetof(_type, _next), \
+}
+
/* _f : field name
_f_n : num of elements field_name
_n : num of elements
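A hypothetical use of VMSTATE_QTAILQ_V; MyItem, MyDevice and their VMStateDescriptions are invented for illustration, and the target QTAILQ head must already be initialized before loading:

    typedef struct MyItem {
        uint32_t value;
        QTAILQ_ENTRY(MyItem) entry;
    } MyItem;

    typedef struct MyDevice {
        QTAILQ_HEAD(, MyItem) items;
    } MyDevice;

    static const VMStateDescription vmstate_my_item = {
        .name = "my-item",
        .version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(value, MyItem),
            VMSTATE_END_OF_LIST()
        }
    };

    static const VMStateDescription vmstate_my_device = {
        .name = "my-device",
        .version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_QTAILQ_V(items, MyDevice, 1, vmstate_my_item, MyItem, entry),
            VMSTATE_END_OF_LIST()
        }
    };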
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index 5a08efffef..afc1499eb9 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -35,4 +35,6 @@ int vhost_set_vring_enable(NetClientState * nc, int enable);
uint64_t vhost_net_get_acked_features(VHostNetState *net);
+int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu);
+
#endif
diff --git a/include/qapi/dealloc-visitor.h b/include/qapi/dealloc-visitor.h
index b3e5c85fd8..c36715fdf3 100644
--- a/include/qapi/dealloc-visitor.h
+++ b/include/qapi/dealloc-visitor.h
@@ -19,7 +19,7 @@
typedef struct QapiDeallocVisitor QapiDeallocVisitor;
/*
- * The dealloc visitor is primarly used only by generated
+ * The dealloc visitor is primarily used only by generated
* qapi_free_FOO() functions, and is the only visitor designed to work
* correctly in the face of a partially-constructed QAPI tree.
*/
diff --git a/include/qapi/error.h b/include/qapi/error.h
index 0576659603..7e532d00e9 100644
--- a/include/qapi/error.h
+++ b/include/qapi/error.h
@@ -170,6 +170,9 @@ void error_setg_internal(Error **errp,
* Just like error_setg(), with @os_error info added to the message.
* If @os_error is non-zero, ": " + strerror(os_error) is appended to
* the human-readable error message.
+ *
+ * The value of errno (which usually can get clobbered by almost any
+ * function call) will be preserved.
*/
#define error_setg_errno(errp, os_error, fmt, ...) \
error_setg_errno_internal((errp), __FILE__, __LINE__, __func__, \
diff --git a/include/qemu/config-file.h b/include/qemu/config-file.h
index 8d4b2b6d94..c80d5c8a33 100644
--- a/include/qemu/config-file.h
+++ b/include/qemu/config-file.h
@@ -23,8 +23,4 @@ int qemu_read_config_file(const char *filename);
void qemu_config_parse_qdict(QDict *options, QemuOptsList **lists,
Error **errp);
-/* Read default QEMU config files
- */
-int qemu_read_default_config_files(bool userconfig);
-
#endif /* QEMU_CONFIG_FILE_H */
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index e6a60d55fd..12584ed1b7 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -71,6 +71,12 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque);
void qemu_coroutine_enter(Coroutine *coroutine);
/**
+ * Transfer control to a coroutine if it's not active (i.e. part of the call
+ * stack of the running coroutine). Otherwise, do nothing.
+ */
+void qemu_coroutine_enter_if_inactive(Coroutine *co);
+
+/**
* Transfer control back to a coroutine's caller
*
* This function does not return until the coroutine is re-entered using
diff --git a/include/qemu/event_notifier.h b/include/qemu/event_notifier.h
index e326990db4..599c99f1a5 100644
--- a/include/qemu/event_notifier.h
+++ b/include/qemu/event_notifier.h
@@ -34,9 +34,6 @@ int event_notifier_init(EventNotifier *, int active);
void event_notifier_cleanup(EventNotifier *);
int event_notifier_set(EventNotifier *);
int event_notifier_test_and_clear(EventNotifier *);
-int event_notifier_set_handler(EventNotifier *,
- bool is_external,
- EventNotifierHandler *);
#ifdef CONFIG_POSIX
void event_notifier_init_fd(EventNotifier *, int fd);
diff --git a/include/qemu/futex.h b/include/qemu/futex.h
new file mode 100644
index 0000000000..bb7dc9e296
--- /dev/null
+++ b/include/qemu/futex.h
@@ -0,0 +1,36 @@
+/*
+ * Wrappers around Linux futex syscall
+ *
+ * Copyright Red Hat, Inc. 2017
+ *
+ * Author:
+ * Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <sys/syscall.h>
+#include <linux/futex.h>
+
+#define qemu_futex(...) syscall(__NR_futex, __VA_ARGS__)
+
+static inline void qemu_futex_wake(void *f, int n)
+{
+ qemu_futex(f, FUTEX_WAKE, n, NULL, NULL, 0);
+}
+
+static inline void qemu_futex_wait(void *f, unsigned val)
+{
+ while (qemu_futex(f, FUTEX_WAIT, (int) val, NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ return;
+ case EINTR:
+ break; /* get out of switch and retry */
+ default:
+ abort();
+ }
+ }
+}
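A minimal illustration of how the two wrappers pair up, assuming a shared int 'state' word and QEMU's atomic_read/atomic_set accessors; the scenario is invented, only qemu_futex_wait/qemu_futex_wake come from this header:

    static int state;

    static void waiter(void)
    {
        while (atomic_read(&state) == 0) {
            /* returns immediately if *state no longer equals 0 */
            qemu_futex_wait(&state, 0);
        }
    }

    static void waker(void)
    {
        atomic_set(&state, 1);
        qemu_futex_wake(&state, 1);   /* wake at most one waiter */
    }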
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index 46187bbc7e..96288d0bce 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -327,7 +327,7 @@ static inline int ctpop8(uint8_t val)
#else
val = (val & 0x55) + ((val >> 1) & 0x55);
val = (val & 0x33) + ((val >> 2) & 0x33);
- val = (val & 0x0f) + ((val >> 4) & 0x0f);
+ val = (val + (val >> 4)) & 0x0f;
return val;
#endif
@@ -344,8 +344,8 @@ static inline int ctpop16(uint16_t val)
#else
val = (val & 0x5555) + ((val >> 1) & 0x5555);
val = (val & 0x3333) + ((val >> 2) & 0x3333);
- val = (val & 0x0f0f) + ((val >> 4) & 0x0f0f);
- val = (val & 0x00ff) + ((val >> 8) & 0x00ff);
+ val = (val + (val >> 4)) & 0x0f0f;
+ val = (val + (val >> 8)) & 0x00ff;
return val;
#endif
@@ -360,11 +360,10 @@ static inline int ctpop32(uint32_t val)
#if QEMU_GNUC_PREREQ(3, 4)
return __builtin_popcount(val);
#else
- val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
- val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
- val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
- val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
- val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
+ val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
+ val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
+ val = (val + (val >> 4)) & 0x0f0f0f0f;
+ val = (val * 0x01010101) >> 24;
return val;
#endif
@@ -379,12 +378,10 @@ static inline int ctpop64(uint64_t val)
#if QEMU_GNUC_PREREQ(3, 4)
return __builtin_popcountll(val);
#else
- val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
- val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
- val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
- val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) & 0x00ff00ff00ff00ffULL);
- val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) & 0x0000ffff0000ffffULL);
- val = (val & 0x00000000ffffffffULL) + ((val >> 32) & 0x00000000ffffffffULL);
+ val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
+ val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
+ val = (val * 0x0101010101010101ULL) >> 56;
return val;
#endif
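The rewritten fallbacks fold bit pairs and nibbles as before, but then sum the per-byte counts with a single multiply and shift instead of further masked additions. A small standalone self-check of the 32-bit variant (not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static int ctpop32_fallback(uint32_t val)
    {
        val = (val & 0x55555555) + ((val >> 1) & 0x55555555); /* 2-bit sums */
        val = (val & 0x33333333) + ((val >> 2) & 0x33333333); /* 4-bit sums */
        val = (val + (val >> 4)) & 0x0f0f0f0f;                /* per-byte sums */
        /* the multiply accumulates all four byte counts into the top byte */
        return (val * 0x01010101) >> 24;
    }

    int main(void)
    {
        assert(ctpop32_fallback(0x00000000u) == 0);
        assert(ctpop32_fallback(0x80000001u) == 2);
        assert(ctpop32_fallback(0xffffffffu) == 32);
        return 0;
    }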
diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h
index a9d4f23cd9..d7e24af78d 100644
--- a/include/qemu/main-loop.h
+++ b/include/qemu/main-loop.h
@@ -203,6 +203,21 @@ void qemu_set_fd_handler(int fd,
IOHandler *fd_write,
void *opaque);
+
+/**
+ * event_notifier_set_handler: Register an EventNotifier with the main loop
+ *
+ * This function tells the main loop to wake up whenever the
+ * #EventNotifier was set.
+ *
+ * @e: The #EventNotifier to be observed.
+ *
+ * @handler: A level-triggered callback that is fired when @e
+ * has been set. @e is passed to it as a parameter.
+ */
+void event_notifier_set_handler(EventNotifier *e,
+ EventNotifierHandler *handler);
+
GSource *iohandler_get_g_source(void);
AioContext *iohandler_get_aio_context(void);
#ifdef CONFIG_POSIX
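A minimal sketch of the relocated API: event_notifier_set_handler() no longer takes the 'is_external' flag, and the handler simply clears and reacts to the notifier. The my_notifier/my_handler names are invented:

    static EventNotifier my_notifier;

    static void my_handler(EventNotifier *e)
    {
        if (event_notifier_test_and_clear(e)) {
            /* ...react to the event in the main loop... */
        }
    }

    static void setup(void)
    {
        event_notifier_init(&my_notifier, 0);
        event_notifier_set_handler(&my_notifier, my_handler);
    }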
diff --git a/include/qemu/qht.h b/include/qemu/qht.h
index 311139b85a..56c2c7784c 100644
--- a/include/qemu/qht.h
+++ b/include/qemu/qht.h
@@ -72,7 +72,7 @@ void qht_destroy(struct qht *ht);
* In case of successful operation, smp_wmb() is implied before the pointer is
* inserted into the hash table.
*
- * Returns true on sucess.
+ * Returns true on success.
* Returns false if the @p-@hash pair already exists in the hash table.
*/
bool qht_insert(struct qht *ht, void *p, uint32_t hash);
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index 342073fb4d..35292c3155 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -438,4 +438,64 @@ struct { \
#define QTAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define field_at_offset(base, offset, type) \
+ ((type) (((char *) (base)) + (offset)))
+
+typedef struct DUMMY_Q_ENTRY DUMMY_Q_ENTRY;
+typedef struct DUMMY_Q DUMMY_Q;
+
+struct DUMMY_Q_ENTRY {
+ QTAILQ_ENTRY(DUMMY_Q_ENTRY) next;
+};
+
+struct DUMMY_Q {
+ QTAILQ_HEAD(DUMMY_Q_HEAD, DUMMY_Q_ENTRY) head;
+};
+
+#define dummy_q ((DUMMY_Q *) 0)
+#define dummy_qe ((DUMMY_Q_ENTRY *) 0)
+
+/*
+ * Offsets of layout of a tail queue head.
+ */
+#define QTAILQ_FIRST_OFFSET (offsetof(typeof(dummy_q->head), tqh_first))
+#define QTAILQ_LAST_OFFSET (offsetof(typeof(dummy_q->head), tqh_last))
+/*
+ * Raw access of elements of a tail queue
+ */
+#define QTAILQ_RAW_FIRST(head) \
+ (*field_at_offset(head, QTAILQ_FIRST_OFFSET, void **))
+#define QTAILQ_RAW_TQH_LAST(head) \
+ (*field_at_offset(head, QTAILQ_LAST_OFFSET, void ***))
+
+/*
+ * Offsets of layout of a tail queue element.
+ */
+#define QTAILQ_NEXT_OFFSET (offsetof(typeof(dummy_qe->next), tqe_next))
+#define QTAILQ_PREV_OFFSET (offsetof(typeof(dummy_qe->next), tqe_prev))
+
+/*
+ * Raw access of elements of a tail entry
+ */
+#define QTAILQ_RAW_NEXT(elm, entry) \
+ (*field_at_offset(elm, entry + QTAILQ_NEXT_OFFSET, void **))
+#define QTAILQ_RAW_TQE_PREV(elm, entry) \
+ (*field_at_offset(elm, entry + QTAILQ_PREV_OFFSET, void ***))
+/*
+ * Tail queue traversal using pointer arithmetic.
+ */
+#define QTAILQ_RAW_FOREACH(elm, head, entry) \
+ for ((elm) = QTAILQ_RAW_FIRST(head); \
+ (elm); \
+ (elm) = QTAILQ_RAW_NEXT(elm, entry))
+/*
+ * Tail queue insertion using pointer arithmetic.
+ */
+#define QTAILQ_RAW_INSERT_TAIL(head, elm, entry) do { \
+ QTAILQ_RAW_NEXT(elm, entry) = NULL; \
+ QTAILQ_RAW_TQE_PREV(elm, entry) = QTAILQ_RAW_TQH_LAST(head); \
+ *QTAILQ_RAW_TQH_LAST(head) = (elm); \
+ QTAILQ_RAW_TQH_LAST(head) = &QTAILQ_RAW_NEXT(elm, entry); \
+} while (/*CONSTCOND*/0)
+
#endif /* QEMU_SYS_QUEUE_H */
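The raw macros let type-agnostic code (such as the QTAILQ vmstate handler) walk a tail queue knowing only the byte offset of the QTAILQ_ENTRY inside each element. An illustrative sketch; Item and count_items are invented:

    typedef struct Item {
        int value;
        QTAILQ_ENTRY(Item) next;
    } Item;

    typedef QTAILQ_HEAD(, Item) ItemList;

    static size_t count_items(ItemList *list)
    {
        size_t entry_off = offsetof(Item, next);  /* 'entry' offset for the raw macros */
        void *elm;
        size_t n = 0;

        QTAILQ_RAW_FOREACH(elm, list, entry_off) {
            n++;
        }
        return n;
    }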
diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h
index 5589e6842b..5f1bab9b3e 100644
--- a/include/qemu/sockets.h
+++ b/include/qemu/sockets.h
@@ -32,6 +32,8 @@ int socket_set_fast_reuse(int fd);
*/
typedef void NonBlockingConnectHandler(int fd, Error *err, void *opaque);
+int inet_ai_family_from_address(InetSocketAddress *addr,
+ Error **errp);
InetSocketAddress *inet_parse(const char *str, Error **errp);
int inet_connect(const char *str, Error **errp);
int inet_connect_saddr(InetSocketAddress *saddr, Error **errp,
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index e8e665f020..9910f49b3a 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -8,6 +8,7 @@ typedef struct QemuMutex QemuMutex;
typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
+typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;
#ifdef _WIN32
@@ -98,4 +99,115 @@ static inline void qemu_spin_unlock(QemuSpin *spin)
__sync_lock_release(&spin->value);
}
+struct QemuLockCnt {
+#ifndef CONFIG_LINUX
+ QemuMutex mutex;
+#endif
+ unsigned count;
+};
+
+/**
+ * qemu_lockcnt_init: initialize a QemuLockcnt
+ * @lockcnt: the lockcnt to initialize
+ *
+ * Initialize lockcnt's counter to zero and prepare its mutex
+ * for usage.
+ */
+void qemu_lockcnt_init(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_destroy: destroy a QemuLockcnt
+ * @lockcnt: the lockcnt to destruct
+ *
+ * Destroy lockcnt's mutex.
+ */
+void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_inc: increment a QemuLockCnt's counter
+ * @lockcnt: the lockcnt to operate on
+ *
+ * If the lockcnt's count is zero, wait for critical sections
+ * to finish and increment lockcnt's count to 1. If the count
+ * is not zero, just increment it.
+ *
+ * Because this function can wait on the mutex, it must not be
+ * called while the lockcnt's mutex is held by the current thread.
+ * For the same reason, qemu_lockcnt_inc can also contribute to
+ * AB-BA deadlocks. This is a sample deadlock scenario:
+ *
+ * thread 1 thread 2
+ * -------------------------------------------------------
+ * qemu_lockcnt_lock(&lc1);
+ * qemu_lockcnt_lock(&lc2);
+ * qemu_lockcnt_inc(&lc2);
+ * qemu_lockcnt_inc(&lc1);
+ */
+void qemu_lockcnt_inc(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
+ * @lockcnt: the lockcnt to operate on
+ */
+void qemu_lockcnt_dec(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
+ * possibly lock it.
+ * @lockcnt: the lockcnt to operate on
+ *
+ * Decrement lockcnt's count. If the new count is zero, lock
+ * the mutex and return true. Otherwise, return false.
+ */
+bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
+ * lock it.
+ * @lockcnt: the lockcnt to operate on
+ *
+ * If the count is 1, decrement the count to zero, lock
+ * the mutex and return true. Otherwise, return false.
+ */
+bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
+ * @lockcnt: the lockcnt to operate on
+ *
+ * Remember that concurrent visits are not blocked unless the count is
+ * also zero. You can use qemu_lockcnt_count to check for this inside a
+ * critical section.
+ */
+void qemu_lockcnt_lock(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
+ * @lockcnt: the lockcnt to operate on.
+ */
+void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
+ * @lockcnt: the lockcnt to operate on.
+ *
+ * This is the same as
+ *
+ * qemu_lockcnt_unlock(lockcnt);
+ * qemu_lockcnt_inc(lockcnt);
+ *
+ * but more efficient.
+ */
+void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);
+
+/**
+ * qemu_lockcnt_count: query a LockCnt's count.
+ * @lockcnt: the lockcnt to query.
+ *
+ * Note that the count can change at any time. Still, while the
+ * lockcnt is locked, one can usefully check whether the count
+ * is non-zero.
+ */
+unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);
+
#endif
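A hedged sketch of the visit/modify pattern these primitives are meant for, using an invented Resource object: readers count themselves in and out around lock-free visits, while a writer takes the mutex and only mutates once it observes a zero count.

    typedef struct Resource {
        QemuLockCnt lockcnt;
        void *data;
    } Resource;

    static void resource_visit(Resource *r)
    {
        qemu_lockcnt_inc(&r->lockcnt);
        /* ...read r->data without holding any mutex... */
        qemu_lockcnt_dec(&r->lockcnt);
    }

    static void resource_try_modify(Resource *r, void *new_data)
    {
        qemu_lockcnt_lock(&r->lockcnt);
        if (qemu_lockcnt_count(&r->lockcnt) == 0) {
            /* no concurrent visitors, safe to swap the data */
            r->data = new_data;
        }
        qemu_lockcnt_unlock(&r->lockcnt);
    }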
diff --git a/include/qemu/xattr.h b/include/qemu/xattr.h
index 83cf98cbd8..a83fe8e749 100644
--- a/include/qemu/xattr.h
+++ b/include/qemu/xattr.h
@@ -14,7 +14,7 @@
#define QEMU_XATTR_H
/*
- * Modern distributions (e.g. Fedora 15, have no libattr.so, place attr.h
+ * Modern distributions (e.g. Fedora 15), have no libattr.so, place attr.h
* in /usr/include/sys, and don't have ENOATTR.
*/
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 3f79a8e955..ca4d0fb1b4 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -227,6 +227,8 @@ struct CPUWatchpoint {
struct KVMState;
struct kvm_run;
+struct hax_vcpu_state;
+
#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
@@ -392,6 +394,9 @@ struct CPUState {
(absolute value) offset as small as possible. This reduces code
size, especially for hosts without large memory offsets. */
uint32_t tcg_exit_req;
+
+ bool hax_vcpu_dirty;
+ struct hax_vcpu_state *hax_vcpu;
};
QTAILQ_HEAD(CPUTailQ, CPUState);
diff --git a/include/qom/object.h b/include/qom/object.h
index 5ecc2d166d..cd0f412ce9 100644
--- a/include/qom/object.h
+++ b/include/qom/object.h
@@ -432,7 +432,7 @@ struct Object
* @class_base_init: This function is called for all base classes after all
* parent class initialization has occurred, but before the class itself
* is initialized. This is the function to use to undo the effects of
- * memcpy from the parent class to the descendents.
+ * memcpy from the parent class to the descendants.
* @class_finalize: This function is called during class destruction and is
* meant to release any dynamic parameters allocated by @class_init.
* @class_data: Data to pass to the @class_init, @class_base_init and
@@ -587,18 +587,6 @@ struct InterfaceClass
Object *object_new(const char *typename);
/**
- * object_new_with_type:
- * @type: The type of the object to instantiate.
- *
- * This function will initialize a new object using heap allocated memory.
- * The returned object has a reference count of 1, and will be freed when
- * the last reference is dropped.
- *
- * Returns: The newly allocated and instantiated object.
- */
-Object *object_new_with_type(Type type);
-
-/**
* object_new_with_props:
* @typename: The name of the type of the object to instantiate.
* @parent: the parent object
@@ -727,18 +715,6 @@ int object_set_propv(Object *obj,
va_list vargs);
/**
- * object_initialize_with_type:
- * @data: A pointer to the memory to be used for the object.
- * @size: The maximum size available at @data for the object.
- * @type: The type of the object to instantiate.
- *
- * This function will initialize an object. The memory for the object should
- * have already been allocated. The returned object has a reference count of 1,
- * and will be finalized when the last reference is dropped.
- */
-void object_initialize_with_type(void *data, size_t size, Type type);
-
-/**
* object_initialize:
* @obj: A pointer to the memory to be used for the object.
* @size: The maximum size available at @obj for the object.
diff --git a/include/qom/object_interfaces.h b/include/qom/object_interfaces.h
index 8b17f4def7..fdd7603c84 100644
--- a/include/qom/object_interfaces.h
+++ b/include/qom/object_interfaces.h
@@ -76,23 +76,6 @@ void user_creatable_complete(Object *obj, Error **errp);
bool user_creatable_can_be_deleted(UserCreatable *uc, Error **errp);
/**
- * user_creatable_add:
- * @qdict: the object definition
- * @v: the visitor
- * @errp: if an error occurs, a pointer to an area to store the error
- *
- * Create an instance of the user creatable object whose type
- * is defined in @qdict by the 'qom-type' field, placing it
- * in the object composition tree with name provided by the
- * 'id' field. The remaining fields in @qdict are used to
- * initialize the object properties.
- *
- * Returns: the newly created object or NULL on error
- */
-Object *user_creatable_add(const QDict *qdict,
- Visitor *v, Error **errp);
-
-/**
* user_creatable_add_type:
* @type: the object type name
* @id: the unique ID for the object
diff --git a/include/standard-headers/linux/virtio_crypto.h b/include/standard-headers/linux/virtio_crypto.h
index 82275a84d8..5ff0b4ee59 100644
--- a/include/standard-headers/linux/virtio_crypto.h
+++ b/include/standard-headers/linux/virtio_crypto.h
@@ -1,5 +1,5 @@
-#ifndef _LINUX_VIRTIO_CRYPTO_H
-#define _LINUX_VIRTIO_CRYPTO_H
+#ifndef _VIRTIO_CRYPTO_H
+#define _VIRTIO_CRYPTO_H
/* This header is BSD licensed so anyone can use the definitions to implement
* compatible drivers/servers.
*
@@ -14,52 +14,54 @@
* 3. Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE. */
-
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
#include "standard-headers/linux/types.h"
-#include "standard-headers/linux/virtio_config.h"
#include "standard-headers/linux/virtio_types.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/virtio_config.h"
#define VIRTIO_CRYPTO_SERVICE_CIPHER 0
-#define VIRTIO_CRYPTO_SERVICE_HASH 1
-#define VIRTIO_CRYPTO_SERVICE_MAC 2
-#define VIRTIO_CRYPTO_SERVICE_AEAD 3
+#define VIRTIO_CRYPTO_SERVICE_HASH 1
+#define VIRTIO_CRYPTO_SERVICE_MAC 2
+#define VIRTIO_CRYPTO_SERVICE_AEAD 3
#define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))
struct virtio_crypto_ctrl_header {
#define VIRTIO_CRYPTO_CIPHER_CREATE_SESSION \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02)
#define VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03)
#define VIRTIO_CRYPTO_HASH_CREATE_SESSION \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02)
#define VIRTIO_CRYPTO_HASH_DESTROY_SESSION \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03)
#define VIRTIO_CRYPTO_MAC_CREATE_SESSION \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02)
#define VIRTIO_CRYPTO_MAC_DESTROY_SESSION \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03)
#define VIRTIO_CRYPTO_AEAD_CREATE_SESSION \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
- __virtio32 opcode;
- __virtio32 algo;
- __virtio32 flag;
- /* data virtqueue id */
- __virtio32 queue_id;
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+ uint32_t opcode;
+ uint32_t algo;
+ uint32_t flag;
+ /* data virtqueue id */
+ uint32_t queue_id;
};
struct virtio_crypto_cipher_session_para {
@@ -78,26 +80,27 @@ struct virtio_crypto_cipher_session_para {
#define VIRTIO_CRYPTO_CIPHER_AES_F8 12
#define VIRTIO_CRYPTO_CIPHER_AES_XTS 13
#define VIRTIO_CRYPTO_CIPHER_ZUC_EEA3 14
- __virtio32 algo;
- /* length of key */
- __virtio32 keylen;
+ uint32_t algo;
+ /* length of key */
+ uint32_t keylen;
#define VIRTIO_CRYPTO_OP_ENCRYPT 1
#define VIRTIO_CRYPTO_OP_DECRYPT 2
- /* encrypt or decrypt */
- __virtio32 op;
- __virtio32 padding;
+ /* encrypt or decrypt */
+ uint32_t op;
+ uint32_t padding;
};
struct virtio_crypto_session_input {
- /* Device-writable part */
- __virtio64 session_id;
- __virtio32 status;
- __virtio32 padding;
+ /* Device-writable part */
+ uint64_t session_id;
+ uint32_t status;
+ uint32_t padding;
};
struct virtio_crypto_cipher_session_req {
- struct virtio_crypto_cipher_session_para para;
+ struct virtio_crypto_cipher_session_para para;
+ uint8_t padding[32];
};
struct virtio_crypto_hash_session_para {
@@ -114,13 +117,15 @@ struct virtio_crypto_hash_session_para {
#define VIRTIO_CRYPTO_HASH_SHA3_512 10
#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE128 11
#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE256 12
- __virtio32 algo;
- /* hash result length */
- __virtio32 hash_result_len;
+ uint32_t algo;
+ /* hash result length */
+ uint32_t hash_result_len;
+ uint8_t padding[8];
};
struct virtio_crypto_hash_create_session_req {
- struct virtio_crypto_hash_session_para para;
+ struct virtio_crypto_hash_session_para para;
+ uint8_t padding[40];
};
struct virtio_crypto_mac_session_para {
@@ -140,16 +145,17 @@ struct virtio_crypto_mac_session_para {
#define VIRTIO_CRYPTO_MAC_CBCMAC_AES 49
#define VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9 50
#define VIRTIO_CRYPTO_MAC_XCBC_AES 53
- __virtio32 algo;
- /* hash result length */
- __virtio32 hash_result_len;
- /* length of authenticated key */
- __virtio32 auth_key_len;
- __virtio32 padding;
+ uint32_t algo;
+ /* hash result length */
+ uint32_t hash_result_len;
+ /* length of authenticated key */
+ uint32_t auth_key_len;
+ uint32_t padding;
};
struct virtio_crypto_mac_create_session_req {
- struct virtio_crypto_mac_session_para para;
+ struct virtio_crypto_mac_session_para para;
+ uint8_t padding[40];
};
struct virtio_crypto_aead_session_para {
@@ -157,273 +163,288 @@ struct virtio_crypto_aead_session_para {
#define VIRTIO_CRYPTO_AEAD_GCM 1
#define VIRTIO_CRYPTO_AEAD_CCM 2
#define VIRTIO_CRYPTO_AEAD_CHACHA20_POLY1305 3
- __virtio32 algo;
- /* length of key */
- __virtio32 key_len;
- /* digest result length */
- __virtio32 digest_result_len;
- /* length of the additional authenticated data (AAD) in bytes */
- __virtio32 aad_len;
- /* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */
- __virtio32 op;
- __virtio32 padding;
+ uint32_t algo;
+ /* length of key */
+ uint32_t key_len;
+ /* hash result length */
+ uint32_t hash_result_len;
+ /* length of the additional authenticated data (AAD) in bytes */
+ uint32_t aad_len;
+ /* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */
+ uint32_t op;
+ uint32_t padding;
};
struct virtio_crypto_aead_create_session_req {
- struct virtio_crypto_aead_session_para para;
+ struct virtio_crypto_aead_session_para para;
+ uint8_t padding[32];
};
struct virtio_crypto_alg_chain_session_para {
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1
#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2
- __virtio32 alg_chain_order;
+ uint32_t alg_chain_order;
/* Plain hash */
#define VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN 1
/* Authenticated hash (mac) */
#define VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH 2
/* Nested hash */
#define VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED 3
- __virtio32 hash_mode;
- struct virtio_crypto_cipher_session_para cipher_param;
- union {
- struct virtio_crypto_hash_session_para hash_param;
- struct virtio_crypto_mac_session_para mac_param;
- } u;
- /* length of the additional authenticated data (AAD) in bytes */
- __virtio32 aad_len;
- __virtio32 padding;
+ uint32_t hash_mode;
+ struct virtio_crypto_cipher_session_para cipher_param;
+ union {
+ struct virtio_crypto_hash_session_para hash_param;
+ struct virtio_crypto_mac_session_para mac_param;
+ uint8_t padding[16];
+ } u;
+ /* length of the additional authenticated data (AAD) in bytes */
+ uint32_t aad_len;
+ uint32_t padding;
};
struct virtio_crypto_alg_chain_session_req {
- struct virtio_crypto_alg_chain_session_para para;
+ struct virtio_crypto_alg_chain_session_para para;
};
struct virtio_crypto_sym_create_session_req {
- union {
- struct virtio_crypto_cipher_session_req cipher;
- struct virtio_crypto_alg_chain_session_req chain;
- } u;
+ union {
+ struct virtio_crypto_cipher_session_req cipher;
+ struct virtio_crypto_alg_chain_session_req chain;
+ uint8_t padding[48];
+ } u;
- /* Device-readable part */
+ /* Device-readable part */
/* No operation */
#define VIRTIO_CRYPTO_SYM_OP_NONE 0
/* Cipher only operation on the data */
#define VIRTIO_CRYPTO_SYM_OP_CIPHER 1
-/* Chain any cipher with any hash or mac operation. The order
- depends on the value of alg_chain_order param */
+/*
+ * Chain any cipher with any hash or mac operation. The order
+ * depends on the value of alg_chain_order param
+ */
#define VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING 2
- __virtio32 op_type;
- __virtio32 padding;
+ uint32_t op_type;
+ uint32_t padding;
};
struct virtio_crypto_destroy_session_req {
- /* Device-readable part */
- __virtio64 session_id;
+ /* Device-readable part */
+ uint64_t session_id;
+ uint8_t padding[48];
};
-/* The request of the control viritqueue's packet */
+/* The request of the control virtqueue's packet */
struct virtio_crypto_op_ctrl_req {
- struct virtio_crypto_ctrl_header header;
-
- union {
- struct virtio_crypto_sym_create_session_req sym_create_session;
- struct virtio_crypto_hash_create_session_req hash_create_session;
- struct virtio_crypto_mac_create_session_req mac_create_session;
- struct virtio_crypto_aead_create_session_req aead_create_session;
- struct virtio_crypto_destroy_session_req destroy_session;
- } u;
+ struct virtio_crypto_ctrl_header header;
+
+ union {
+ struct virtio_crypto_sym_create_session_req
+ sym_create_session;
+ struct virtio_crypto_hash_create_session_req
+ hash_create_session;
+ struct virtio_crypto_mac_create_session_req
+ mac_create_session;
+ struct virtio_crypto_aead_create_session_req
+ aead_create_session;
+ struct virtio_crypto_destroy_session_req
+ destroy_session;
+ uint8_t padding[56];
+ } u;
};
struct virtio_crypto_op_header {
#define VIRTIO_CRYPTO_CIPHER_ENCRYPT \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00)
#define VIRTIO_CRYPTO_CIPHER_DECRYPT \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01)
#define VIRTIO_CRYPTO_HASH \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00)
#define VIRTIO_CRYPTO_MAC \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00)
#define VIRTIO_CRYPTO_AEAD_ENCRYPT \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
#define VIRTIO_CRYPTO_AEAD_DECRYPT \
- VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
- __virtio32 opcode;
- /* algo should be service-specific algorithms */
- __virtio32 algo;
- /* session_id should be service-specific algorithms */
- __virtio64 session_id;
- /* control flag to control the request */
- __virtio32 flag;
- __virtio32 padding;
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+ uint32_t opcode;
+ /* algo should be service-specific algorithms */
+ uint32_t algo;
+ /* session_id should be service-specific algorithms */
+ uint64_t session_id;
+ /* control flag to control the request */
+ uint32_t flag;
+ uint32_t padding;
};
struct virtio_crypto_cipher_para {
- /*
- * Byte Length of valid IV/Counter
- *
- * - For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for
- * SNOW3G in UEA2 mode, this is the length of the IV (which
- * must be the same as the block length of the cipher).
- * - For block ciphers in CTR mode, this is the length of the counter
- * (which must be the same as the block length of the cipher).
- * - For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007.
- *
- * The IV/Counter will be updated after every partial cryptographic
- * operation.
- */
- __virtio32 iv_len;
- /* length of source data */
- __virtio32 src_data_len;
- /* length of dst data */
- __virtio32 dst_data_len;
- __virtio32 padding;
+ /*
+ * Byte Length of valid IV/Counter
+ *
+ * For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for
+ * SNOW3G in UEA2 mode, this is the length of the IV (which
+ * must be the same as the block length of the cipher).
+ * For block ciphers in CTR mode, this is the length of the counter
+ * (which must be the same as the block length of the cipher).
+ * For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007.
+ *
+ * The IV/Counter will be updated after every partial cryptographic
+ * operation.
+ */
+ uint32_t iv_len;
+ /* length of source data */
+ uint32_t src_data_len;
+ /* length of dst data */
+ uint32_t dst_data_len;
+ uint32_t padding;
};
struct virtio_crypto_hash_para {
- /* length of source data */
- __virtio32 src_data_len;
- /* hash result length */
- __virtio32 hash_result_len;
+ /* length of source data */
+ uint32_t src_data_len;
+ /* hash result length */
+ uint32_t hash_result_len;
};
struct virtio_crypto_mac_para {
- struct virtio_crypto_hash_para hash;
+ struct virtio_crypto_hash_para hash;
};
struct virtio_crypto_aead_para {
- /*
- * Byte Length of valid IV data pointed to by the below iv_addr
- * parameter.
- *
- * - For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which
- * case iv_addr points to J0.
- * - For CCM mode, this is the length of the nonce, which can be in the
- * range 7 to 13 inclusive.
- */
- __virtio32 iv_len;
- /* length of additional auth data */
- __virtio32 aad_len;
- /* length of source data */
- __virtio32 src_data_len;
- /* length of dst data */
- __virtio32 dst_data_len;
+ /*
+ * Byte Length of valid IV data pointed to by the below iv_addr
+ * parameter.
+ *
+ * For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which
+ * case iv_addr points to J0.
+ * For CCM mode, this is the length of the nonce, which can be in the
+ * range 7 to 13 inclusive.
+ */
+ uint32_t iv_len;
+ /* length of additional auth data */
+ uint32_t aad_len;
+ /* length of source data */
+ uint32_t src_data_len;
+ /* length of dst data */
+ uint32_t dst_data_len;
};
struct virtio_crypto_cipher_data_req {
- /* Device-readable part */
- struct virtio_crypto_cipher_para para;
+ /* Device-readable part */
+ struct virtio_crypto_cipher_para para;
+ uint8_t padding[24];
};
struct virtio_crypto_hash_data_req {
- /* Device-readable part */
- struct virtio_crypto_hash_para para;
+ /* Device-readable part */
+ struct virtio_crypto_hash_para para;
+ uint8_t padding[40];
};
struct virtio_crypto_mac_data_req {
- /* Device-readable part */
- struct virtio_crypto_mac_para para;
+ /* Device-readable part */
+ struct virtio_crypto_mac_para para;
+ uint8_t padding[40];
};
struct virtio_crypto_alg_chain_data_para {
- __virtio32 iv_len;
- /* Length of source data */
- __virtio32 src_data_len;
- /* Length of destination data */
- __virtio32 dst_data_len;
- /* Starting point for cipher processing in source data */
- __virtio32 cipher_start_src_offset;
- /* Length of the source data that the cipher will be computed on */
- __virtio32 len_to_cipher;
- /* Starting point for hash processing in source data */
- __virtio32 hash_start_src_offset;
- /* Length of the source data that the hash will be computed on */
- __virtio32 len_to_hash;
- /* Length of the additional auth data */
- __virtio32 aad_len;
- /* Length of the hash result */
- __virtio32 hash_result_len;
- __virtio32 reserved;
+ uint32_t iv_len;
+ /* Length of source data */
+ uint32_t src_data_len;
+ /* Length of destination data */
+ uint32_t dst_data_len;
+ /* Starting point for cipher processing in source data */
+ uint32_t cipher_start_src_offset;
+ /* Length of the source data that the cipher will be computed on */
+ uint32_t len_to_cipher;
+ /* Starting point for hash processing in source data */
+ uint32_t hash_start_src_offset;
+ /* Length of the source data that the hash will be computed on */
+ uint32_t len_to_hash;
+ /* Length of the additional auth data */
+ uint32_t aad_len;
+ /* Length of the hash result */
+ uint32_t hash_result_len;
+ uint32_t reserved;
};
struct virtio_crypto_alg_chain_data_req {
- /* Device-readable part */
- struct virtio_crypto_alg_chain_data_para para;
+ /* Device-readable part */
+ struct virtio_crypto_alg_chain_data_para para;
};
struct virtio_crypto_sym_data_req {
- union {
- struct virtio_crypto_cipher_data_req cipher;
- struct virtio_crypto_alg_chain_data_req chain;
- } u;
-
- /* See above VIRTIO_CRYPTO_SYM_OP_* */
- __virtio32 op_type;
- __virtio32 padding;
+ union {
+ struct virtio_crypto_cipher_data_req cipher;
+ struct virtio_crypto_alg_chain_data_req chain;
+ uint8_t padding[40];
+ } u;
+
+ /* See above VIRTIO_CRYPTO_SYM_OP_* */
+ uint32_t op_type;
+ uint32_t padding;
};
struct virtio_crypto_aead_data_req {
- /* Device-readable part */
- struct virtio_crypto_aead_para para;
+ /* Device-readable part */
+ struct virtio_crypto_aead_para para;
+ uint8_t padding[32];
};
-/* The request of the data viritqueue's packet */
+/* The request of the data virtqueue's packet */
struct virtio_crypto_op_data_req {
- struct virtio_crypto_op_header header;
-
- union {
- struct virtio_crypto_sym_data_req sym_req;
- struct virtio_crypto_hash_data_req hash_req;
- struct virtio_crypto_mac_data_req mac_req;
- struct virtio_crypto_aead_data_req aead_req;
- } u;
+ struct virtio_crypto_op_header header;
+
+ union {
+ struct virtio_crypto_sym_data_req sym_req;
+ struct virtio_crypto_hash_data_req hash_req;
+ struct virtio_crypto_mac_data_req mac_req;
+ struct virtio_crypto_aead_data_req aead_req;
+ uint8_t padding[48];
+ } u;
};
#define VIRTIO_CRYPTO_OK 0
#define VIRTIO_CRYPTO_ERR 1
#define VIRTIO_CRYPTO_BADMSG 2
#define VIRTIO_CRYPTO_NOTSUPP 3
-#define VIRTIO_CRYPTO_INVSESS 4 /* Invaild session id */
+#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
/* The accelerator hardware is ready */
#define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
-#define VIRTIO_CRYPTO_S_STARTED (1 << 1)
struct virtio_crypto_config {
- /* See VIRTIO_CRYPTO_* above */
- __virtio32 status;
-
- /*
- * Maximum number of data queue legal values are between 1 and 0x8000
- */
- __virtio32 max_dataqueues;
-
- /* Specifies the services mask which the devcie support,
- see VIRTIO_CRYPTO_SERVICE_* above */
- __virtio32 crypto_services;
-
- /* Detailed algorithms mask */
- __virtio32 cipher_algo_l;
- __virtio32 cipher_algo_h;
- __virtio32 hash_algo;
- __virtio32 mac_algo_l;
- __virtio32 mac_algo_h;
- __virtio32 aead_algo;
-
- /* Maximum length of cipher key */
- uint32_t max_cipher_key_len;
- /* Maximum length of authenticated key */
- uint32_t max_auth_key_len;
-
- __virtio32 reserve;
-
- /* The maximum size of per request's content */
- __virtio64 max_size;
+ /* See VIRTIO_CRYPTO_S_* above */
+ uint32_t status;
+
+ /*
+ * Maximum number of data queues
+ */
+ uint32_t max_dataqueues;
+
+ /*
+ * Specifies the services mask which the device supports,
+ * see VIRTIO_CRYPTO_SERVICE_* above
+ */
+ uint32_t crypto_services;
+
+ /* Detailed algorithms mask */
+ uint32_t cipher_algo_l;
+ uint32_t cipher_algo_h;
+ uint32_t hash_algo;
+ uint32_t mac_algo_l;
+ uint32_t mac_algo_h;
+ uint32_t aead_algo;
+ /* Maximum length of cipher key */
+ uint32_t max_cipher_key_len;
+ /* Maximum length of authenticated key */
+ uint32_t max_auth_key_len;
+ uint32_t reserve;
+ /* Maximum size of each crypto request's content */
+ uint64_t max_size;
};
struct virtio_crypto_inhdr {
- /* See VIRTIO_CRYPTO_* above */
- uint8_t status;
+ /* See VIRTIO_CRYPTO_* above */
+ uint8_t status;
};
-
-#endif /* _LINUX_VIRTIO_CRYPTO_H */
+#endif
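
The padding arrays added to the data-request structures above give every per-service request the same footprint inside struct virtio_crypto_op_data_req. A minimal sketch of checking that layout at build time, assuming the structures land without extra compiler padding and an include path along the lines QEMU uses for its standard headers (both are assumptions of this example):

    /* build-time layout check for the virtio-crypto data requests */
    #include "standard-headers/linux/virtio_crypto.h"   /* assumed path */

    /* Every service-specific request pads out to 48 bytes, so the union in
     * virtio_crypto_op_data_req keeps a fixed size across services. */
    _Static_assert(sizeof(struct virtio_crypto_hash_data_req) == 48, "hash req size");
    _Static_assert(sizeof(struct virtio_crypto_mac_data_req)  == 48, "mac req size");
    _Static_assert(sizeof(struct virtio_crypto_sym_data_req)  == 48, "sym req size");
    _Static_assert(sizeof(struct virtio_crypto_aead_data_req) == 48, "aead req size");
    /* 24-byte header plus the 48-byte request union */
    _Static_assert(sizeof(struct virtio_crypto_op_data_req)   == 72, "op data req size");
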
diff --git a/include/standard-headers/linux/virtio_mmio.h b/include/standard-headers/linux/virtio_mmio.h
new file mode 100644
index 0000000000..c4b09689ab
--- /dev/null
+++ b/include/standard-headers/linux/virtio_mmio.h
@@ -0,0 +1,141 @@
+/*
+ * Virtio platform device driver
+ *
+ * Copyright 2011, ARM Ltd.
+ *
+ * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_VIRTIO_MMIO_H
+#define _LINUX_VIRTIO_MMIO_H
+
+/*
+ * Control registers
+ */
+
+/* Magic value ("virt" string) - Read Only */
+#define VIRTIO_MMIO_MAGIC_VALUE 0x000
+
+/* Virtio device version - Read Only */
+#define VIRTIO_MMIO_VERSION 0x004
+
+/* Virtio device ID - Read Only */
+#define VIRTIO_MMIO_DEVICE_ID 0x008
+
+/* Virtio vendor ID - Read Only */
+#define VIRTIO_MMIO_VENDOR_ID 0x00c
+
+/* Bitmask of the features supported by the device (host)
+ * (32 bits per set) - Read Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES 0x010
+
+/* Device (host) features set selector - Write Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014
+
+/* Bitmask of features activated by the driver (guest)
+ * (32 bits per set) - Write Only */
+#define VIRTIO_MMIO_DRIVER_FEATURES 0x020
+
+/* Activated features set selector - Write Only */
+#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024
+
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
+
+/* Guest's memory page size in bytes - Write Only */
+#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
+
+#endif
+
+
+/* Queue selector - Write Only */
+#define VIRTIO_MMIO_QUEUE_SEL 0x030
+
+/* Maximum size of the currently selected queue - Read Only */
+#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034
+
+/* Queue size for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_NUM 0x038
+
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
+
+/* Used Ring alignment for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c
+
+/* Guest's PFN for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_PFN 0x040
+
+#endif
+
+
+/* Ready bit for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_READY 0x044
+
+/* Queue notifier - Write Only */
+#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050
+
+/* Interrupt status - Read Only */
+#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060
+
+/* Interrupt acknowledge - Write Only */
+#define VIRTIO_MMIO_INTERRUPT_ACK 0x064
+
+/* Device status register - Read Write */
+#define VIRTIO_MMIO_STATUS 0x070
+
+/* Selected queue's Descriptor Table address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080
+#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084
+
+/* Selected queue's Available Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090
+#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094
+
+/* Selected queue's Used Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
+#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+
+/* Configuration atomicity value */
+#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
+
+/* The config space is defined by each driver as
+ * the per-driver configuration space - Read Write */
+#define VIRTIO_MMIO_CONFIG 0x100
+
+
+
+/*
+ * Interrupt flags (re: interrupt status & acknowledge registers)
+ */
+
+#define VIRTIO_MMIO_INT_VRING (1 << 0)
+#define VIRTIO_MMIO_INT_CONFIG (1 << 1)
+
+#endif
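
The register block above is all that is needed to recognise a virtio-mmio device. A small, hypothetical probe sketch; the read helper, the mapped base pointer and the 0x74726976 constant (the little-endian encoding of the "virt" magic string) are assumptions of this example, only the register offsets come from the header:

    #include <stdint.h>
    #include <stdbool.h>
    #include "standard-headers/linux/virtio_mmio.h"   /* assumed path */

    /* All registers are 32 bits wide; the offsets above are byte offsets. */
    static inline uint32_t mmio_read32(volatile void *base, unsigned long off)
    {
        return *(volatile uint32_t *)((volatile char *)base + off);
    }

    static bool virtio_mmio_probe(volatile void *base)
    {
        uint32_t magic   = mmio_read32(base, VIRTIO_MMIO_MAGIC_VALUE);
        uint32_t version = mmio_read32(base, VIRTIO_MMIO_VERSION);

        /* "virt" magic; version 1 is legacy, version 2 is the modern layout */
        return magic == 0x74726976 && (version == 1 || version == 2);
    }
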
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
index 1c9dad1b72..2bf16b203c 100644
--- a/include/sysemu/arch_init.h
+++ b/include/sysemu/arch_init.h
@@ -23,13 +23,12 @@ enum {
QEMU_ARCH_UNICORE32 = (1 << 14),
QEMU_ARCH_MOXIE = (1 << 15),
QEMU_ARCH_TRICORE = (1 << 16),
+ QEMU_ARCH_NIOS2 = (1 << 17),
};
extern const uint32_t arch_type;
void select_soundhw(const char *optarg);
-void do_acpitable_option(const QemuOpts *opts);
-void do_smbios_option(QemuOpts *opts);
void audio_init(void);
int kvm_available(void);
int xen_available(void);
diff --git a/include/sysemu/cryptodev.h b/include/sysemu/cryptodev.h
index 84526c0d35..a9d0d1ee25 100644
--- a/include/sysemu/cryptodev.h
+++ b/include/sysemu/cryptodev.h
@@ -202,6 +202,8 @@ struct CryptoDevBackend {
Object parent_obj;
bool ready;
+ /* Tag whether the cryptodev backend is used by virtio-crypto or not */
+ bool is_used;
CryptoDevBackendConf conf;
};
@@ -295,4 +297,44 @@ int cryptodev_backend_crypto_operation(
void *opaque,
uint32_t queue_index, Error **errp);
+/**
+ * cryptodev_backend_set_used:
+ * @backend: the cryptodev backend object
+ * @used: true or false
+ *
+ * Set whether the cryptodev backend is used by virtio-crypto or not
+ */
+void cryptodev_backend_set_used(CryptoDevBackend *backend, bool used);
+
+/**
+ * cryptodev_backend_is_used:
+ * @backend: the cryptodev backend object
+ *
+ * Return whether the cryptodev backend is used
+ * by virtio-crypto or not
+ *
+ * Returns: true on used, or false on not used
+ */
+bool cryptodev_backend_is_used(CryptoDevBackend *backend);
+
+/**
+ * cryptodev_backend_set_ready:
+ * @backend: the cryptodev backend object
+ * @ready: true or false
+ *
+ * Set whether the cryptodev backend is ready or not; this is called
+ * by the children of the cryptodev backend interface.
+ */
+void cryptodev_backend_set_ready(CryptoDevBackend *backend, bool ready);
+
+/**
+ * cryptodev_backend_is_ready:
+ * @backend: the cryptodev backend object
+ *
+ * Return whether the cryptodev backend is ready or not
+ *
+ * Returns: true on ready, or false on not ready
+ */
+bool cryptodev_backend_is_ready(CryptoDevBackend *backend);
+
#endif /* CRYPTODEV_H */
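
A sketch of how a frontend could use the new helpers to claim a backend exclusively and check its readiness; the surrounding function, its name and the error messages are hypothetical, only the four helpers and error_setg() are existing APIs:

    static void my_frontend_attach(CryptoDevBackend *backend, Error **errp)
    {
        if (cryptodev_backend_is_used(backend)) {
            error_setg(errp, "cryptodev backend is already in use");
            return;
        }
        cryptodev_backend_set_used(backend, true);

        /* ... frontend setup; the backend child flips readiness itself ... */

        if (!cryptodev_backend_is_ready(backend)) {
            error_setg(errp, "cryptodev backend is not ready");
            cryptodev_backend_set_used(backend, false);
        }
    }
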
diff --git a/include/sysemu/hax.h b/include/sysemu/hax.h
new file mode 100644
index 0000000000..d9f023918e
--- /dev/null
+++ b/include/sysemu/hax.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * Copyright 2016 Google, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_HAX_H
+#define QEMU_HAX_H
+
+#include "config-host.h"
+#include "qemu-common.h"
+
+int hax_sync_vcpus(void);
+int hax_init_vcpu(CPUState *cpu);
+int hax_smp_cpu_exec(CPUState *cpu);
+int hax_populate_ram(uint64_t va, uint32_t size);
+
+void hax_cpu_synchronize_state(CPUState *cpu);
+void hax_cpu_synchronize_post_reset(CPUState *cpu);
+void hax_cpu_synchronize_post_init(CPUState *cpu);
+
+#ifdef CONFIG_HAX
+
+int hax_enabled(void);
+
+#include "hw/hw.h"
+#include "qemu/bitops.h"
+#include "exec/memory.h"
+int hax_vcpu_destroy(CPUState *cpu);
+void hax_raise_event(CPUState *cpu);
+void hax_reset_vcpu_state(void *opaque);
+#include "target/i386/hax-interface.h"
+#include "target/i386/hax-i386.h"
+
+#else /* CONFIG_HAX */
+
+#define hax_enabled() (0)
+
+#endif /* CONFIG_HAX */
+
+#endif /* QEMU_HAX_H */
diff --git a/include/sysemu/hostmem.h b/include/sysemu/hostmem.h
index 678232af40..ecae0cff19 100644
--- a/include/sysemu/hostmem.h
+++ b/include/sysemu/hostmem.h
@@ -52,6 +52,7 @@ struct HostMemoryBackend {
Object parent;
/* protected */
+ char *id;
uint64_t size;
bool merge, dump;
bool prealloc, force_prealloc, is_mapped;
diff --git a/include/sysemu/hw_accel.h b/include/sysemu/hw_accel.h
new file mode 100644
index 0000000000..c9b3105bc7
--- /dev/null
+++ b/include/sysemu/hw_accel.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU Hardware accelerators support
+ *
+ * Copyright 2016 Google, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_HW_ACCEL_H
+#define QEMU_HW_ACCEL_H
+
+#include "qom/cpu.h"
+#include "sysemu/hax.h"
+#include "sysemu/kvm.h"
+
+static inline void cpu_synchronize_state(CPUState *cpu)
+{
+ if (kvm_enabled()) {
+ kvm_cpu_synchronize_state(cpu);
+ }
+ if (hax_enabled()) {
+ hax_cpu_synchronize_state(cpu);
+ }
+}
+
+static inline void cpu_synchronize_post_reset(CPUState *cpu)
+{
+ if (kvm_enabled()) {
+ kvm_cpu_synchronize_post_reset(cpu);
+ }
+ if (hax_enabled()) {
+ hax_cpu_synchronize_post_reset(cpu);
+ }
+}
+
+static inline void cpu_synchronize_post_init(CPUState *cpu)
+{
+ if (kvm_enabled()) {
+ kvm_cpu_synchronize_post_init(cpu);
+ }
+ if (hax_enabled()) {
+ hax_cpu_synchronize_post_init(cpu);
+ }
+}
+
+#endif /* QEMU_HW_ACCEL_H */
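
These inline wrappers replace the KVM-only hooks removed from sysemu/kvm.h further down, so callers no longer need to care which accelerator is active. A hypothetical caller, for illustration only:

    #include "sysemu/hw_accel.h"

    static void dump_vcpu(CPUState *cpu)
    {
        /* Pull the latest register state out of KVM or HAX before touching it. */
        cpu_synchronize_state(cpu);

        /* ... inspect the architecture registers via cpu->env_ptr here ... */
    }
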
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index 68ac2de83a..e6da1a4087 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -28,6 +28,11 @@ typedef struct {
QemuCond init_done_cond; /* is thread initialization done? */
bool stopping;
int thread_id;
+
+ /* AioContext poll parameters */
+ int64_t poll_max_ns;
+ int64_t poll_grow;
+ int64_t poll_shrink;
} IOThread;
#define IOTHREAD(obj) \
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index df67cc0672..3045ee7678 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -461,29 +461,6 @@ void kvm_cpu_synchronize_state(CPUState *cpu);
void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu);
-/* generic hooks - to be moved/refactored once there are more users */
-
-static inline void cpu_synchronize_state(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_state(cpu);
- }
-}
-
-static inline void cpu_synchronize_post_reset(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_post_reset(cpu);
- }
-}
-
-static inline void cpu_synchronize_post_init(CPUState *cpu)
-{
- if (kvm_enabled()) {
- kvm_cpu_synchronize_post_init(cpu);
- }
-}
-
/**
* kvm_irqchip_add_msi_route - Add MSI route for specific vector
* @s: KVM state
diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h
index 4da808a6e9..8f09dcf918 100644
--- a/include/sysemu/numa.h
+++ b/include/sysemu/numa.h
@@ -17,7 +17,7 @@ struct numa_addr_range {
typedef struct node_info {
uint64_t node_mem;
- DECLARE_BITMAP(node_cpu, MAX_CPUMASK_BITS);
+ unsigned long *node_cpu;
struct HostMemoryBackend *node_memdev;
bool present;
QLIST_HEAD(, numa_addr_range) addr; /* List to store address ranges */
diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h
index f80d6d28e8..abb35ca8c9 100644
--- a/include/sysemu/replay.h
+++ b/include/sysemu/replay.h
@@ -39,6 +39,8 @@ enum ReplayCheckpoint {
};
typedef enum ReplayCheckpoint ReplayCheckpoint;
+typedef struct ReplayNetState ReplayNetState;
+
extern ReplayMode replay_mode;
/* Replay process control functions */
@@ -137,4 +139,14 @@ void replay_char_read_all_save_error(int res);
/*! Writes character read_all execution result into the replay log. */
void replay_char_read_all_save_buf(uint8_t *buf, int offset);
+/* Network */
+
+/*! Registers replay network filter attached to some backend. */
+ReplayNetState *replay_register_net(NetFilterState *nfs);
+/*! Unregisters replay network filter. */
+void replay_unregister_net(ReplayNetState *rns);
+/*! Called to write network packet to the replay log. */
+void replay_net_packet_event(ReplayNetState *rns, unsigned flags,
+ const struct iovec *iov, int iovcnt);
+
#endif
diff --git a/include/sysemu/reset.h b/include/sysemu/reset.h
new file mode 100644
index 0000000000..0b0d6d7598
--- /dev/null
+++ b/include/sysemu/reset.h
@@ -0,0 +1,10 @@
+#ifndef QEMU_SYSEMU_RESET_H
+#define QEMU_SYSEMU_RESET_H
+
+typedef void QEMUResetHandler(void *opaque);
+
+void qemu_register_reset(QEMUResetHandler *func, void *opaque);
+void qemu_unregister_reset(QEMUResetHandler *func, void *opaque);
+void qemu_devices_reset(void);
+
+#endif
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index 66c6f1577e..ff8ffb5e47 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -63,7 +63,6 @@ void qemu_system_vmstop_request_prepare(void);
int qemu_shutdown_requested_get(void);
int qemu_reset_requested_get(void);
void qemu_system_killed(int signal, pid_t pid);
-void qemu_devices_reset(void);
void qemu_system_reset(bool report);
void qemu_system_guest_panicked(void);
size_t qemu_target_page_bits(void);
@@ -168,13 +167,6 @@ extern int mem_prealloc;
#define MAX_NODES 128
#define NUMA_NODE_UNASSIGNED MAX_NODES
-/* The following shall be true for all CPUs:
- * cpu->cpu_index < max_cpus <= MAX_CPUMASK_BITS
- *
- * Note that cpu->get_arch_id() may be larger than MAX_CPUMASK_BITS.
- */
-#define MAX_CPUMASK_BITS 288
-
#define MAX_OPTION_ROMS 16
typedef struct QEMUOptionRom {
const char *name;
diff --git a/include/ui/console.h b/include/ui/console.h
index e2589e2134..b59e7b8c15 100644
--- a/include/ui/console.h
+++ b/include/ui/console.h
@@ -337,7 +337,10 @@ static inline pixman_format_code_t surface_format(DisplaySurface *s)
}
#ifdef CONFIG_CURSES
+/* KEY_EVENT is defined in wincon.h and in curses.h. Avoid redefinition. */
+#undef KEY_EVENT
#include <curses.h>
+#undef KEY_EVENT
typedef chtype console_ch_t;
extern chtype vga_to_curses[];
#else
@@ -394,6 +397,10 @@ uint32_t qemu_console_get_head(QemuConsole *con);
QemuUIInfo *qemu_console_get_ui_info(QemuConsole *con);
int qemu_console_get_width(QemuConsole *con, int fallback);
int qemu_console_get_height(QemuConsole *con, int fallback);
+/* Return the low-level window id for the console */
+int qemu_console_get_window_id(QemuConsole *con);
+/* Set the low-level window id for the console */
+void qemu_console_set_window_id(QemuConsole *con, int window_id);
void console_select(unsigned int index);
void qemu_console_resize(QemuConsole *con, int width, int height);
diff --git a/include/ui/gtk.h b/include/ui/gtk.h
index 42ca0fea8b..b3b50059c7 100644
--- a/include/ui/gtk.h
+++ b/include/ui/gtk.h
@@ -18,6 +18,10 @@
#include <X11/XKBlib.h>
#endif
+#ifdef GDK_WINDOWING_WAYLAND
+#include <gdk/gdkwayland.h>
+#endif
+
#if defined(CONFIG_OPENGL)
#include "ui/egl-helpers.h"
#include "ui/egl-context.h"
diff --git a/io/Makefile.objs b/io/Makefile.objs
index 9d8337d89a..12983cca79 100644
--- a/io/Makefile.objs
+++ b/io/Makefile.objs
@@ -7,4 +7,5 @@ io-obj-y += channel-tls.o
io-obj-y += channel-watch.o
io-obj-y += channel-websock.o
io-obj-y += channel-util.o
+io-obj-y += dns-resolver.o
io-obj-y += task.o
diff --git a/io/channel-socket.c b/io/channel-socket.c
index d7e03f6266..f385233f18 100644
--- a/io/channel-socket.c
+++ b/io/channel-socket.c
@@ -156,20 +156,16 @@ int qio_channel_socket_connect_sync(QIOChannelSocket *ioc,
}
-static int qio_channel_socket_connect_worker(QIOTask *task,
- Error **errp,
- gpointer opaque)
+static void qio_channel_socket_connect_worker(QIOTask *task,
+ gpointer opaque)
{
QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task));
SocketAddress *addr = opaque;
- int ret;
+ Error *err = NULL;
- ret = qio_channel_socket_connect_sync(ioc,
- addr,
- errp);
+ qio_channel_socket_connect_sync(ioc, addr, &err);
- object_unref(OBJECT(ioc));
- return ret;
+ qio_task_set_error(task, err);
}
@@ -219,20 +215,16 @@ int qio_channel_socket_listen_sync(QIOChannelSocket *ioc,
}
-static int qio_channel_socket_listen_worker(QIOTask *task,
- Error **errp,
- gpointer opaque)
+static void qio_channel_socket_listen_worker(QIOTask *task,
+ gpointer opaque)
{
QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task));
SocketAddress *addr = opaque;
- int ret;
+ Error *err = NULL;
- ret = qio_channel_socket_listen_sync(ioc,
- addr,
- errp);
+ qio_channel_socket_listen_sync(ioc, addr, &err);
- object_unref(OBJECT(ioc));
- return ret;
+ qio_task_set_error(task, err);
}
@@ -295,22 +287,18 @@ static void qio_channel_socket_dgram_worker_free(gpointer opaque)
g_free(data);
}
-static int qio_channel_socket_dgram_worker(QIOTask *task,
- Error **errp,
- gpointer opaque)
+static void qio_channel_socket_dgram_worker(QIOTask *task,
+ gpointer opaque)
{
QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task));
struct QIOChannelSocketDGramWorkerData *data = opaque;
- int ret;
+ Error *err = NULL;
/* socket_dgram() blocks in DNS lookups, so we must use a thread */
- ret = qio_channel_socket_dgram_sync(ioc,
- data->localAddr,
- data->remoteAddr,
- errp);
+ qio_channel_socket_dgram_sync(ioc, data->localAddr,
+ data->remoteAddr, &err);
- object_unref(OBJECT(ioc));
- return ret;
+ qio_task_set_error(task, err);
}
diff --git a/io/channel-tls.c b/io/channel-tls.c
index d24dc8c613..f25ab0ae53 100644
--- a/io/channel-tls.c
+++ b/io/channel-tls.c
@@ -153,8 +153,9 @@ static void qio_channel_tls_handshake_task(QIOChannelTLS *ioc,
if (qcrypto_tls_session_handshake(ioc->session, &err) < 0) {
trace_qio_channel_tls_handshake_fail(ioc);
- qio_task_abort(task, err);
- goto cleanup;
+ qio_task_set_error(task, err);
+ qio_task_complete(task);
+ return;
}
status = qcrypto_tls_session_get_handshake_status(ioc->session);
@@ -163,10 +164,10 @@ static void qio_channel_tls_handshake_task(QIOChannelTLS *ioc,
if (qcrypto_tls_session_check_credentials(ioc->session,
&err) < 0) {
trace_qio_channel_tls_credentials_deny(ioc);
- qio_task_abort(task, err);
- goto cleanup;
+ qio_task_set_error(task, err);
+ } else {
+ trace_qio_channel_tls_credentials_allow(ioc);
}
- trace_qio_channel_tls_credentials_allow(ioc);
qio_task_complete(task);
} else {
GIOCondition condition;
@@ -183,9 +184,6 @@ static void qio_channel_tls_handshake_task(QIOChannelTLS *ioc,
task,
NULL);
}
-
- cleanup:
- error_free(err);
}
@@ -200,8 +198,6 @@ static gboolean qio_channel_tls_handshake_io(QIOChannel *ioc,
qio_channel_tls_handshake_task(
tioc, task);
- object_unref(OBJECT(tioc));
-
return FALSE;
}
diff --git a/io/channel-websock.c b/io/channel-websock.c
index f45bced82a..e47279a1ae 100644
--- a/io/channel-websock.c
+++ b/io/channel-websock.c
@@ -279,8 +279,8 @@ static gboolean qio_channel_websock_handshake_send(QIOChannel *ioc,
if (ret < 0) {
trace_qio_channel_websock_handshake_fail(ioc);
- qio_task_abort(task, err);
- error_free(err);
+ qio_task_set_error(task, err);
+ qio_task_complete(task);
return FALSE;
}
@@ -307,8 +307,8 @@ static gboolean qio_channel_websock_handshake_io(QIOChannel *ioc,
ret = qio_channel_websock_handshake_read(wioc, &err);
if (ret < 0) {
trace_qio_channel_websock_handshake_fail(ioc);
- qio_task_abort(task, err);
- error_free(err);
+ qio_task_set_error(task, err);
+ qio_task_complete(task);
return FALSE;
}
if (ret == 0) {
diff --git a/io/dns-resolver.c b/io/dns-resolver.c
new file mode 100644
index 0000000000..0ac6b23c02
--- /dev/null
+++ b/io/dns-resolver.c
@@ -0,0 +1,276 @@
+/*
+ * QEMU DNS resolver
+ *
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "io/dns-resolver.h"
+#include "qapi/clone-visitor.h"
+#include "qemu/sockets.h"
+#include "qapi/error.h"
+#include "qemu/cutils.h"
+
+#ifndef AI_NUMERICSERV
+# define AI_NUMERICSERV 0
+#endif
+
+static QIODNSResolver *instance;
+static GOnce instance_init = G_ONCE_INIT;
+
+static gpointer qio_dns_resolve_init_instance(gpointer unused G_GNUC_UNUSED)
+{
+ instance = QIO_DNS_RESOLVER(object_new(TYPE_QIO_DNS_RESOLVER));
+ return NULL;
+}
+
+QIODNSResolver *qio_dns_resolver_get_instance(void)
+{
+ g_once(&instance_init, qio_dns_resolve_init_instance, NULL);
+ return instance;
+}
+
+static int qio_dns_resolver_lookup_sync_inet(QIODNSResolver *resolver,
+ SocketAddress *addr,
+ size_t *naddrs,
+ SocketAddress ***addrs,
+ Error **errp)
+{
+ struct addrinfo ai, *res, *e;
+ InetSocketAddress *iaddr = addr->u.inet.data;
+ char port[33];
+ char uaddr[INET6_ADDRSTRLEN + 1];
+ char uport[33];
+ int rc;
+ Error *err = NULL;
+ size_t i;
+
+ *naddrs = 0;
+ *addrs = NULL;
+
+ memset(&ai, 0, sizeof(ai));
+ ai.ai_flags = AI_PASSIVE;
+ if (iaddr->has_numeric && iaddr->numeric) {
+ ai.ai_flags |= AI_NUMERICHOST | AI_NUMERICSERV;
+ }
+ ai.ai_family = inet_ai_family_from_address(iaddr, &err);
+ ai.ai_socktype = SOCK_STREAM;
+
+ if (err) {
+ error_propagate(errp, err);
+ return -1;
+ }
+
+ if (iaddr->host == NULL) {
+ error_setg(errp, "host not specified");
+ return -1;
+ }
+ if (iaddr->port != NULL) {
+ pstrcpy(port, sizeof(port), iaddr->port);
+ } else {
+ port[0] = '\0';
+ }
+
+ rc = getaddrinfo(strlen(iaddr->host) ? iaddr->host : NULL,
+ strlen(port) ? port : NULL, &ai, &res);
+ if (rc != 0) {
+ error_setg(errp, "address resolution failed for %s:%s: %s",
+ iaddr->host, port, gai_strerror(rc));
+ return -1;
+ }
+
+ for (e = res; e != NULL; e = e->ai_next) {
+ (*naddrs)++;
+ }
+
+ *addrs = g_new0(SocketAddress *, *naddrs);
+
+ /* create socket + bind */
+ for (i = 0, e = res; e != NULL; i++, e = e->ai_next) {
+ SocketAddress *newaddr = g_new0(SocketAddress, 1);
+ InetSocketAddress *newiaddr = g_new0(InetSocketAddress, 1);
+ newaddr->u.inet.data = newiaddr;
+ newaddr->type = SOCKET_ADDRESS_KIND_INET;
+
+ getnameinfo((struct sockaddr *)e->ai_addr, e->ai_addrlen,
+ uaddr, INET6_ADDRSTRLEN, uport, 32,
+ NI_NUMERICHOST | NI_NUMERICSERV);
+
+ *newiaddr = (InetSocketAddress){
+ .host = g_strdup(uaddr),
+ .port = g_strdup(uport),
+ .has_numeric = true,
+ .numeric = true,
+ .has_to = iaddr->has_to,
+ .to = iaddr->to,
+ .has_ipv4 = false,
+ .has_ipv6 = false,
+ };
+
+ (*addrs)[i] = newaddr;
+ }
+ freeaddrinfo(res);
+ return 0;
+}
+
+
+static int qio_dns_resolver_lookup_sync_nop(QIODNSResolver *resolver,
+ SocketAddress *addr,
+ size_t *naddrs,
+ SocketAddress ***addrs,
+ Error **errp)
+{
+ *naddrs = 1;
+ *addrs = g_new0(SocketAddress *, 1);
+ (*addrs)[0] = QAPI_CLONE(SocketAddress, addr);
+
+ return 0;
+}
+
+
+int qio_dns_resolver_lookup_sync(QIODNSResolver *resolver,
+ SocketAddress *addr,
+ size_t *naddrs,
+ SocketAddress ***addrs,
+ Error **errp)
+{
+ switch (addr->type) {
+ case SOCKET_ADDRESS_KIND_INET:
+ return qio_dns_resolver_lookup_sync_inet(resolver,
+ addr,
+ naddrs,
+ addrs,
+ errp);
+
+ case SOCKET_ADDRESS_KIND_UNIX:
+ case SOCKET_ADDRESS_KIND_VSOCK:
+ return qio_dns_resolver_lookup_sync_nop(resolver,
+ addr,
+ naddrs,
+ addrs,
+ errp);
+
+ default:
+ error_setg(errp, "Unknown socket address kind");
+ return -1;
+ }
+}
+
+
+struct QIODNSResolverLookupData {
+ SocketAddress *addr;
+ SocketAddress **addrs;
+ size_t naddrs;
+};
+
+
+static void qio_dns_resolver_lookup_data_free(gpointer opaque)
+{
+ struct QIODNSResolverLookupData *data = opaque;
+ size_t i;
+
+ qapi_free_SocketAddress(data->addr);
+ for (i = 0; i < data->naddrs; i++) {
+ qapi_free_SocketAddress(data->addrs[i]);
+ }
+
+ g_free(data->addrs);
+ g_free(data);
+}
+
+
+static void qio_dns_resolver_lookup_worker(QIOTask *task,
+ gpointer opaque)
+{
+ QIODNSResolver *resolver = QIO_DNS_RESOLVER(qio_task_get_source(task));
+ struct QIODNSResolverLookupData *data = opaque;
+ Error *err = NULL;
+
+ qio_dns_resolver_lookup_sync(resolver,
+ data->addr,
+ &data->naddrs,
+ &data->addrs,
+ &err);
+ if (err) {
+ qio_task_set_error(task, err);
+ } else {
+ qio_task_set_result_pointer(task, opaque, NULL);
+ }
+
+ object_unref(OBJECT(resolver));
+}
+
+
+void qio_dns_resolver_lookup_async(QIODNSResolver *resolver,
+ SocketAddress *addr,
+ QIOTaskFunc func,
+ gpointer opaque,
+ GDestroyNotify notify)
+{
+ QIOTask *task;
+ struct QIODNSResolverLookupData *data =
+ g_new0(struct QIODNSResolverLookupData, 1);
+
+ data->addr = QAPI_CLONE(SocketAddress, addr);
+
+ task = qio_task_new(OBJECT(resolver), func, opaque, notify);
+
+ qio_task_run_in_thread(task,
+ qio_dns_resolver_lookup_worker,
+ data,
+ qio_dns_resolver_lookup_data_free);
+}
+
+
+void qio_dns_resolver_lookup_result(QIODNSResolver *resolver,
+ QIOTask *task,
+ size_t *naddrs,
+ SocketAddress ***addrs)
+{
+ struct QIODNSResolverLookupData *data =
+ qio_task_get_result_pointer(task);
+ size_t i;
+
+ *naddrs = 0;
+ *addrs = NULL;
+ if (!data) {
+ return;
+ }
+
+ *naddrs = data->naddrs;
+ *addrs = g_new0(SocketAddress *, data->naddrs);
+ for (i = 0; i < data->naddrs; i++) {
+ (*addrs)[i] = QAPI_CLONE(SocketAddress, data->addrs[i]);
+ }
+}
+
+
+static const TypeInfo qio_dns_resolver_info = {
+ .parent = TYPE_OBJECT,
+ .name = TYPE_QIO_DNS_RESOLVER,
+ .instance_size = sizeof(QIODNSResolver),
+ .class_size = sizeof(QIODNSResolverClass),
+};
+
+
+static void qio_dns_resolver_register_types(void)
+{
+ type_register_static(&qio_dns_resolver_info);
+}
+
+
+type_init(qio_dns_resolver_register_types);
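
A sketch of consuming the asynchronous lookup API; the callback follows the reworked QIOTaskFunc(task, opaque) signature introduced in this series, error_report_err() is QEMU's stock error reporter, and the function names are hypothetical:

    static void my_lookup_done(QIOTask *task, gpointer opaque)
    {
        QIODNSResolver *resolver = QIO_DNS_RESOLVER(qio_task_get_source(task));
        SocketAddress **addrs;
        size_t naddrs, i;
        Error *err = NULL;

        if (qio_task_propagate_error(task, &err)) {
            error_report_err(err);
            return;
        }

        qio_dns_resolver_lookup_result(resolver, task, &naddrs, &addrs);
        for (i = 0; i < naddrs; i++) {
            /* connect/bind against addrs[i] here */
            qapi_free_SocketAddress(addrs[i]);
        }
        g_free(addrs);
    }

    /* caller: resolve 'addr' without blocking the main loop */
    static void start_lookup(SocketAddress *addr)
    {
        qio_dns_resolver_lookup_async(qio_dns_resolver_get_instance(),
                                      addr, my_lookup_done, NULL, NULL);
    }
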
diff --git a/io/task.c b/io/task.c
index c7f97a9b16..60bf1a94d5 100644
--- a/io/task.c
+++ b/io/task.c
@@ -29,6 +29,9 @@ struct QIOTask {
QIOTaskFunc func;
gpointer opaque;
GDestroyNotify destroy;
+ Error *err;
+ gpointer result;
+ GDestroyNotify destroyResult;
};
@@ -57,6 +60,12 @@ static void qio_task_free(QIOTask *task)
if (task->destroy) {
task->destroy(task->opaque);
}
+ if (task->destroyResult) {
+ task->destroyResult(task->result);
+ }
+ if (task->err) {
+ error_free(task->err);
+ }
object_unref(task->source);
g_free(task);
@@ -68,8 +77,6 @@ struct QIOTaskThreadData {
QIOTaskWorker worker;
gpointer opaque;
GDestroyNotify destroy;
- Error *err;
- int ret;
};
@@ -78,13 +85,8 @@ static gboolean gio_task_thread_result(gpointer opaque)
struct QIOTaskThreadData *data = opaque;
trace_qio_task_thread_result(data->task);
- if (data->ret == 0) {
- qio_task_complete(data->task);
- } else {
- qio_task_abort(data->task, data->err);
- }
+ qio_task_complete(data->task);
- error_free(data->err);
if (data->destroy) {
data->destroy(data->opaque);
}
@@ -100,10 +102,7 @@ static gpointer qio_task_thread_worker(gpointer opaque)
struct QIOTaskThreadData *data = opaque;
trace_qio_task_thread_run(data->task);
- data->ret = data->worker(data->task, &data->err, data->opaque);
- if (data->ret < 0 && data->err == NULL) {
- error_setg(&data->err, "Task worker failed but did not set an error");
- }
+ data->worker(data->task, data->opaque);
/* We're running in the background thread, and must only
* ever report the task results in the main event loop
@@ -140,22 +139,47 @@ void qio_task_run_in_thread(QIOTask *task,
void qio_task_complete(QIOTask *task)
{
- task->func(task->source, NULL, task->opaque);
+ task->func(task, task->opaque);
trace_qio_task_complete(task);
qio_task_free(task);
}
-void qio_task_abort(QIOTask *task,
- Error *err)
+
+void qio_task_set_error(QIOTask *task,
+ Error *err)
{
- task->func(task->source, err, task->opaque);
- trace_qio_task_abort(task);
- qio_task_free(task);
+ error_propagate(&task->err, err);
+}
+
+
+bool qio_task_propagate_error(QIOTask *task,
+ Error **errp)
+{
+ if (task->err) {
+ error_propagate(errp, task->err);
+ return true;
+ }
+
+ return false;
+}
+
+
+void qio_task_set_result_pointer(QIOTask *task,
+ gpointer result,
+ GDestroyNotify destroy)
+{
+ task->result = result;
+ task->destroyResult = destroy;
+}
+
+
+gpointer qio_task_get_result_pointer(QIOTask *task)
+{
+ return task->result;
}
Object *qio_task_get_source(QIOTask *task)
{
- object_ref(task->source);
return task->source;
}
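
With this rework a thread worker no longer returns an int/Error pair; it records failure on the task with qio_task_set_error() and the completion callback recovers it with qio_task_propagate_error(). A minimal sketch of the new pattern, where the worker body, do_blocking_work(), and the source/data arguments are all hypothetical:

    static void my_worker(QIOTask *task, gpointer opaque)
    {
        Error *err = NULL;

        if (do_blocking_work(opaque, &err) < 0) {   /* hypothetical helper */
            qio_task_set_error(task, err);          /* completion will see it */
        }
    }

    static void my_complete(QIOTask *task, gpointer opaque)
    {
        Error *err = NULL;

        if (qio_task_propagate_error(task, &err)) {
            /* report or propagate err */
        }
    }

    /* wiring: run the worker in a thread, complete in the main loop */
    static void start_work(Object *source, gpointer data, GDestroyNotify data_free)
    {
        QIOTask *task = qio_task_new(source, my_complete, NULL, NULL);
        qio_task_run_in_thread(task, my_worker, data, data_free);
    }
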
diff --git a/io/trace-events b/io/trace-events
index e31b596ca1..ff993bef45 100644
--- a/io/trace-events
+++ b/io/trace-events
@@ -3,7 +3,6 @@
# io/task.c
qio_task_new(void *task, void *source, void *func, void *opaque) "Task new task=%p source=%p func=%p opaque=%p"
qio_task_complete(void *task) "Task complete task=%p"
-qio_task_abort(void *task) "Task abort task=%p"
qio_task_thread_start(void *task, void *worker, void *opaque) "Task thread start task=%p worker=%p opaque=%p"
qio_task_thread_run(void *task) "Task thread run task=%p"
qio_task_thread_exit(void *task) "Task thread exit task=%p"
diff --git a/iohandler.c b/iohandler.c
index f2fc8a9bd6..623b55b9ec 100644
--- a/iohandler.c
+++ b/iohandler.c
@@ -63,7 +63,15 @@ void qemu_set_fd_handler(int fd,
{
iohandler_init();
aio_set_fd_handler(iohandler_ctx, fd, false,
- fd_read, fd_write, opaque);
+ fd_read, fd_write, NULL, opaque);
+}
+
+void event_notifier_set_handler(EventNotifier *e,
+ EventNotifierHandler *handler)
+{
+ iohandler_init();
+ aio_set_event_notifier(iohandler_ctx, e, false,
+ handler, NULL);
}
/* reaping of zombies. right now we're not passing the status to
diff --git a/iothread.c b/iothread.c
index bd70344811..7bedde87e9 100644
--- a/iothread.c
+++ b/iothread.c
@@ -98,6 +98,18 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
return;
}
+ aio_context_set_poll_params(iothread->ctx,
+ iothread->poll_max_ns,
+ iothread->poll_grow,
+ iothread->poll_shrink,
+ &local_error);
+ if (local_error) {
+ error_propagate(errp, local_error);
+ aio_context_unref(iothread->ctx);
+ iothread->ctx = NULL;
+ return;
+ }
+
qemu_mutex_init(&iothread->init_done_lock);
qemu_cond_init(&iothread->init_done_cond);
@@ -120,10 +132,82 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
qemu_mutex_unlock(&iothread->init_done_lock);
}
+typedef struct {
+ const char *name;
+ ptrdiff_t offset; /* field's byte offset in IOThread struct */
+} PollParamInfo;
+
+static PollParamInfo poll_max_ns_info = {
+ "poll-max-ns", offsetof(IOThread, poll_max_ns),
+};
+static PollParamInfo poll_grow_info = {
+ "poll-grow", offsetof(IOThread, poll_grow),
+};
+static PollParamInfo poll_shrink_info = {
+ "poll-shrink", offsetof(IOThread, poll_shrink),
+};
+
+static void iothread_get_poll_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+ IOThread *iothread = IOTHREAD(obj);
+ PollParamInfo *info = opaque;
+ int64_t *field = (void *)iothread + info->offset;
+
+ visit_type_int64(v, name, field, errp);
+}
+
+static void iothread_set_poll_param(Object *obj, Visitor *v,
+ const char *name, void *opaque, Error **errp)
+{
+ IOThread *iothread = IOTHREAD(obj);
+ PollParamInfo *info = opaque;
+ int64_t *field = (void *)iothread + info->offset;
+ Error *local_err = NULL;
+ int64_t value;
+
+ visit_type_int64(v, name, &value, &local_err);
+ if (local_err) {
+ goto out;
+ }
+
+ if (value < 0) {
+ error_setg(&local_err, "%s value must be in range [0, %"PRId64"]",
+ info->name, INT64_MAX);
+ goto out;
+ }
+
+ *field = value;
+
+ if (iothread->ctx) {
+ aio_context_set_poll_params(iothread->ctx,
+ iothread->poll_max_ns,
+ iothread->poll_grow,
+ iothread->poll_shrink,
+ &local_err);
+ }
+
+out:
+ error_propagate(errp, local_err);
+}
+
static void iothread_class_init(ObjectClass *klass, void *class_data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
ucc->complete = iothread_complete;
+
+ object_class_property_add(klass, "poll-max-ns", "int",
+ iothread_get_poll_param,
+ iothread_set_poll_param,
+ NULL, &poll_max_ns_info, &error_abort);
+ object_class_property_add(klass, "poll-grow", "int",
+ iothread_get_poll_param,
+ iothread_set_poll_param,
+ NULL, &poll_grow_info, &error_abort);
+ object_class_property_add(klass, "poll-shrink", "int",
+ iothread_get_poll_param,
+ iothread_set_poll_param,
+ NULL, &poll_shrink_info, &error_abort);
}
static const TypeInfo iothread_info = {
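
The three properties registered here become settable wherever an iothread object is created. For instance, an invocation along these lines (assumed, not taken from this patch) caps adaptive polling at 32 microseconds for a dedicated virtio-blk iothread:

    -object iothread,id=iothread0,poll-max-ns=32000,poll-grow=2,poll-shrink=0 \
    -device virtio-blk-pci,drive=drive0,iothread=iothread0
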
diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
index ac7a1f136a..1e86a3dd0d 100644
--- a/linux-headers/linux/vhost.h
+++ b/linux-headers/linux/vhost.h
@@ -172,8 +172,6 @@ struct vhost_memory {
#define VHOST_F_LOG_ALL 26
/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
#define VHOST_NET_F_VIRTIO_NET_HDR 27
-/* Vhost have device IOTLB */
-#define VHOST_F_DEVICE_IOTLB 63
/* VHOST_SCSI specific definitions */
diff --git a/linux-user/alpha/target_syscall.h b/linux-user/alpha/target_syscall.h
index b580fc5b37..3426cc5b4e 100644
--- a/linux-user/alpha/target_syscall.h
+++ b/linux-user/alpha/target_syscall.h
@@ -235,6 +235,8 @@ struct target_pt_regs {
#define TARGET_ENOTRECOVERABLE 137
#undef TARGET_ERFKILL
#define TARGET_ERFKILL 138
+#undef TARGET_EHWPOISON
+#define TARGET_EHWPOISON 139
// For sys_osf_getsysinfo
#define TARGET_GSI_UACPROC 8
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 547053c27a..c66cbbe84b 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -967,6 +967,63 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env
#endif /* TARGET_MICROBLAZE */
+#ifdef TARGET_NIOS2
+
+#define ELF_START_MMAP 0x80000000
+
+#define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2)
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_ALTERA_NIOS2
+
+static void init_thread(struct target_pt_regs *regs, struct image_info *infop)
+{
+ regs->ea = infop->entry;
+ regs->sp = infop->start_stack;
+ regs->estatus = 0x3;
+}
+
+#define ELF_EXEC_PAGESIZE 4096
+
+#define USE_ELF_CORE_DUMP
+#define ELF_NREG 49
+typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
+
+/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
+static void elf_core_copy_regs(target_elf_gregset_t *regs,
+ const CPUNios2State *env)
+{
+ int i;
+
+ (*regs)[0] = -1;
+ for (i = 1; i < 8; i++) /* r0-r7 */
+ (*regs)[i] = tswapreg(env->regs[i + 7]);
+
+ for (i = 8; i < 16; i++) /* r8-r15 */
+ (*regs)[i] = tswapreg(env->regs[i - 8]);
+
+ for (i = 16; i < 24; i++) /* r16-r23 */
+ (*regs)[i] = tswapreg(env->regs[i + 7]);
+ (*regs)[24] = -1; /* R_ET */
+ (*regs)[25] = -1; /* R_BT */
+ (*regs)[26] = tswapreg(env->regs[R_GP]);
+ (*regs)[27] = tswapreg(env->regs[R_SP]);
+ (*regs)[28] = tswapreg(env->regs[R_FP]);
+ (*regs)[29] = tswapreg(env->regs[R_EA]);
+ (*regs)[30] = -1; /* R_SSTATUS */
+ (*regs)[31] = tswapreg(env->regs[R_RA]);
+
+ (*regs)[32] = tswapreg(env->regs[R_PC]);
+
+ (*regs)[33] = -1; /* R_STATUS */
+ (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]);
+
+ for (i = 35; i < 49; i++) /* ... */
+ (*regs)[i] = -1;
+}
+
+#endif /* TARGET_NIOS2 */
+
#ifdef TARGET_OPENRISC
#define ELF_START_MMAP 0x08000000
@@ -1215,6 +1272,30 @@ static inline void init_thread(struct target_pt_regs *regs,
#endif /* TARGET_TILEGX */
+#ifdef TARGET_HPPA
+
+#define ELF_START_MMAP 0x80000000
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_PARISC
+#define ELF_PLATFORM "PARISC"
+#define STACK_GROWS_DOWN 0
+#define STACK_ALIGNMENT 64
+
+static inline void init_thread(struct target_pt_regs *regs,
+ struct image_info *infop)
+{
+ regs->iaoq[0] = infop->entry;
+ regs->iaoq[1] = infop->entry + 4;
+ regs->gr[23] = 0;
+ regs->gr[24] = infop->arg_start;
+ regs->gr[25] = (infop->arg_end - infop->arg_start) / sizeof(abi_ulong);
+ /* The top-of-stack contains a linkage buffer. */
+ regs->gr[30] = infop->start_stack + 64;
+ regs->gr[31] = infop->entry;
+}
+
+#endif /* TARGET_HPPA */
+
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif
@@ -1231,6 +1312,14 @@ static inline void init_thread(struct target_pt_regs *regs,
#define ELF_HWCAP 0
#endif
+#ifndef STACK_GROWS_DOWN
+#define STACK_GROWS_DOWN 1
+#endif
+
+#ifndef STACK_ALIGNMENT
+#define STACK_ALIGNMENT 16
+#endif
+
#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
@@ -1374,45 +1463,78 @@ static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
abi_ulong p, abi_ulong stack_limit)
{
char *tmp;
- int len, offset;
+ int len, i;
abi_ulong top = p;
if (!p) {
return 0; /* bullet-proofing */
}
- offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
+ if (STACK_GROWS_DOWN) {
+ int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
+ for (i = argc - 1; i >= 0; --i) {
+ tmp = argv[i];
+ if (!tmp) {
+ fprintf(stderr, "VFS: argc is wrong");
+ exit(-1);
+ }
+ len = strlen(tmp) + 1;
+ tmp += len;
- while (argc-- > 0) {
- tmp = argv[argc];
- if (!tmp) {
- fprintf(stderr, "VFS: argc is wrong");
- exit(-1);
+ if (len > (p - stack_limit)) {
+ return 0;
+ }
+ while (len) {
+ int bytes_to_copy = (len > offset) ? offset : len;
+ tmp -= bytes_to_copy;
+ p -= bytes_to_copy;
+ offset -= bytes_to_copy;
+ len -= bytes_to_copy;
+
+ memcpy_fromfs(scratch + offset, tmp, bytes_to_copy);
+
+ if (offset == 0) {
+ memcpy_to_target(p, scratch, top - p);
+ top = p;
+ offset = TARGET_PAGE_SIZE;
+ }
+ }
}
- len = strlen(tmp) + 1;
- tmp += len;
-
- if (len > (p - stack_limit)) {
- return 0;
+ if (p != top) {
+ memcpy_to_target(p, scratch + offset, top - p);
}
- while (len) {
- int bytes_to_copy = (len > offset) ? offset : len;
- tmp -= bytes_to_copy;
- p -= bytes_to_copy;
- offset -= bytes_to_copy;
- len -= bytes_to_copy;
-
- memcpy_fromfs(scratch + offset, tmp, bytes_to_copy);
-
- if (offset == 0) {
- memcpy_to_target(p, scratch, top - p);
- top = p;
- offset = TARGET_PAGE_SIZE;
+ } else {
+ int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE);
+ for (i = 0; i < argc; ++i) {
+ tmp = argv[i];
+ if (!tmp) {
+ fprintf(stderr, "VFS: argc is wrong");
+ exit(-1);
+ }
+ len = strlen(tmp) + 1;
+ if (len > (stack_limit - p)) {
+ return 0;
+ }
+ while (len) {
+ int bytes_to_copy = (len > remaining) ? remaining : len;
+
+ memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy);
+
+ tmp += bytes_to_copy;
+ remaining -= bytes_to_copy;
+ p += bytes_to_copy;
+ len -= bytes_to_copy;
+
+ if (remaining == 0) {
+ memcpy_to_target(top, scratch, p - top);
+ top = p;
+ remaining = TARGET_PAGE_SIZE;
+ }
}
}
- }
- if (offset) {
- memcpy_to_target(p, scratch + offset, top - p);
+ if (p != top) {
+ memcpy_to_target(top, scratch, p - top);
+ }
}
return p;
@@ -1447,11 +1569,15 @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
}
/* We reserve one extra page at the top of the stack as guard. */
- target_mprotect(error, guard, PROT_NONE);
-
- info->stack_limit = error + guard;
-
- return info->stack_limit + size - sizeof(void *);
+ if (STACK_GROWS_DOWN) {
+ target_mprotect(error, guard, PROT_NONE);
+ info->stack_limit = error + guard;
+ return info->stack_limit + size - sizeof(void *);
+ } else {
+ target_mprotect(error + size, guard, PROT_NONE);
+ info->stack_limit = error + size;
+ return error;
+ }
}
/* Map and zero the bss. We need to explicitly zero any fractional pages
@@ -1529,7 +1655,7 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
struct image_info *interp_info)
{
abi_ulong sp;
- abi_ulong sp_auxv;
+ abi_ulong u_argc, u_argv, u_envp, u_auxv;
int size;
int i;
abi_ulong u_rand_bytes;
@@ -1558,10 +1684,25 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
k_platform = ELF_PLATFORM;
if (k_platform) {
size_t len = strlen(k_platform) + 1;
- sp -= (len + n - 1) & ~(n - 1);
- u_platform = sp;
- /* FIXME - check return value of memcpy_to_target() for failure */
- memcpy_to_target(sp, k_platform, len);
+ if (STACK_GROWS_DOWN) {
+ sp -= (len + n - 1) & ~(n - 1);
+ u_platform = sp;
+ /* FIXME - check return value of memcpy_to_target() for failure */
+ memcpy_to_target(sp, k_platform, len);
+ } else {
+ memcpy_to_target(sp, k_platform, len);
+ u_platform = sp;
+ sp += len + 1;
+ }
+ }
+
+ /* Provide 16 byte alignment for the PRNG, and basic alignment for
+ * the argv and envp pointers.
+ */
+ if (STACK_GROWS_DOWN) {
+ sp = QEMU_ALIGN_DOWN(sp, 16);
+ } else {
+ sp = QEMU_ALIGN_UP(sp, 16);
}
/*
@@ -1571,15 +1712,17 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
for (i = 0; i < 16; i++) {
k_rand_bytes[i] = rand();
}
- sp -= 16;
- u_rand_bytes = sp;
- /* FIXME - check return value of memcpy_to_target() for failure */
- memcpy_to_target(sp, k_rand_bytes, 16);
+ if (STACK_GROWS_DOWN) {
+ sp -= 16;
+ u_rand_bytes = sp;
+ /* FIXME - check return value of memcpy_to_target() for failure */
+ memcpy_to_target(sp, k_rand_bytes, 16);
+ } else {
+ memcpy_to_target(sp, k_rand_bytes, 16);
+ u_rand_bytes = sp;
+ sp += 16;
+ }
- /*
- * Force 16 byte _final_ alignment here for generality.
- */
- sp = sp &~ (abi_ulong)15;
size = (DLINFO_ITEMS + 1) * 2;
if (k_platform)
size += 2;
@@ -1592,20 +1735,31 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
size += envc + argc + 2;
size += 1; /* argc itself */
size *= n;
- if (size & 15)
- sp -= 16 - (size & 15);
+
+ /* Allocate space and finalize stack alignment for entry now. */
+ if (STACK_GROWS_DOWN) {
+ u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT);
+ sp = u_argc;
+ } else {
+ u_argc = sp;
+ sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT);
+ }
+
+ u_argv = u_argc + n;
+ u_envp = u_argv + (argc + 1) * n;
+ u_auxv = u_envp + (envc + 1) * n;
+ info->saved_auxv = u_auxv;
+ info->arg_start = u_argv;
+ info->arg_end = u_argv + argc * n;
/* This is correct because Linux defines
* elf_addr_t as Elf32_Off / Elf64_Off
*/
#define NEW_AUX_ENT(id, val) do { \
- sp -= n; put_user_ual(val, sp); \
- sp -= n; put_user_ual(id, sp); \
+ put_user_ual(id, u_auxv); u_auxv += n; \
+ put_user_ual(val, u_auxv); u_auxv += n; \
} while(0)
- sp_auxv = sp;
- NEW_AUX_ENT (AT_NULL, 0);
-
/* There must be exactly DLINFO_ITEMS entries here. */
NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
@@ -1626,8 +1780,9 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
#endif
- if (k_platform)
+ if (u_platform) {
NEW_AUX_ENT(AT_PLATFORM, u_platform);
+ }
#ifdef ARCH_DLINFO
/*
* ARCH_DLINFO must come last so platform specific code can enforce
@@ -1635,14 +1790,29 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
*/
ARCH_DLINFO;
#endif
+ NEW_AUX_ENT (AT_NULL, 0);
#undef NEW_AUX_ENT
- info->saved_auxv = sp;
- info->auxv_len = sp_auxv - sp;
+ info->auxv_len = u_argv - info->saved_auxv;
+
+ put_user_ual(argc, u_argc);
+
+ p = info->arg_strings;
+ for (i = 0; i < argc; ++i) {
+ put_user_ual(p, u_argv);
+ u_argv += n;
+ p += target_strlen(p) + 1;
+ }
+ put_user_ual(0, u_argv);
+
+ p = info->env_strings;
+ for (i = 0; i < envc; ++i) {
+ put_user_ual(p, u_envp);
+ u_envp += n;
+ p += target_strlen(p) + 1;
+ }
+ put_user_ual(0, u_envp);
- sp = loader_build_argptr(envc, argc, sp, p, 0);
- /* Check the right amount of stack was allocated for auxvec, envp & argv. */
- assert(sp_auxv - sp == size);
return sp;
}
@@ -2213,12 +2383,28 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
bprm->p = setup_arg_pages(bprm, info);
scratch = g_new0(char, TARGET_PAGE_SIZE);
- bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
- bprm->p, info->stack_limit);
- bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
- bprm->p, info->stack_limit);
- bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
- bprm->p, info->stack_limit);
+ if (STACK_GROWS_DOWN) {
+ bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
+ bprm->p, info->stack_limit);
+ info->file_string = bprm->p;
+ bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
+ bprm->p, info->stack_limit);
+ info->env_strings = bprm->p;
+ bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
+ bprm->p, info->stack_limit);
+ info->arg_strings = bprm->p;
+ } else {
+ info->arg_strings = bprm->p;
+ bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
+ bprm->p, info->stack_limit);
+ info->env_strings = bprm->p;
+ bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
+ bprm->p, info->stack_limit);
+ info->file_string = bprm->p;
+ bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
+ bprm->p, info->stack_limit);
+ }
+
g_free(scratch);
if (!bprm->p) {
diff --git a/linux-user/errno_defs.h b/linux-user/errno_defs.h
index 65522c4516..55fbebda51 100644
--- a/linux-user/errno_defs.h
+++ b/linux-user/errno_defs.h
@@ -140,6 +140,9 @@
#define TARGET_EOWNERDEAD 130 /* Owner died */
#define TARGET_ENOTRECOVERABLE 131 /* State not recoverable */
+#define TARGET_ERFKILL 132 /* Operation not possible due to RF-kill */
+#define TARGET_EHWPOISON 133 /* Memory page has hardware error */
+
/* QEMU internal, not visible to the guest. This is returned when a
* system call should be restarted, to tell the main loop that it
* should wind the guest PC backwards so it will re-execute the syscall
diff --git a/linux-user/hppa/sockbits.h b/linux-user/hppa/sockbits.h
new file mode 100644
index 0000000000..5044619e16
--- /dev/null
+++ b/linux-user/hppa/sockbits.h
@@ -0,0 +1,97 @@
+#define TARGET_SOL_SOCKET 0xffff
+
+#define TARGET_SO_DEBUG 0x0001
+#define TARGET_SO_REUSEADDR 0x0004
+#define TARGET_SO_KEEPALIVE 0x0008
+#define TARGET_SO_DONTROUTE 0x0010
+#define TARGET_SO_BROADCAST 0x0020
+#define TARGET_SO_LINGER 0x0080
+#define TARGET_SO_OOBINLINE 0x0100
+#define TARGET_SO_REUSEPORT 0x0200
+#define TARGET_SO_SNDBUF 0x1001
+#define TARGET_SO_RCVBUF 0x1002
+#define TARGET_SO_SNDBUFFORCE 0x100a
+#define TARGET_SO_RCVBUFFORCE 0x100b
+#define TARGET_SO_SNDLOWAT 0x1003
+#define TARGET_SO_RCVLOWAT 0x1004
+#define TARGET_SO_SNDTIMEO 0x1005
+#define TARGET_SO_RCVTIMEO 0x1006
+#define TARGET_SO_ERROR 0x1007
+#define TARGET_SO_TYPE 0x1008
+#define TARGET_SO_PROTOCOL 0x1028
+#define TARGET_SO_DOMAIN 0x1029
+#define TARGET_SO_PEERNAME 0x2000
+#define TARGET_SO_NO_CHECK 0x400b
+#define TARGET_SO_PRIORITY 0x400c
+#define TARGET_SO_BSDCOMPAT 0x400e
+#define TARGET_SO_PASSCRED 0x4010
+#define TARGET_SO_PEERCRED 0x4011
+#define TARGET_SO_TIMESTAMP 0x4012
+#define TARGET_SCM_TIMESTAMP TARGET_SO_TIMESTAMP
+#define TARGET_SO_TIMESTAMPNS 0x4013
+#define TARGET_SCM_TIMESTAMPNS TARGET_SO_TIMESTAMPNS
+
+#define TARGET_SO_SECURITY_AUTHENTICATION 0x4016
+#define TARGET_SO_SECURITY_ENCRYPTION_TRANSPORT 0x4017
+#define TARGET_SO_SECURITY_ENCRYPTION_NETWORK 0x4018
+
+#define TARGET_SO_BINDTODEVICE 0x4019
+#define TARGET_SO_ATTACH_FILTER 0x401a
+#define TARGET_SO_DETACH_FILTER 0x401b
+#define TARGET_SO_GET_FILTER TARGET_SO_ATTACH_FILTER
+#define TARGET_SO_ACCEPTCONN 0x401c
+#define TARGET_SO_PEERSEC 0x401d
+#define TARGET_SO_PASSSEC 0x401e
+#define TARGET_SO_MARK 0x401f
+#define TARGET_SO_TIMESTAMPING 0x4020
+#define TARGET_SCM_TIMESTAMPING TARGET_SO_TIMESTAMPING
+#define TARGET_SO_RXQ_OVFL 0x4021
+#define TARGET_SO_WIFI_STATUS 0x4022
+#define TARGET_SCM_WIFI_STATUS TARGET_SO_WIFI_STATUS
+#define TARGET_SO_PEEK_OFF 0x4023
+#define TARGET_SO_NOFCS 0x4024
+#define TARGET_SO_LOCK_FILTER 0x4025
+#define TARGET_SO_SELECT_ERR_QUEUE 0x4026
+#define TARGET_SO_BUSY_POLL 0x4027
+#define TARGET_SO_MAX_PACING_RATE 0x4028
+#define TARGET_SO_BPF_EXTENSIONS 0x4029
+#define TARGET_SO_INCOMING_CPU 0x402A
+#define TARGET_SO_ATTACH_BPF 0x402B
+#define TARGET_SO_DETACH_BPF TARGET_SO_DETACH_FILTER
+
+#define TARGET_SO_ATTACH_REUSEPORT_CBPF 0x402C
+#define TARGET_SO_ATTACH_REUSEPORT_EBPF 0x402D
+
+#define TARGET_SO_CNX_ADVICE 0x402E
+
+/** sock_type - Socket types - default values
+ *
+ *
+ * @SOCK_STREAM - stream (connection) socket
+ * @SOCK_DGRAM - datagram (conn.less) socket
+ * @SOCK_RAW - raw socket
+ * @SOCK_RDM - reliably-delivered message
+ * @SOCK_SEQPACKET - sequential packet socket
+ * @SOCK_DCCP - Datagram Congestion Control Protocol socket
+ * @SOCK_PACKET - linux specific way of getting packets at the dev level.
+ * For writing rarp and other similar things on the user
+ * level.
+ * @SOCK_CLOEXEC - sets the close-on-exec (FD_CLOEXEC) flag.
+ * @SOCK_NONBLOCK - sets the O_NONBLOCK file status flag.
+ */
+enum sock_type {
+ TARGET_SOCK_STREAM = 1,
+ TARGET_SOCK_DGRAM = 2,
+ TARGET_SOCK_RAW = 3,
+ TARGET_SOCK_RDM = 4,
+ TARGET_SOCK_SEQPACKET = 5,
+ TARGET_SOCK_DCCP = 6,
+ TARGET_SOCK_PACKET = 10,
+ TARGET_SOCK_CLOEXEC = 010000000,
+ TARGET_SOCK_NONBLOCK = 0x40000000,
+};
+
+#define TARGET_SOCK_MAX (TARGET_SOCK_PACKET + 1)
+#define TARGET_SOCK_TYPE_MASK 0xf /* Covers up to TARGET_SOCK_MAX-1. */
+
+#define ARCH_HAS_SOCKET_TYPES 1
diff --git a/linux-user/hppa/syscall_nr.h b/linux-user/hppa/syscall_nr.h
new file mode 100644
index 0000000000..0f396fa1e2
--- /dev/null
+++ b/linux-user/hppa/syscall_nr.h
@@ -0,0 +1,353 @@
+/*
+ * This file contains the system call numbers.
+ */
+
+#define TARGET_NR_restart_syscall 0
+#define TARGET_NR_exit 1
+#define TARGET_NR_fork 2
+#define TARGET_NR_read 3
+#define TARGET_NR_write 4
+#define TARGET_NR_open 5
+#define TARGET_NR_close 6
+#define TARGET_NR_waitpid 7
+#define TARGET_NR_creat 8
+#define TARGET_NR_link 9
+#define TARGET_NR_unlink 10
+#define TARGET_NR_execve 11
+#define TARGET_NR_chdir 12
+#define TARGET_NR_time 13
+#define TARGET_NR_mknod 14
+#define TARGET_NR_chmod 15
+#define TARGET_NR_lchown 16
+#define TARGET_NR_socket 17
+#define TARGET_NR_stat 18
+#define TARGET_NR_lseek 19
+#define TARGET_NR_getpid 20
+#define TARGET_NR_mount 21
+#define TARGET_NR_bind 22
+#define TARGET_NR_setuid 23
+#define TARGET_NR_getuid 24
+#define TARGET_NR_stime 25
+#define TARGET_NR_ptrace 26
+#define TARGET_NR_alarm 27
+#define TARGET_NR_fstat 28
+#define TARGET_NR_pause 29
+#define TARGET_NR_utime 30
+#define TARGET_NR_connect 31
+#define TARGET_NR_listen 32
+#define TARGET_NR_access 33
+#define TARGET_NR_nice 34
+#define TARGET_NR_accept 35
+#define TARGET_NR_sync 36
+#define TARGET_NR_kill 37
+#define TARGET_NR_rename 38
+#define TARGET_NR_mkdir 39
+#define TARGET_NR_rmdir 40
+#define TARGET_NR_dup 41
+#define TARGET_NR_pipe 42
+#define TARGET_NR_times 43
+#define TARGET_NR_getsockname 44
+#define TARGET_NR_brk 45
+#define TARGET_NR_setgid 46
+#define TARGET_NR_getgid 47
+#define TARGET_NR_signal 48
+#define TARGET_NR_geteuid 49
+#define TARGET_NR_getegid 50
+#define TARGET_NR_acct 51
+#define TARGET_NR_umount2 52
+#define TARGET_NR_getpeername 53
+#define TARGET_NR_ioctl 54
+#define TARGET_NR_fcntl 55
+#define TARGET_NR_socketpair 56
+#define TARGET_NR_setpgid 57
+#define TARGET_NR_send 58
+#define TARGET_NR_uname 59
+#define TARGET_NR_umask 60
+#define TARGET_NR_chroot 61
+#define TARGET_NR_ustat 62
+#define TARGET_NR_dup2 63
+#define TARGET_NR_getppid 64
+#define TARGET_NR_getpgrp 65
+#define TARGET_NR_setsid 66
+#define TARGET_NR_pivot_root 67
+#define TARGET_NR_sgetmask 68
+#define TARGET_NR_ssetmask 69
+#define TARGET_NR_setreuid 70
+#define TARGET_NR_setregid 71
+#define TARGET_NR_mincore 72
+#define TARGET_NR_sigpending 73
+#define TARGET_NR_sethostname 74
+#define TARGET_NR_setrlimit 75
+#define TARGET_NR_getrlimit 76
+#define TARGET_NR_getrusage 77
+#define TARGET_NR_gettimeofday 78
+#define TARGET_NR_settimeofday 79
+#define TARGET_NR_getgroups 80
+#define TARGET_NR_setgroups 81
+#define TARGET_NR_sendto 82
+#define TARGET_NR_symlink 83
+#define TARGET_NR_lstat 84
+#define TARGET_NR_readlink 85
+#define TARGET_NR_uselib 86
+#define TARGET_NR_swapon 87
+#define TARGET_NR_reboot 88
+#define TARGET_NR_mmap2 89
+#define TARGET_NR_mmap 90
+#define TARGET_NR_munmap 91
+#define TARGET_NR_truncate 92
+#define TARGET_NR_ftruncate 93
+#define TARGET_NR_fchmod 94
+#define TARGET_NR_fchown 95
+#define TARGET_NR_getpriority 96
+#define TARGET_NR_setpriority 97
+#define TARGET_NR_recv 98
+#define TARGET_NR_statfs 99
+#define TARGET_NR_fstatfs 100
+#define TARGET_NR_stat64 101
+#define TARGET_NR_socketcall 102
+#define TARGET_NR_syslog 103
+#define TARGET_NR_setitimer 104
+#define TARGET_NR_getitimer 105
+#define TARGET_NR_capget 106
+#define TARGET_NR_capset 107
+#define TARGET_NR_pread64 108
+#define TARGET_NR_pwrite64 109
+#define TARGET_NR_getcwd 110
+#define TARGET_NR_vhangup 111
+#define TARGET_NR_fstat64 112
+#define TARGET_NR_vfork 113
+#define TARGET_NR_wait4 114
+#define TARGET_NR_swapoff 115
+#define TARGET_NR_sysinfo 116
+#define TARGET_NR_shutdown 117
+#define TARGET_NR_fsync 118
+#define TARGET_NR_madvise 119
+#define TARGET_NR_clone 120
+#define TARGET_NR_setdomainname 121
+#define TARGET_NR_sendfile 122
+#define TARGET_NR_recvfrom 123
+#define TARGET_NR_adjtimex 124
+#define TARGET_NR_mprotect 125
+#define TARGET_NR_sigprocmask 126
+#define TARGET_NR_create_module 127
+#define TARGET_NR_init_module 128
+#define TARGET_NR_delete_module 129
+#define TARGET_NR_get_kernel_syms 130
+#define TARGET_NR_quotactl 131
+#define TARGET_NR_getpgid 132
+#define TARGET_NR_fchdir 133
+#define TARGET_NR_bdflush 134
+#define TARGET_NR_sysfs 135
+#define TARGET_NR_personality 136
+#define TARGET_NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define TARGET_NR_setfsuid 138
+#define TARGET_NR_setfsgid 139
+#define TARGET_NR__llseek 140
+#define TARGET_NR_getdents 141
+#define TARGET_NR__newselect 142
+#define TARGET_NR_flock 143
+#define TARGET_NR_msync 144
+#define TARGET_NR_readv 145
+#define TARGET_NR_writev 146
+#define TARGET_NR_getsid 147
+#define TARGET_NR_fdatasync 148
+#define TARGET_NR__sysctl 149
+#define TARGET_NR_mlock 150
+#define TARGET_NR_munlock 151
+#define TARGET_NR_mlockall 152
+#define TARGET_NR_munlockall 153
+#define TARGET_NR_sched_setparam 154
+#define TARGET_NR_sched_getparam 155
+#define TARGET_NR_sched_setscheduler 156
+#define TARGET_NR_sched_getscheduler 157
+#define TARGET_NR_sched_yield 158
+#define TARGET_NR_sched_get_priority_max 159
+#define TARGET_NR_sched_get_priority_min 160
+#define TARGET_NR_sched_rr_get_interval 161
+#define TARGET_NR_nanosleep 162
+#define TARGET_NR_mremap 163
+#define TARGET_NR_setresuid 164
+#define TARGET_NR_getresuid 165
+#define TARGET_NR_sigaltstack 166
+#define TARGET_NR_query_module 167
+#define TARGET_NR_poll 168
+#define TARGET_NR_nfsservctl 169
+#define TARGET_NR_setresgid 170
+#define TARGET_NR_getresgid 171
+#define TARGET_NR_prctl 172
+#define TARGET_NR_rt_sigreturn 173
+#define TARGET_NR_rt_sigaction 174
+#define TARGET_NR_rt_sigprocmask 175
+#define TARGET_NR_rt_sigpending 176
+#define TARGET_NR_rt_sigtimedwait 177
+#define TARGET_NR_rt_sigqueueinfo 178
+#define TARGET_NR_rt_sigsuspend 179
+#define TARGET_NR_chown 180
+#define TARGET_NR_setsockopt 181
+#define TARGET_NR_getsockopt 182
+#define TARGET_NR_sendmsg 183
+#define TARGET_NR_recvmsg 184
+#define TARGET_NR_semop 185
+#define TARGET_NR_semget 186
+#define TARGET_NR_semctl 187
+#define TARGET_NR_msgsnd 188
+#define TARGET_NR_msgrcv 189
+#define TARGET_NR_msgget 190
+#define TARGET_NR_msgctl 191
+#define TARGET_NR_shmat 192
+#define TARGET_NR_shmdt 193
+#define TARGET_NR_shmget 194
+#define TARGET_NR_shmctl 195
+#define TARGET_NR_getpmsg 196
+#define TARGET_NR_putpmsg 197
+#define TARGET_NR_lstat64 198
+#define TARGET_NR_truncate64 199
+#define TARGET_NR_ftruncate64 200
+#define TARGET_NR_getdents64 201
+#define TARGET_NR_fcntl64 202
+#define TARGET_NR_attrctl 203
+#define TARGET_NR_acl_get 204
+#define TARGET_NR_acl_set 205
+#define TARGET_NR_gettid 206
+#define TARGET_NR_readahead 207
+#define TARGET_NR_tkill 208
+#define TARGET_NR_sendfile64 209
+#define TARGET_NR_futex 210
+#define TARGET_NR_sched_setaffinity 211
+#define TARGET_NR_sched_getaffinity 212
+#define TARGET_NR_set_thread_area 213
+#define TARGET_NR_get_thread_area 214
+#define TARGET_NR_io_setup 215
+#define TARGET_NR_io_destroy 216
+#define TARGET_NR_io_getevents 217
+#define TARGET_NR_io_submit 218
+#define TARGET_NR_io_cancel 219
+#define TARGET_NR_alloc_hugepages 220
+#define TARGET_NR_free_hugepages 221
+#define TARGET_NR_exit_group 222
+#define TARGET_NR_lookup_dcookie 223
+#define TARGET_NR_epoll_create 224
+#define TARGET_NR_epoll_ctl 225
+#define TARGET_NR_epoll_wait 226
+#define TARGET_NR_remap_file_pages 227
+#define TARGET_NR_semtimedop 228
+#define TARGET_NR_mq_open 229
+#define TARGET_NR_mq_unlink 230
+#define TARGET_NR_mq_timedsend 231
+#define TARGET_NR_mq_timedreceive 232
+#define TARGET_NR_mq_notify 233
+#define TARGET_NR_mq_getsetattr 234
+#define TARGET_NR_waitid 235
+#define TARGET_NR_fadvise64_64 236
+#define TARGET_NR_set_tid_address 237
+#define TARGET_NR_setxattr 238
+#define TARGET_NR_lsetxattr 239
+#define TARGET_NR_fsetxattr 240
+#define TARGET_NR_getxattr 241
+#define TARGET_NR_lgetxattr 242
+#define TARGET_NR_fgetxattr 243
+#define TARGET_NR_listxattr 244
+#define TARGET_NR_llistxattr 245
+#define TARGET_NR_flistxattr 246
+#define TARGET_NR_removexattr 247
+#define TARGET_NR_lremovexattr 248
+#define TARGET_NR_fremovexattr 249
+#define TARGET_NR_timer_create 250
+#define TARGET_NR_timer_settime 251
+#define TARGET_NR_timer_gettime 252
+#define TARGET_NR_timer_getoverrun 253
+#define TARGET_NR_timer_delete 254
+#define TARGET_NR_clock_settime 255
+#define TARGET_NR_clock_gettime 256
+#define TARGET_NR_clock_getres 257
+#define TARGET_NR_clock_nanosleep 258
+#define TARGET_NR_tgkill 259
+#define TARGET_NR_mbind 260
+#define TARGET_NR_get_mempolicy 261
+#define TARGET_NR_set_mempolicy 262
+#define TARGET_NR_vserver 263
+#define TARGET_NR_add_key 264
+#define TARGET_NR_request_key 265
+#define TARGET_NR_keyctl 266
+#define TARGET_NR_ioprio_set 267
+#define TARGET_NR_ioprio_get 268
+#define TARGET_NR_inotify_init 269
+#define TARGET_NR_inotify_add_watch 270
+#define TARGET_NR_inotify_rm_watch 271
+#define TARGET_NR_migrate_pages 272
+#define TARGET_NR_pselect6 273
+#define TARGET_NR_ppoll 274
+#define TARGET_NR_openat 275
+#define TARGET_NR_mkdirat 276
+#define TARGET_NR_mknodat 277
+#define TARGET_NR_fchownat 278
+#define TARGET_NR_futimesat 279
+#define TARGET_NR_fstatat64 280
+#define TARGET_NR_unlinkat 281
+#define TARGET_NR_renameat 282
+#define TARGET_NR_linkat 283
+#define TARGET_NR_symlinkat 284
+#define TARGET_NR_readlinkat 285
+#define TARGET_NR_fchmodat 286
+#define TARGET_NR_faccessat 287
+#define TARGET_NR_unshare 288
+#define TARGET_NR_set_robust_list 289
+#define TARGET_NR_get_robust_list 290
+#define TARGET_NR_splice 291
+#define TARGET_NR_sync_file_range 292
+#define TARGET_NR_tee 293
+#define TARGET_NR_vmsplice 294
+#define TARGET_NR_move_pages 295
+#define TARGET_NR_getcpu 296
+#define TARGET_NR_epoll_pwait 297
+#define TARGET_NR_statfs64 298
+#define TARGET_NR_fstatfs64 299
+#define TARGET_NR_kexec_load 300
+#define TARGET_NR_utimensat 301
+#define TARGET_NR_signalfd 302
+#define TARGET_NR_timerfd 303
+#define TARGET_NR_eventfd 304
+#define TARGET_NR_fallocate 305
+#define TARGET_NR_timerfd_create 306
+#define TARGET_NR_timerfd_settime 307
+#define TARGET_NR_timerfd_gettime 308
+#define TARGET_NR_signalfd4 309
+#define TARGET_NR_eventfd2 310
+#define TARGET_NR_epoll_create1 311
+#define TARGET_NR_dup3 312
+#define TARGET_NR_pipe2 313
+#define TARGET_NR_inotify_init1 314
+#define TARGET_NR_preadv 315
+#define TARGET_NR_pwritev 316
+#define TARGET_NR_rt_tgsigqueueinfo 317
+#define TARGET_NR_perf_event_open 318
+#define TARGET_NR_recvmmsg 319
+#define TARGET_NR_accept4 320
+#define TARGET_NR_prlimit64 321
+#define TARGET_NR_fanotify_init 322
+#define TARGET_NR_fanotify_mark 323
+#define TARGET_NR_clock_adjtime 324
+#define TARGET_NR_name_to_handle_at 325
+#define TARGET_NR_open_by_handle_at 326
+#define TARGET_NR_syncfs 327
+#define TARGET_NR_setns 328
+#define TARGET_NR_sendmmsg 329
+#define TARGET_NR_process_vm_readv 330
+#define TARGET_NR_process_vm_writev 331
+#define TARGET_NR_kcmp 332
+#define TARGET_NR_finit_module 333
+#define TARGET_NR_sched_setattr 334
+#define TARGET_NR_sched_getattr 335
+#define TARGET_NR_utimes 336
+#define TARGET_NR_renameat2 337
+#define TARGET_NR_seccomp 338
+#define TARGET_NR_getrandom 339
+#define TARGET_NR_memfd_create 340
+#define TARGET_NR_bpf 341
+#define TARGET_NR_execveat 342
+#define TARGET_NR_membarrier 343
+#define TARGET_NR_userfaultfd 344
+#define TARGET_NR_mlock2 345
+#define TARGET_NR_copy_file_range 346
+#define TARGET_NR_preadv2 347
+#define TARGET_NR_pwritev2 348
diff --git a/linux-user/hppa/target_cpu.h b/linux-user/hppa/target_cpu.h
new file mode 100644
index 0000000000..1a5cecad3c
--- /dev/null
+++ b/linux-user/hppa/target_cpu.h
@@ -0,0 +1,35 @@
+/*
+ * HPPA specific CPU ABI and functions for linux-user
+ *
+ * Copyright (c) 2016 Richard Henderson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HPPA_TARGET_CPU_H
+#define HPPA_TARGET_CPU_H
+
+static inline void cpu_clone_regs(CPUHPPAState *env, target_ulong newsp)
+{
+ if (newsp) {
+ env->gr[30] = newsp;
+ }
+ env->gr[28] = 0;
+}
+
+static inline void cpu_set_tls(CPUHPPAState *env, target_ulong newtls)
+{
+ env->cr27 = newtls;
+}
+
+#endif
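For illustration only (not part of the patch): cpu_clone_regs() clears gr28, the hppa syscall return-value register, so from the guest's point of view the clone/fork child wakes up with a return value of 0 while the parent receives the child's pid. A minimal guest-side sketch of that convention:

#include <unistd.h>
#include <sys/types.h>

static pid_t demo_clone_return(void)
{
    pid_t pid = fork();          /* serviced through the emulated clone path */
    if (pid == 0) {
        /* child: gr28 was zeroed by cpu_clone_regs(), so fork() returns 0 */
        _exit(0);
    }
    /* parent: gr28 carried the new child's pid back from the syscall */
    return pid;
}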
diff --git a/linux-user/hppa/target_signal.h b/linux-user/hppa/target_signal.h
new file mode 100644
index 0000000000..e115890b48
--- /dev/null
+++ b/linux-user/hppa/target_signal.h
@@ -0,0 +1,29 @@
+#ifndef HPPA_TARGET_SIGNAL_H
+#define HPPA_TARGET_SIGNAL_H
+
+#include "cpu.h"
+
+/* this struct defines a stack used during syscall handling */
+
+typedef struct target_sigaltstack {
+ abi_ulong ss_sp;
+ int32_t ss_flags;
+ abi_ulong ss_size;
+} target_stack_t;
+
+
+/*
+ * sigaltstack controls
+ */
+#define TARGET_SS_ONSTACK 1
+#define TARGET_SS_DISABLE 2
+
+#define TARGET_MINSIGSTKSZ 2048
+#define TARGET_SIGSTKSZ 8192
+
+static inline abi_ulong get_sp_from_cpustate(CPUHPPAState *state)
+{
+ return state->gr[30];
+}
+
+#endif /* HPPA_TARGET_SIGNAL_H */
diff --git a/linux-user/hppa/target_structs.h b/linux-user/hppa/target_structs.h
new file mode 100644
index 0000000000..b560b1872b
--- /dev/null
+++ b/linux-user/hppa/target_structs.h
@@ -0,0 +1,54 @@
+/*
+ * HPPA specific structures for linux-user
+ *
+ * Copyright (c) 2016 Richard Henderson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HPPA_TARGET_STRUCTS_H
+#define HPPA_TARGET_STRUCTS_H
+
+struct target_ipc_perm {
+ abi_int __key; /* Key. */
+ abi_uint uid; /* Owner's user ID. */
+ abi_uint gid; /* Owner's group ID. */
+ abi_uint cuid; /* Creator's user ID. */
+ abi_uint cgid; /* Creator's group ID. */
+ abi_ushort __pad1;
+ abi_ushort mode; /* Read/write permission. */
+ abi_ushort __pad2;
+ abi_ushort __seq; /* Sequence number. */
+ abi_uint __pad3;
+ uint64_t __unused1;
+ uint64_t __unused2;
+};
+
+struct target_shmid_ds {
+ struct target_ipc_perm shm_perm; /* operation permission struct */
+ abi_uint __pad1;
+ abi_ulong shm_atime; /* time of last shmat() */
+ abi_uint __pad2;
+ abi_ulong shm_dtime; /* time of last shmdt() */
+ abi_uint __pad3;
+ abi_ulong shm_ctime; /* time of last change by shmctl() */
+ abi_uint __pad4;
+ abi_long shm_segsz; /* size of segment in bytes */
+ abi_int shm_cpid; /* pid of creator */
+ abi_int shm_lpid; /* pid of last shmop */
+ abi_ulong shm_nattch; /* number of current attaches */
+ abi_ulong __unused1;
+ abi_ulong __unused2;
+};
+
+#endif
diff --git a/linux-user/hppa/target_syscall.h b/linux-user/hppa/target_syscall.h
new file mode 100644
index 0000000000..ac18a9c575
--- /dev/null
+++ b/linux-user/hppa/target_syscall.h
@@ -0,0 +1,237 @@
+#ifndef HPPA_TARGET_SYSCALL_H
+#define HPPA_TARGET_SYSCALL_H
+
+struct target_pt_regs {
+ target_ulong gr[32];
+ uint64_t fr[32];
+ target_ulong sr[8];
+ target_ulong iasq[2];
+ target_ulong iaoq[2];
+ target_ulong cr27;
+ target_ulong __pad0;
+ target_ulong orig_r28;
+ target_ulong ksp;
+ target_ulong kpc;
+ target_ulong sar;
+ target_ulong iir;
+ target_ulong isr;
+ target_ulong ior;
+ target_ulong ipsw;
+};
+
+#define UNAME_MACHINE "hppa"
+#define UNAME_MINIMUM_RELEASE "2.6.32"
+#define TARGET_CLONE_BACKWARDS
+#define TARGET_MINSIGSTKSZ 2048
+#define TARGET_MLOCKALL_MCL_CURRENT 1
+#define TARGET_MLOCKALL_MCL_FUTURE 2
+
+#undef TARGET_ENOMSG
+#define TARGET_ENOMSG 35
+#undef TARGET_EIDRM
+#define TARGET_EIDRM 36
+#undef TARGET_ECHRNG
+#define TARGET_ECHRNG 37
+#undef TARGET_EL2NSYNC
+#define TARGET_EL2NSYNC 38
+#undef TARGET_EL3HLT
+#define TARGET_EL3HLT 39
+#undef TARGET_EL3RST
+#define TARGET_EL3RST 40
+#undef TARGET_ELNRNG
+#define TARGET_ELNRNG 41
+#undef TARGET_EUNATCH
+#define TARGET_EUNATCH 42
+#undef TARGET_ENOCSI
+#define TARGET_ENOCSI 43
+#undef TARGET_EL2HLT
+#define TARGET_EL2HLT 44
+#undef TARGET_EDEADLK
+#define TARGET_EDEADLK 45
+#undef TARGET_ENOLCK
+#define TARGET_ENOLCK 46
+#undef TARGET_EILSEQ
+#define TARGET_EILSEQ 47
+
+#undef TARGET_ENONET
+#define TARGET_ENONET 50
+#undef TARGET_ENODATA
+#define TARGET_ENODATA 51
+#undef TARGET_ETIME
+#define TARGET_ETIME 52
+#undef TARGET_ENOSR
+#define TARGET_ENOSR 53
+#undef TARGET_ENOSTR
+#define TARGET_ENOSTR 54
+#undef TARGET_ENOPKG
+#define TARGET_ENOPKG 55
+
+#undef TARGET_ENOLINK
+#define TARGET_ENOLINK 57
+#undef TARGET_EADV
+#define TARGET_EADV 58
+#undef TARGET_ESRMNT
+#define TARGET_ESRMNT 59
+#undef TARGET_ECOMM
+#define TARGET_ECOMM 60
+#undef TARGET_EPROTO
+#define TARGET_EPROTO 61
+
+#undef TARGET_EMULTIHOP
+#define TARGET_EMULTIHOP 64
+
+#undef TARGET_EDOTDOT
+#define TARGET_EDOTDOT 66
+#undef TARGET_EBADMSG
+#define TARGET_EBADMSG 67
+#undef TARGET_EUSERS
+#define TARGET_EUSERS 68
+#undef TARGET_EDQUOT
+#define TARGET_EDQUOT 69
+#undef TARGET_ESTALE
+#define TARGET_ESTALE 70
+#undef TARGET_EREMOTE
+#define TARGET_EREMOTE 71
+#undef TARGET_EOVERFLOW
+#define TARGET_EOVERFLOW 72
+
+#undef TARGET_EBADE
+#define TARGET_EBADE 160
+#undef TARGET_EBADR
+#define TARGET_EBADR 161
+#undef TARGET_EXFULL
+#define TARGET_EXFULL 162
+#undef TARGET_ENOANO
+#define TARGET_ENOANO 163
+#undef TARGET_EBADRQC
+#define TARGET_EBADRQC 164
+#undef TARGET_EBADSLT
+#define TARGET_EBADSLT 165
+#undef TARGET_EBFONT
+#define TARGET_EBFONT 166
+#undef TARGET_ENOTUNIQ
+#define TARGET_ENOTUNIQ 167
+#undef TARGET_EBADFD
+#define TARGET_EBADFD 168
+#undef TARGET_EREMCHG
+#define TARGET_EREMCHG 169
+#undef TARGET_ELIBACC
+#define TARGET_ELIBACC 170
+#undef TARGET_ELIBBAD
+#define TARGET_ELIBBAD 171
+#undef TARGET_ELIBSCN
+#define TARGET_ELIBSCN 172
+#undef TARGET_ELIBMAX
+#define TARGET_ELIBMAX 173
+#undef TARGET_ELIBEXEC
+#define TARGET_ELIBEXEC 174
+#undef TARGET_ERESTART
+#define TARGET_ERESTART 175
+#undef TARGET_ESTRPIPE
+#define TARGET_ESTRPIPE 176
+#undef TARGET_EUCLEAN
+#define TARGET_EUCLEAN 177
+#undef TARGET_ENOTNAM
+#define TARGET_ENOTNAM 178
+#undef TARGET_ENAVAIL
+#define TARGET_ENAVAIL 179
+#undef TARGET_EISNAM
+#define TARGET_EISNAM 180
+#undef TARGET_EREMOTEIO
+#define TARGET_EREMOTEIO 181
+#undef TARGET_ENOMEDIUM
+#define TARGET_ENOMEDIUM 182
+#undef TARGET_EMEDIUMTYPE
+#define TARGET_EMEDIUMTYPE 183
+#undef TARGET_ENOKEY
+#define TARGET_ENOKEY 184
+#undef TARGET_EKEYEXPIRED
+#define TARGET_EKEYEXPIRED 185
+#undef TARGET_EKEYREVOKED
+#define TARGET_EKEYREVOKED 186
+#undef TARGET_EKEYREJECTED
+#define TARGET_EKEYREJECTED 187
+
+/* Never used in linux. */
+/* #define TARGET_ENOSYM 215 */
+#undef TARGET_ENOTSOCK
+#define TARGET_ENOTSOCK 216
+#undef TARGET_EDESTADDRREQ
+#define TARGET_EDESTADDRREQ 217
+#undef TARGET_EMSGSIZE
+#define TARGET_EMSGSIZE 218
+#undef TARGET_EPROTOTYPE
+#define TARGET_EPROTOTYPE 219
+#undef TARGET_ENOPROTOOPT
+#define TARGET_ENOPROTOOPT 220
+#undef TARGET_EPROTONOSUPPORT
+#define TARGET_EPROTONOSUPPORT 221
+#undef TARGET_ESOCKTNOSUPPORT
+#define TARGET_ESOCKTNOSUPPORT 222
+#undef TARGET_EOPNOTSUPP
+#define TARGET_EOPNOTSUPP 223
+#undef TARGET_EPFNOSUPPORT
+#define TARGET_EPFNOSUPPORT 224
+#undef TARGET_EAFNOSUPPORT
+#define TARGET_EAFNOSUPPORT 225
+#undef TARGET_EADDRINUSE
+#define TARGET_EADDRINUSE 226
+#undef TARGET_EADDRNOTAVAIL
+#define TARGET_EADDRNOTAVAIL 227
+#undef TARGET_ENETDOWN
+#define TARGET_ENETDOWN 228
+#undef TARGET_ENETUNREACH
+#define TARGET_ENETUNREACH 229
+#undef TARGET_ENETRESET
+#define TARGET_ENETRESET 230
+#undef TARGET_ECONNABORTED
+#define TARGET_ECONNABORTED 231
+#undef TARGET_ECONNRESET
+#define TARGET_ECONNRESET 232
+#undef TARGET_ENOBUFS
+#define TARGET_ENOBUFS 233
+#undef TARGET_EISCONN
+#define TARGET_EISCONN 234
+#undef TARGET_ENOTCONN
+#define TARGET_ENOTCONN 235
+#undef TARGET_ESHUTDOWN
+#define TARGET_ESHUTDOWN 236
+#undef TARGET_ETOOMANYREFS
+#define TARGET_ETOOMANYREFS 237
+#undef TARGET_ETIMEDOUT
+#define TARGET_ETIMEDOUT 238
+#undef TARGET_ECONNREFUSED
+#define TARGET_ECONNREFUSED 239
+#define TARGET_EREMOTERELEASE 240
+#undef TARGET_EHOSTDOWN
+#define TARGET_EHOSTDOWN 241
+#undef TARGET_EHOSTUNREACH
+#define TARGET_EHOSTUNREACH 242
+
+#undef TARGET_EALREADY
+#define TARGET_EALREADY 244
+#undef TARGET_EINPROGRESS
+#define TARGET_EINPROGRESS 245
+#undef TARGET_ENOTEMPTY
+#define TARGET_ENOTEMPTY 247
+#undef TARGET_ENAMETOOLONG
+#define TARGET_ENAMETOOLONG 248
+#undef TARGET_ELOOP
+#define TARGET_ELOOP 249
+#undef TARGET_ENOSYS
+#define TARGET_ENOSYS 251
+
+#undef TARGET_ECANCELED
+#define TARGET_ECANCELED 253
+
+#undef TARGET_EOWNERDEAD
+#define TARGET_EOWNERDEAD 254
+#undef TARGET_ENOTRECOVERABLE
+#define TARGET_ENOTRECOVERABLE 255
+
+#undef TARGET_ERFKILL
+#define TARGET_ERFKILL 256
+#undef TARGET_EHWPOISON
+#define TARGET_EHWPOISON 257
+
+#endif /* HPPA_TARGET_SYSCALL_H */
diff --git a/linux-user/hppa/termbits.h b/linux-user/hppa/termbits.h
new file mode 100644
index 0000000000..e9633ef119
--- /dev/null
+++ b/linux-user/hppa/termbits.h
@@ -0,0 +1,219 @@
+/* from asm/termbits.h */
+
+#define TARGET_NCCS 19
+
+struct target_termios {
+ unsigned int c_iflag; /* input mode flags */
+ unsigned int c_oflag; /* output mode flags */
+ unsigned int c_cflag; /* control mode flags */
+ unsigned int c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[TARGET_NCCS]; /* control characters */
+};
+
+/* c_iflag bits */
+#define TARGET_IGNBRK 0000001
+#define TARGET_BRKINT 0000002
+#define TARGET_IGNPAR 0000004
+#define TARGET_PARMRK 0000010
+#define TARGET_INPCK 0000020
+#define TARGET_ISTRIP 0000040
+#define TARGET_INLCR 0000100
+#define TARGET_IGNCR 0000200
+#define TARGET_ICRNL 0000400
+#define TARGET_IUCLC 0001000
+#define TARGET_IXON 0002000
+#define TARGET_IXANY 0004000
+#define TARGET_IXOFF 0010000
+#define TARGET_IMAXBEL 0040000
+#define TARGET_IUTF8 0100000
+
+/* c_oflag bits */
+#define TARGET_OPOST 0000001
+#define TARGET_OLCUC 0000002
+#define TARGET_ONLCR 0000004
+#define TARGET_OCRNL 0000010
+#define TARGET_ONOCR 0000020
+#define TARGET_ONLRET 0000040
+#define TARGET_OFILL 0000100
+#define TARGET_OFDEL 0000200
+#define TARGET_NLDLY 0000400
+#define TARGET_NL0 0000000
+#define TARGET_NL1 0000400
+#define TARGET_CRDLY 0003000
+#define TARGET_CR0 0000000
+#define TARGET_CR1 0001000
+#define TARGET_CR2 0002000
+#define TARGET_CR3 0003000
+#define TARGET_TABDLY 0014000
+#define TARGET_TAB0 0000000
+#define TARGET_TAB1 0004000
+#define TARGET_TAB2 0010000
+#define TARGET_TAB3 0014000
+#define TARGET_XTABS 0014000
+#define TARGET_BSDLY 0020000
+#define TARGET_BS0 0000000
+#define TARGET_BS1 0020000
+#define TARGET_VTDLY 0040000
+#define TARGET_VT0 0000000
+#define TARGET_VT1 0040000
+#define TARGET_FFDLY 0100000
+#define TARGET_FF0 0000000
+#define TARGET_FF1 0100000
+
+/* c_cflag bit meaning */
+#define TARGET_CBAUD 0010017
+#define TARGET_B0 0000000 /* hang up */
+#define TARGET_B50 0000001
+#define TARGET_B75 0000002
+#define TARGET_B110 0000003
+#define TARGET_B134 0000004
+#define TARGET_B150 0000005
+#define TARGET_B200 0000006
+#define TARGET_B300 0000007
+#define TARGET_B600 0000010
+#define TARGET_B1200 0000011
+#define TARGET_B1800 0000012
+#define TARGET_B2400 0000013
+#define TARGET_B4800 0000014
+#define TARGET_B9600 0000015
+#define TARGET_B19200 0000016
+#define TARGET_B38400 0000017
+#define TARGET_EXTA B19200
+#define TARGET_EXTB B38400
+#define TARGET_CSIZE 0000060
+#define TARGET_CS5 0000000
+#define TARGET_CS6 0000020
+#define TARGET_CS7 0000040
+#define TARGET_CS8 0000060
+#define TARGET_CSTOPB 0000100
+#define TARGET_CREAD 0000200
+#define TARGET_PARENB 0000400
+#define TARGET_PARODD 0001000
+#define TARGET_HUPCL 0002000
+#define TARGET_CLOCAL 0004000
+#define TARGET_CBAUDEX 0010000
+#define TARGET_B57600 0010001
+#define TARGET_B115200 0010002
+#define TARGET_B230400 0010003
+#define TARGET_B460800 0010004
+#define TARGET_CIBAUD 002003600000 /* input baud rate (not used) */
+#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */
+#define TARGET_CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define TARGET_ISIG 0000001
+#define TARGET_ICANON 0000002
+#define TARGET_XCASE 0000004
+#define TARGET_ECHO 0000010
+#define TARGET_ECHOE 0000020
+#define TARGET_ECHOK 0000040
+#define TARGET_ECHONL 0000100
+#define TARGET_NOFLSH 0000200
+#define TARGET_TOSTOP 0000400
+#define TARGET_ECHOCTL 0001000
+#define TARGET_ECHOPRT 0002000
+#define TARGET_ECHOKE 0004000
+#define TARGET_FLUSHO 0010000
+#define TARGET_PENDIN 0040000
+#define TARGET_IEXTEN 0100000
+
+/* c_cc character offsets */
+#define TARGET_VINTR 0
+#define TARGET_VQUIT 1
+#define TARGET_VERASE 2
+#define TARGET_VKILL 3
+#define TARGET_VEOF 4
+#define TARGET_VTIME 5
+#define TARGET_VMIN 6
+#define TARGET_VSWTC 7
+#define TARGET_VSTART 8
+#define TARGET_VSTOP 9
+#define TARGET_VSUSP 10
+#define TARGET_VEOL 11
+#define TARGET_VREPRINT 12
+#define TARGET_VDISCARD 13
+#define TARGET_VWERASE 14
+#define TARGET_VLNEXT 15
+#define TARGET_VEOL2 16
+
+/* ioctls */
+
+#define TARGET_TCGETS TARGET_IOR('T', 16, struct target_termios)
+#define TARGET_TCSETS TARGET_IOW('T', 17, struct target_termios)
+#define TARGET_TCSETSW TARGET_IOW('T', 18, struct target_termios)
+#define TARGET_TCSETSF TARGET_IOW('T', 19, struct target_termios)
+#define TARGET_TCGETA TARGET_IOR('T', 1, struct target_termios)
+#define TARGET_TCSETA TARGET_IOW('T', 2, struct target_termios)
+#define TARGET_TCSETAW TARGET_IOW('T', 3, struct target_termios)
+#define TARGET_TCSETAF TARGET_IOW('T', 4, struct target_termios)
+#define TARGET_TCSBRK TARGET_IO('T', 5)
+#define TARGET_TCXONC TARGET_IO('T', 6)
+#define TARGET_TCFLSH TARGET_IO('T', 7)
+
+#define TARGET_TIOCEXCL 0x540C
+#define TARGET_TIOCNXCL 0x540D
+#define TARGET_TIOCSCTTY 0x540E
+#define TARGET_TIOCGPGRP TARGET_IOR('T', 30, int)
+#define TARGET_TIOCSPGRP TARGET_IOW('T', 29, int)
+#define TARGET_TIOCOUTQ 0x5411
+#define TARGET_TIOCSTI 0x5412
+#define TARGET_TIOCGWINSZ 0x5413
+#define TARGET_TIOCSWINSZ 0x5414
+#define TARGET_TIOCMGET 0x5415
+#define TARGET_TIOCMBIS 0x5416
+#define TARGET_TIOCMBIC 0x5417
+#define TARGET_TIOCMSET 0x5418
+#define TARGET_TIOCGSOFTCAR 0x5419
+#define TARGET_TIOCSSOFTCAR 0x541A
+#define TARGET_FIONREAD 0x541B
+#define TARGET_TIOCINQ TARGET_FIONREAD
+#define TARGET_TIOCLINUX 0x541C
+#define TARGET_TIOCCONS 0x541D
+#define TARGET_TIOCGSERIAL 0x541E
+#define TARGET_TIOCSSERIAL 0x541F
+#define TARGET_TIOCPKT 0x5420
+#define TARGET_FIONBIO 0x5421
+#define TARGET_TIOCNOTTY 0x5422
+#define TARGET_TIOCSETD 0x5423
+#define TARGET_TIOCGETD 0x5424
+#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TARGET_TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */
+#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */
+#define TARGET_TIOCGSID TARGET_IOR('T', 20, int)
+#define TARGET_TIOCGPTN TARGET_IOR('T', 0x30, unsigned int)
+ /* Get Pty Number (of pty-mux device) */
+#define TARGET_TIOCSPTLCK TARGET_IOW('T', 0x31, int)
+ /* Lock/unlock Pty */
+
+#define TARGET_FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define TARGET_FIOCLEX 0x5451
+#define TARGET_FIOASYNC 0x5452
+#define TARGET_TIOCSERCONFIG 0x5453
+#define TARGET_TIOCSERGWILD 0x5454
+#define TARGET_TIOCSERSWILD 0x5455
+#define TARGET_TIOCGLCKTRMIOS 0x5456
+#define TARGET_TIOCSLCKTRMIOS 0x5457
+#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TARGET_TIOCMIWAIT 0x545C /* wait for a change on serial */
+#define TARGET_TIOCGICOUNT 0x545D
+#define TARGET_FIOQSIZE 0x5460
+#define TARGET_TIOCSTART 0x5461
+#define TARGET_TIOCSTOP 0x5462
+#define TARGET_TIOCSLTC 0x5462
+
+/* Used for packet mode */
+#define TARGET_TIOCPKT_DATA 0
+#define TARGET_TIOCPKT_FLUSHREAD 1
+#define TARGET_TIOCPKT_FLUSHWRITE 2
+#define TARGET_TIOCPKT_STOP 4
+#define TARGET_TIOCPKT_START 8
+#define TARGET_TIOCPKT_NOSTOP 16
+#define TARGET_TIOCPKT_DOSTOP 32
+
+#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */
diff --git a/linux-user/ioctls.h b/linux-user/ioctls.h
index 1bad701481..2f6e85bd78 100644
--- a/linux-user/ioctls.h
+++ b/linux-user/ioctls.h
@@ -164,6 +164,9 @@
IOCTL(SIOCSRARP, IOC_W, MK_PTR(MK_STRUCT(STRUCT_arpreq)))
IOCTL(SIOCGRARP, IOC_R, MK_PTR(MK_STRUCT(STRUCT_arpreq)))
IOCTL(SIOCGIWNAME, IOC_W | IOC_R, MK_PTR(MK_STRUCT(STRUCT_char_ifreq)))
+ IOCTL(SIOCGPGRP, IOC_R, MK_PTR(TYPE_INT)) /* pid_t */
+ IOCTL(SIOCGSTAMP, IOC_R, MK_PTR(MK_STRUCT(STRUCT_timeval)))
+ IOCTL(SIOCGSTAMPNS, IOC_R, MK_PTR(MK_STRUCT(STRUCT_timespec)))
IOCTL(CDROMPAUSE, 0, TYPE_NULL)
IOCTL(CDROMSTART, 0, TYPE_NULL)
@@ -422,3 +425,8 @@
MK_PTR(MK_STRUCT(STRUCT_rtentry)))
IOCTL_SPECIAL(SIOCDELRT, IOC_W, do_ioctl_rt,
MK_PTR(MK_STRUCT(STRUCT_rtentry)))
+
+#ifdef TARGET_TIOCSTART
+ IOCTL_IGNORE(TIOCSTART)
+ IOCTL_IGNORE(TIOCSTOP)
+#endif
diff --git a/linux-user/main.c b/linux-user/main.c
index 75b199f274..f5c85574f9 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -68,8 +68,11 @@ do { \
* This way we will never overlap with our own libraries or binaries or stack
* or anything else that QEMU maps.
*/
-# ifdef TARGET_MIPS
-/* MIPS only supports 31 bits of virtual address space for user space */
+# if defined(TARGET_MIPS) || defined(TARGET_NIOS2)
+/*
+ * MIPS only supports 31 bits of virtual address space for user space.
+ * Nios2 also only supports 31 bits.
+ */
unsigned long reserved_va = 0x77000000;
# else
unsigned long reserved_va = 0xf7000000;
@@ -1166,7 +1169,7 @@ void cpu_loop (CPUSPARCState *env)
/* XXX: check env->error_code */
info.si_code = TARGET_SEGV_MAPERR;
if (trapnr == TT_DFAULT)
- info._sifields._sigfault._addr = env->dmmuregs[4];
+ info._sifields._sigfault._addr = env->dmmu.mmuregs[4];
else
info._sifields._sigfault._addr = cpu_tsptr(env)->tpc;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
@@ -2462,6 +2465,109 @@ error:
}
#endif
+#ifdef TARGET_NIOS2
+
+void cpu_loop(CPUNios2State *env)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ target_siginfo_t info;
+ int trapnr, gdbsig, ret;
+
+ for (;;) {
+ cpu_exec_start(cs);
+ trapnr = cpu_exec(cs);
+ cpu_exec_end(cs);
+ gdbsig = 0;
+
+ switch (trapnr) {
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_TRAP:
+ if (env->regs[R_AT] == 0) {
+ abi_long ret;
+ qemu_log_mask(CPU_LOG_INT, "\nSyscall\n");
+
+ ret = do_syscall(env, env->regs[2],
+ env->regs[4], env->regs[5], env->regs[6],
+ env->regs[7], env->regs[8], env->regs[9],
+ 0, 0);
+
+ if (env->regs[2] == 0) { /* FIXME: syscall 0 workaround */
+ ret = 0;
+ }
+
+ env->regs[2] = abs(ret);
+ /* Return value is 0..4096 */
+ env->regs[7] = (ret > 0xfffffffffffff000ULL);
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[CR_STATUS] &= ~0x3;
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ env->regs[R_PC] += 4;
+ break;
+ } else {
+ qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
+
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[CR_STATUS] &= ~0x3;
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ env->regs[R_PC] = cpu->exception_addr;
+
+ gdbsig = TARGET_SIGTRAP;
+ break;
+ }
+ case 0xaa:
+ switch (env->regs[R_PC]) {
+ /*case 0x1000:*/ /* TODO:__kuser_helper_version */
+ case 0x1004: /* __kuser_cmpxchg */
+ start_exclusive();
+ if (env->regs[4] & 0x3) {
+ goto kuser_fail;
+ }
+ ret = get_user_u32(env->regs[2], env->regs[4]);
+ if (ret) {
+ end_exclusive();
+ goto kuser_fail;
+ }
+ env->regs[2] -= env->regs[5];
+ if (env->regs[2] == 0) {
+ put_user_u32(env->regs[6], env->regs[4]);
+ }
+ end_exclusive();
+ env->regs[R_PC] = env->regs[R_RA];
+ break;
+ /*case 0x1040:*/ /* TODO:__kuser_sigtramp */
+ default:
+ ;
+kuser_fail:
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ /* TODO: check env->error_code */
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->regs[R_PC];
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ }
+ break;
+ default:
+ EXCP_DUMP(env, "\nqemu: unhandled CPU exception %#x - aborting\n",
+ trapnr);
+ gdbsig = TARGET_SIGILL;
+ break;
+ }
+ if (gdbsig) {
+ gdb_handlesig(cs, gdbsig);
+ if (gdbsig != TARGET_SIGTRAP) {
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ process_pending_signals(env);
+ }
+}
+
+#endif /* TARGET_NIOS2 */
+
#ifdef TARGET_OPENRISC
void cpu_loop(CPUOpenRISCState *env)
@@ -2864,6 +2970,13 @@ void cpu_loop(CPUM68KState *env)
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
break;
+ case EXCP_DIV0:
+ info.si_signo = TARGET_SIGFPE;
+ info.si_errno = 0;
+ info.si_code = TARGET_FPE_INTDIV;
+ info._sifields._sigfault._addr = env->pc;
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
case EXCP_TRAP0:
{
abi_long ret;
@@ -3505,6 +3618,169 @@ void cpu_loop(CPUTLGState *env)
#endif
+#ifdef TARGET_HPPA
+
+static abi_ulong hppa_lws(CPUHPPAState *env)
+{
+ uint32_t which = env->gr[20];
+ abi_ulong addr = env->gr[26];
+ abi_ulong old = env->gr[25];
+ abi_ulong new = env->gr[24];
+ abi_ulong size, ret;
+
+ switch (which) {
+ default:
+ return -TARGET_ENOSYS;
+
+ case 0: /* elf32 atomic 32bit cmpxchg */
+ if ((addr & 3) || !access_ok(VERIFY_WRITE, addr, 4)) {
+ return -TARGET_EFAULT;
+ }
+ old = tswap32(old);
+ new = tswap32(new);
+ ret = atomic_cmpxchg((uint32_t *)g2h(addr), old, new);
+ ret = tswap32(ret);
+ break;
+
+ case 2: /* elf32 atomic "new" cmpxchg */
+ size = env->gr[23];
+ if (size >= 4) {
+ return -TARGET_ENOSYS;
+ }
+ if (((addr | old | new) & ((1 << size) - 1))
+ || !access_ok(VERIFY_WRITE, addr, 1 << size)
+ || !access_ok(VERIFY_READ, old, 1 << size)
+ || !access_ok(VERIFY_READ, new, 1 << size)) {
+ return -TARGET_EFAULT;
+ }
+ /* Note that below we use host-endian loads so that the cmpxchg
+ can be host-endian as well. */
+ switch (size) {
+ case 0:
+ old = *(uint8_t *)g2h(old);
+ new = *(uint8_t *)g2h(new);
+ ret = atomic_cmpxchg((uint8_t *)g2h(addr), old, new);
+ ret = ret != old;
+ break;
+ case 1:
+ old = *(uint16_t *)g2h(old);
+ new = *(uint16_t *)g2h(new);
+ ret = atomic_cmpxchg((uint16_t *)g2h(addr), old, new);
+ ret = ret != old;
+ break;
+ case 2:
+ old = *(uint32_t *)g2h(old);
+ new = *(uint32_t *)g2h(new);
+ ret = atomic_cmpxchg((uint32_t *)g2h(addr), old, new);
+ ret = ret != old;
+ break;
+ case 3:
+ {
+ uint64_t o64, n64, r64;
+ o64 = *(uint64_t *)g2h(old);
+ n64 = *(uint64_t *)g2h(new);
+#ifdef CONFIG_ATOMIC64
+ r64 = atomic_cmpxchg__nocheck((uint64_t *)g2h(addr), o64, n64);
+ ret = r64 != o64;
+#else
+ start_exclusive();
+ r64 = *(uint64_t *)g2h(addr);
+ ret = 1;
+ if (r64 == o64) {
+ *(uint64_t *)g2h(addr) = n64;
+ ret = 0;
+ }
+ end_exclusive();
+#endif
+ }
+ break;
+ }
+ break;
+ }
+
+ env->gr[28] = ret;
+ return 0;
+}
+
+void cpu_loop(CPUHPPAState *env)
+{
+ CPUState *cs = CPU(hppa_env_get_cpu(env));
+ target_siginfo_t info;
+ abi_ulong ret;
+ int trapnr;
+
+ while (1) {
+ cpu_exec_start(cs);
+ trapnr = cpu_exec(cs);
+ cpu_exec_end(cs);
+ process_queued_cpu_work(cs);
+
+ switch (trapnr) {
+ case EXCP_SYSCALL:
+ ret = do_syscall(env, env->gr[20],
+ env->gr[26], env->gr[25],
+ env->gr[24], env->gr[23],
+ env->gr[22], env->gr[21], 0, 0);
+ switch (ret) {
+ default:
+ env->gr[28] = ret;
+ /* We arrived here by faking the gateway page. Return. */
+ env->iaoq_f = env->gr[31];
+ env->iaoq_b = env->gr[31] + 4;
+ break;
+ case -TARGET_ERESTARTSYS:
+ case -TARGET_QEMU_ESIGRETURN:
+ break;
+ }
+ break;
+ case EXCP_SYSCALL_LWS:
+ env->gr[21] = hppa_lws(env);
+ /* We arrived here by faking the gateway page. Return. */
+ env->iaoq_f = env->gr[31];
+ env->iaoq_b = env->gr[31] + 4;
+ break;
+ case EXCP_SIGSEGV:
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = TARGET_SEGV_ACCERR;
+ info._sifields._sigfault._addr = env->ior;
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
+ case EXCP_SIGILL:
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_ILLOPN;
+ info._sifields._sigfault._addr = env->iaoq_f;
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
+ case EXCP_SIGFPE:
+ info.si_signo = TARGET_SIGFPE;
+ info.si_errno = 0;
+ info.si_code = 0;
+ info._sifields._sigfault._addr = env->iaoq_f;
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
+ case EXCP_DEBUG:
+ trapnr = gdb_handlesig(cs, TARGET_SIGTRAP);
+ if (trapnr) {
+ info.si_signo = trapnr;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, trapnr, QEMU_SI_FAULT, &info);
+ }
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ process_pending_signals(env);
+ }
+}
+
+#endif /* TARGET_HPPA */
+
THREAD CPUState *thread_cpu;
bool qemu_cpu_is_self(CPUState *cpu)
@@ -4172,15 +4448,16 @@ int main(int argc, char **argv, char **envp)
qemu_log("start_brk 0x" TARGET_ABI_FMT_lx "\n", info->start_brk);
qemu_log("end_code 0x" TARGET_ABI_FMT_lx "\n", info->end_code);
- qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n",
- info->start_code);
- qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n",
- info->start_data);
+ qemu_log("start_code 0x" TARGET_ABI_FMT_lx "\n", info->start_code);
+ qemu_log("start_data 0x" TARGET_ABI_FMT_lx "\n", info->start_data);
qemu_log("end_data 0x" TARGET_ABI_FMT_lx "\n", info->end_data);
- qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n",
- info->start_stack);
+ qemu_log("start_stack 0x" TARGET_ABI_FMT_lx "\n", info->start_stack);
qemu_log("brk 0x" TARGET_ABI_FMT_lx "\n", info->brk);
qemu_log("entry 0x" TARGET_ABI_FMT_lx "\n", info->entry);
+ qemu_log("argv_start 0x" TARGET_ABI_FMT_lx "\n", info->arg_start);
+ qemu_log("env_start 0x" TARGET_ABI_FMT_lx "\n",
+ info->arg_end + (abi_ulong)sizeof(abi_ulong));
+ qemu_log("auxv_start 0x" TARGET_ABI_FMT_lx "\n", info->saved_auxv);
}
target_set_brk(info->brk);
@@ -4461,6 +4738,36 @@ int main(int argc, char **argv, char **envp)
restore_snan_bit_mode(env);
}
}
+#elif defined(TARGET_NIOS2)
+ {
+ env->regs[0] = 0;
+ env->regs[1] = regs->r1;
+ env->regs[2] = regs->r2;
+ env->regs[3] = regs->r3;
+ env->regs[4] = regs->r4;
+ env->regs[5] = regs->r5;
+ env->regs[6] = regs->r6;
+ env->regs[7] = regs->r7;
+ env->regs[8] = regs->r8;
+ env->regs[9] = regs->r9;
+ env->regs[10] = regs->r10;
+ env->regs[11] = regs->r11;
+ env->regs[12] = regs->r12;
+ env->regs[13] = regs->r13;
+ env->regs[14] = regs->r14;
+ env->regs[15] = regs->r15;
+ /* TODO: unsigned long orig_r2; */
+ env->regs[R_RA] = regs->ra;
+ env->regs[R_FP] = regs->fp;
+ env->regs[R_SP] = regs->sp;
+ env->regs[R_GP] = regs->gp;
+ env->regs[CR_ESTATUS] = regs->estatus;
+ env->regs[R_EA] = regs->ea;
+ /* TODO: unsigned long orig_r7; */
+
+ /* Emulate eret when starting thread. */
+ env->regs[R_PC] = regs->ea;
+ }
#elif defined(TARGET_OPENRISC)
{
int i;
@@ -4531,6 +4838,15 @@ int main(int argc, char **argv, char **envp)
}
env->pc = regs->pc;
}
+#elif defined(TARGET_HPPA)
+ {
+ int i;
+ for (i = 1; i < 32; i++) {
+ env->gr[i] = regs->gr[i];
+ }
+ env->iaoq_f = regs->iaoq[0];
+ env->iaoq_b = regs->iaoq[1];
+ }
#else
#error unsupported target CPU
#endif
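A quick illustration (not part of the patch) of the nios2 syscall return convention set up in cpu_loop() above: r2 receives the magnitude of the result and r7 is set when the result was an error, so a guest libc wrapper would decode the pair roughly like this (the helper name is hypothetical):

#include <errno.h>

/* Hypothetical guest-side decoder for the r2/r7 pair written by cpu_loop(). */
static long nios2_decode_syscall(unsigned long r2, unsigned long r7)
{
    if (r7) {            /* error: r2 holds the positive errno value */
        errno = r2;
        return -1;
    }
    return r2;           /* success: r2 holds the non-negative result */
}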
diff --git a/linux-user/mips/target_syscall.h b/linux-user/mips/target_syscall.h
index 0b64b73714..2fca1c6bf9 100644
--- a/linux-user/mips/target_syscall.h
+++ b/linux-user/mips/target_syscall.h
@@ -221,6 +221,11 @@ struct target_pt_regs {
#undef TARGET_ENOTRECOVERABLE
#define TARGET_ENOTRECOVERABLE 166 /* State not recoverable */
+#undef TARGET_ERFKILL
+#define TARGET_ERFKILL 167
+#undef TARGET_EHWPOISON
+#define TARGET_EHWPOISON 168
+
#undef TARGET_EDQUOT
#define TARGET_EDQUOT 1133 /* Quota exceeded */
diff --git a/linux-user/mips64/target_syscall.h b/linux-user/mips64/target_syscall.h
index 6692917e2e..078437d765 100644
--- a/linux-user/mips64/target_syscall.h
+++ b/linux-user/mips64/target_syscall.h
@@ -218,6 +218,11 @@ struct target_pt_regs {
#undef TARGET_ENOTRECOVERABLE
#define TARGET_ENOTRECOVERABLE 166 /* State not recoverable */
+#undef TARGET_ERFKILL
+#define TARGET_ERFKILL 167
+#undef TARGET_EHWPOISON
+#define TARGET_EHWPOISON 168
+
#undef TARGET_EDQUOT
#define TARGET_EDQUOT 1133 /* Quota exceeded */
diff --git a/linux-user/nios2/syscall_nr.h b/linux-user/nios2/syscall_nr.h
new file mode 100644
index 0000000000..8b46763673
--- /dev/null
+++ b/linux-user/nios2/syscall_nr.h
@@ -0,0 +1,329 @@
+#define TARGET_NR_io_setup 0
+#define TARGET_NR_io_destroy 1
+#define TARGET_NR_io_submit 2
+#define TARGET_NR_io_cancel 3
+#define TARGET_NR_io_getevents 4
+#define TARGET_NR_setxattr 5
+#define TARGET_NR_lsetxattr 6
+#define TARGET_NR_fsetxattr 7
+#define TARGET_NR_getxattr 8
+#define TARGET_NR_lgetxattr 9
+#define TARGET_NR_fgetxattr 10
+#define TARGET_NR_listxattr 11
+#define TARGET_NR_llistxattr 12
+#define TARGET_NR_flistxattr 13
+#define TARGET_NR_removexattr 14
+#define TARGET_NR_lremovexattr 15
+#define TARGET_NR_fremovexattr 16
+#define TARGET_NR_getcwd 17
+#define TARGET_NR_lookup_dcookie 18
+#define TARGET_NR_eventfd2 19
+#define TARGET_NR_epoll_create1 20
+#define TARGET_NR_epoll_ctl 21
+#define TARGET_NR_epoll_pwait 22
+#define TARGET_NR_dup 23
+#define TARGET_NR_dup3 24
+#define TARGET_NR_fcntl64 25
+#define TARGET_NR_inotify_init1 26
+#define TARGET_NR_inotify_add_watch 27
+#define TARGET_NR_inotify_rm_watch 28
+#define TARGET_NR_ioctl 29
+#define TARGET_NR_ioprio_set 30
+#define TARGET_NR_ioprio_get 31
+#define TARGET_NR_flock 32
+#define TARGET_NR_mknodat 33
+#define TARGET_NR_mkdirat 34
+#define TARGET_NR_unlinkat 35
+#define TARGET_NR_symlinkat 36
+#define TARGET_NR_linkat 37
+#define TARGET_NR_renameat 38
+#define TARGET_NR_umount2 39
+#define TARGET_NR_mount 40
+#define TARGET_NR_pivot_root 41
+#define TARGET_NR_nfsservctl 42
+#define TARGET_NR_statfs64 43
+#define TARGET_NR_fstatfs64 44
+#define TARGET_NR_truncate64 45
+#define TARGET_NR_ftruncate64 46
+#define TARGET_NR_fallocate 47
+#define TARGET_NR_faccessat 48
+#define TARGET_NR_chdir 49
+#define TARGET_NR_fchdir 50
+#define TARGET_NR_chroot 51
+#define TARGET_NR_fchmod 52
+#define TARGET_NR_fchmodat 53
+#define TARGET_NR_fchownat 54
+#define TARGET_NR_fchown 55
+#define TARGET_NR_openat 56
+#define TARGET_NR_close 57
+#define TARGET_NR_vhangup 58
+#define TARGET_NR_pipe2 59
+#define TARGET_NR_quotactl 60
+#define TARGET_NR_getdents64 61
+#define TARGET_NR_read 63
+#define TARGET_NR_write 64
+#define TARGET_NR_readv 65
+#define TARGET_NR_writev 66
+#define TARGET_NR_pread64 67
+#define TARGET_NR_pwrite64 68
+#define TARGET_NR_preadv 69
+#define TARGET_NR_pwritev 70
+#define TARGET_NR_sendfile64 71
+#define TARGET_NR_pselect6 72
+#define TARGET_NR_ppoll 73
+#define TARGET_NR_signalfd4 74
+#define TARGET_NR_vmsplice 75
+#define TARGET_NR_splice 76
+#define TARGET_NR_tee 77
+#define TARGET_NR_readlinkat 78
+#define TARGET_NR_fstatat64 79
+#define TARGET_NR_fstat64 80
+#define TARGET_NR_sync 81
+#define TARGET_NR_fsync 82
+#define TARGET_NR_fdatasync 83
+#define TARGET_NR_sync_file_range 84
+#define TARGET_NR_timerfd_create 85
+#define TARGET_NR_timerfd_settime 86
+#define TARGET_NR_timerfd_gettime 87
+#define TARGET_NR_utimensat 88
+#define TARGET_NR_acct 89
+#define TARGET_NR_capget 90
+#define TARGET_NR_capset 91
+#define TARGET_NR_personality 92
+#define TARGET_NR_exit 93
+#define TARGET_NR_exit_group 94
+#define TARGET_NR_waitid 95
+#define TARGET_NR_set_tid_address 96
+#define TARGET_NR_unshare 97
+#define TARGET_NR_futex 98
+#define TARGET_NR_set_robust_list 99
+#define TARGET_NR_get_robust_list 100
+#define TARGET_NR_nanosleep 101
+#define TARGET_NR_getitimer 102
+#define TARGET_NR_setitimer 103
+#define TARGET_NR_kexec_load 104
+#define TARGET_NR_init_module 105
+#define TARGET_NR_delete_module 106
+#define TARGET_NR_timer_create 107
+#define TARGET_NR_timer_gettime 108
+#define TARGET_NR_timer_getoverrun 109
+#define TARGET_NR_timer_settime 110
+#define TARGET_NR_timer_delete 111
+#define TARGET_NR_clock_settime 112
+#define TARGET_NR_clock_gettime 113
+#define TARGET_NR_clock_getres 114
+#define TARGET_NR_clock_nanosleep 115
+#define TARGET_NR_syslog 116
+#define TARGET_NR_ptrace 117
+#define TARGET_NR_sched_setparam 118
+#define TARGET_NR_sched_setscheduler 119
+#define TARGET_NR_sched_getscheduler 120
+#define TARGET_NR_sched_getparam 121
+#define TARGET_NR_sched_setaffinity 122
+#define TARGET_NR_sched_getaffinity 123
+#define TARGET_NR_sched_yield 124
+#define TARGET_NR_sched_get_priority_max 125
+#define TARGET_NR_sched_get_priority_min 126
+#define TARGET_NR_sched_rr_get_interval 127
+#define TARGET_NR_restart_syscall 128
+#define TARGET_NR_kill 129
+#define TARGET_NR_tkill 130
+#define TARGET_NR_tgkill 131
+#define TARGET_NR_sigaltstack 132
+#define TARGET_NR_rt_sigsuspend 133
+#define TARGET_NR_rt_sigaction 134
+#define TARGET_NR_rt_sigprocmask 135
+#define TARGET_NR_rt_sigpending 136
+#define TARGET_NR_rt_sigtimedwait 137
+#define TARGET_NR_rt_sigqueueinfo 138
+#define TARGET_NR_rt_sigreturn 139
+#define TARGET_NR_setpriority 140
+#define TARGET_NR_getpriority 141
+#define TARGET_NR_reboot 142
+#define TARGET_NR_setregid 143
+#define TARGET_NR_setgid 144
+#define TARGET_NR_setreuid 145
+#define TARGET_NR_setuid 146
+#define TARGET_NR_setresuid 147
+#define TARGET_NR_getresuid 148
+#define TARGET_NR_setresgid 149
+#define TARGET_NR_getresgid 150
+#define TARGET_NR_setfsuid 151
+#define TARGET_NR_setfsgid 152
+#define TARGET_NR_times 153
+#define TARGET_NR_setpgid 154
+#define TARGET_NR_getpgid 155
+#define TARGET_NR_getsid 156
+#define TARGET_NR_setsid 157
+#define TARGET_NR_getgroups 158
+#define TARGET_NR_setgroups 159
+#define TARGET_NR_uname 160
+#define TARGET_NR_sethostname 161
+#define TARGET_NR_setdomainname 162
+#define TARGET_NR_getrlimit 163
+#define TARGET_NR_setrlimit 164
+#define TARGET_NR_getrusage 165
+#define TARGET_NR_umask 166
+#define TARGET_NR_prctl 167
+#define TARGET_NR_getcpu 168
+#define TARGET_NR_gettimeofday 169
+#define TARGET_NR_settimeofday 170
+#define TARGET_NR_adjtimex 171
+#define TARGET_NR_getpid 172
+#define TARGET_NR_getppid 173
+#define TARGET_NR_getuid 174
+#define TARGET_NR_geteuid 175
+#define TARGET_NR_getgid 176
+#define TARGET_NR_getegid 177
+#define TARGET_NR_gettid 178
+#define TARGET_NR_sysinfo 179
+#define TARGET_NR_mq_open 180
+#define TARGET_NR_mq_unlink 181
+#define TARGET_NR_mq_timedsend 182
+#define TARGET_NR_mq_timedreceive 183
+#define TARGET_NR_mq_notify 184
+#define TARGET_NR_mq_getsetattr 185
+#define TARGET_NR_msgget 186
+#define TARGET_NR_msgctl 187
+#define TARGET_NR_msgrcv 188
+#define TARGET_NR_msgsnd 189
+#define TARGET_NR_semget 190
+#define TARGET_NR_semctl 191
+#define TARGET_NR_semtimedop 192
+#define TARGET_NR_semop 193
+#define TARGET_NR_shmget 194
+#define TARGET_NR_shmctl 195
+#define TARGET_NR_shmat 196
+#define TARGET_NR_shmdt 197
+#define TARGET_NR_socket 198
+#define TARGET_NR_socketpair 199
+#define TARGET_NR_bind 200
+#define TARGET_NR_listen 201
+#define TARGET_NR_accept 202
+#define TARGET_NR_connect 203
+#define TARGET_NR_getsockname 204
+#define TARGET_NR_getpeername 205
+#define TARGET_NR_sendto 206
+#define TARGET_NR_recvfrom 207
+#define TARGET_NR_setsockopt 208
+#define TARGET_NR_getsockopt 209
+#define TARGET_NR_shutdown 210
+#define TARGET_NR_sendmsg 211
+#define TARGET_NR_recvmsg 212
+#define TARGET_NR_readahead 213
+#define TARGET_NR_brk 214
+#define TARGET_NR_munmap 215
+#define TARGET_NR_mremap 216
+#define TARGET_NR_add_key 217
+#define TARGET_NR_request_key 218
+#define TARGET_NR_keyctl 219
+#define TARGET_NR_clone 220
+#define TARGET_NR_execve 221
+#define TARGET_NR_mmap2 222
+#define TARGET_NR_fadvise64_64 223
+#define TARGET_NR_swapon 224
+#define TARGET_NR_swapoff 225
+#define TARGET_NR_mprotect 226
+#define TARGET_NR_msync 227
+#define TARGET_NR_mlock 228
+#define TARGET_NR_munlock 229
+#define TARGET_NR_mlockall 230
+#define TARGET_NR_munlockall 231
+#define TARGET_NR_mincore 232
+#define TARGET_NR_madvise 233
+#define TARGET_NR_remap_file_pages 234
+#define TARGET_NR_mbind 235
+#define TARGET_NR_get_mempolicy 236
+#define TARGET_NR_set_mempolicy 237
+#define TARGET_NR_migrate_pages 238
+#define TARGET_NR_move_pages 239
+#define TARGET_NR_rt_tgsigqueueinfo 240
+#define TARGET_NR_perf_event_open 241
+#define TARGET_NR_accept4 242
+#define TARGET_NR_recvmmsg 243
+#define TARGET_NR_cacheflush 244
+#define TARGET_NR_arch_specific_syscall 244
+#define TARGET_NR_wait4 260
+#define TARGET_NR_prlimit64 261
+#define TARGET_NR_fanotify_init 262
+#define TARGET_NR_fanotify_mark 263
+#define TARGET_NR_name_to_handle_at 264
+#define TARGET_NR_open_by_handle_at 265
+#define TARGET_NR_clock_adjtime 266
+#define TARGET_NR_syncfs 267
+#define TARGET_NR_setns 268
+#define TARGET_NR_sendmmsg 269
+#define TARGET_NR_process_vm_readv 270
+#define TARGET_NR_process_vm_writev 271
+#define TARGET_NR_kcmp 272
+#define TARGET_NR_finit_module 273
+#define TARGET_NR_sched_setattr 274
+#define TARGET_NR_sched_getattr 275
+#define TARGET_NR_renameat2 276
+#define TARGET_NR_seccomp 277
+#define TARGET_NR_getrandom 278
+#define TARGET_NR_memfd_create 279
+#define TARGET_NR_bpf 280
+#define TARGET_NR_execveat 281
+#define TARGET_NR_userfaultfd 282
+#define TARGET_NR_membarrier 283
+#define TARGET_NR_mlock2 284
+#define TARGET_NR_copy_file_range 285
+#define TARGET_NR_preadv2 286
+#define TARGET_NR_pwritev2 287
+#define TARGET_NR_open 1024
+#define TARGET_NR_link 1025
+#define TARGET_NR_unlink 1026
+#define TARGET_NR_mknod 1027
+#define TARGET_NR_chmod 1028
+#define TARGET_NR_chown 1029
+#define TARGET_NR_mkdir 1030
+#define TARGET_NR_rmdir 1031
+#define TARGET_NR_lchown 1032
+#define TARGET_NR_access 1033
+#define TARGET_NR_rename 1034
+#define TARGET_NR_readlink 1035
+#define TARGET_NR_symlink 1036
+#define TARGET_NR_utimes 1037
+#define TARGET_NR_3264_stat 1038
+#define TARGET_NR_3264_lstat 1039
+#define TARGET_NR_pipe 1040
+#define TARGET_NR_dup2 1041
+#define TARGET_NR_epoll_create 1042
+#define TARGET_NR_inotify_init 1043
+#define TARGET_NR_eventfd 1044
+#define TARGET_NR_signalfd 1045
+#define TARGET_NR_sendfile 1046
+#define TARGET_NR_ftruncate 1047
+#define TARGET_NR_truncate 1048
+#define TARGET_NR_stat 1049
+#define TARGET_NR_lstat 1050
+#define TARGET_NR_fstat 1051
+#define TARGET_NR_fcntl 1052
+#define TARGET_NR_fadvise64 1053
+#define TARGET_NR_newfstatat 1054
+#define TARGET_NR_fstatfs 1055
+#define TARGET_NR_statfs 1056
+#define TARGET_NR_lseek 1057
+#define TARGET_NR_mmap 1058
+#define TARGET_NR_alarm 1059
+#define TARGET_NR_getpgrp 1060
+#define TARGET_NR_pause 1061
+#define TARGET_NR_time 1062
+#define TARGET_NR_utime 1063
+#define TARGET_NR_creat 1064
+#define TARGET_NR_getdents 1065
+#define TARGET_NR_futimesat 1066
+#define TARGET_NR_select 1067
+#define TARGET_NR_poll 1068
+#define TARGET_NR_epoll_wait 1069
+#define TARGET_NR_ustat 1070
+#define TARGET_NR_vfork 1071
+#define TARGET_NR_oldwait4 1072
+#define TARGET_NR_recv 1073
+#define TARGET_NR_send 1074
+#define TARGET_NR_bdflush 1075
+#define TARGET_NR_umount 1076
+#define TARGET_NR_uselib 1077
+#define TARGET_NR__sysctl 1078
+#define TARGET_NR_fork 1079
diff --git a/linux-user/nios2/target_cpu.h b/linux-user/nios2/target_cpu.h
new file mode 100644
index 0000000000..20ab4790a9
--- /dev/null
+++ b/linux-user/nios2/target_cpu.h
@@ -0,0 +1,39 @@
+/*
+ * Nios2 specific CPU ABI and functions for linux-user
+ *
+ * Copyright (c) 2016 Marek Vasut <marex@denx.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_CPU_H
+#define TARGET_CPU_H
+
+static inline void cpu_clone_regs(CPUNios2State *env, target_ulong newsp)
+{
+ if (newsp) {
+ env->regs[R_SP] = newsp;
+ }
+ env->regs[R_RET0] = 0;
+}
+
+static inline void cpu_set_tls(CPUNios2State *env, target_ulong newtls)
+{
+ /*
+ * Linux kernel 3.10 does not pay any attention to CLONE_SETTLS
+ * in copy_thread(), so QEMU need not do so either.
+ */
+}
+
+#endif
diff --git a/linux-user/nios2/target_signal.h b/linux-user/nios2/target_signal.h
new file mode 100644
index 0000000000..23a8267696
--- /dev/null
+++ b/linux-user/nios2/target_signal.h
@@ -0,0 +1,26 @@
+#ifndef TARGET_SIGNAL_H
+#define TARGET_SIGNAL_H
+
+#include "cpu.h"
+
+/* this struct defines a stack used during syscall handling */
+
+typedef struct target_sigaltstack {
+ abi_long ss_sp;
+ abi_ulong ss_size;
+ abi_long ss_flags;
+} target_stack_t;
+
+/* sigaltstack controls */
+#define TARGET_SS_ONSTACK 1
+#define TARGET_SS_DISABLE 2
+
+#define TARGET_MINSIGSTKSZ 2048
+#define TARGET_SIGSTKSZ 8192
+
+static inline abi_ulong get_sp_from_cpustate(CPUNios2State *state)
+{
+ return state->regs[R_SP];
+}
+
+#endif /* TARGET_SIGNAL_H */
diff --git a/linux-user/nios2/target_structs.h b/linux-user/nios2/target_structs.h
new file mode 100644
index 0000000000..8713772089
--- /dev/null
+++ b/linux-user/nios2/target_structs.h
@@ -0,0 +1,58 @@
+/*
+ * Nios2 specific structures for linux-user
+ *
+ * Copyright (c) 2016 Marek Vasut <marex@denx.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TARGET_STRUCTS_H
+#define TARGET_STRUCTS_H
+
+struct target_ipc_perm {
+ abi_int __key; /* Key. */
+ abi_uint uid; /* Owner's user ID. */
+ abi_uint gid; /* Owner's group ID. */
+ abi_uint cuid; /* Creator's user ID. */
+ abi_uint cgid; /* Creator's group ID. */
+ abi_ushort mode; /* Read/write permission. */
+ abi_ushort __pad1;
+ abi_ushort __seq; /* Sequence number. */
+ abi_ushort __pad2;
+ abi_ulong __unused1;
+ abi_ulong __unused2;
+};
+
+struct target_shmid_ds {
+ struct target_ipc_perm shm_perm; /* operation permission struct */
+ abi_long shm_segsz; /* size of segment in bytes */
+ abi_ulong shm_atime; /* time of last shmat() */
+#if TARGET_ABI_BITS == 32
+ abi_ulong __unused1;
+#endif
+ abi_ulong shm_dtime; /* time of last shmdt() */
+#if TARGET_ABI_BITS == 32
+ abi_ulong __unused2;
+#endif
+ abi_ulong shm_ctime; /* time of last change by shmctl() */
+#if TARGET_ABI_BITS == 32
+ abi_ulong __unused3;
+#endif
+ abi_int shm_cpid; /* pid of creator */
+ abi_int shm_lpid; /* pid of last shmop */
+ abi_ulong shm_nattch; /* number of current attaches */
+ abi_ulong __unused4;
+ abi_ulong __unused5;
+};
+
+#endif
diff --git a/linux-user/nios2/target_syscall.h b/linux-user/nios2/target_syscall.h
new file mode 100644
index 0000000000..ca6b7e69f6
--- /dev/null
+++ b/linux-user/nios2/target_syscall.h
@@ -0,0 +1,37 @@
+#ifndef TARGET_SYSCALL_H
+#define TARGET_SYSCALL_H
+
+#define UNAME_MACHINE "nios2"
+#define UNAME_MINIMUM_RELEASE "3.19.0"
+
+struct target_pt_regs {
+ unsigned long r8; /* r8-r15 Caller-saved GP registers */
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r1; /* Assembler temporary */
+ unsigned long r2; /* Retval LS 32bits */
+ unsigned long r3; /* Retval MS 32bits */
+ unsigned long r4; /* r4-r7 Register arguments */
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long orig_r2; /* Copy of r2 ?? */
+ unsigned long ra; /* Return address */
+ unsigned long fp; /* Frame pointer */
+ unsigned long sp; /* Stack pointer */
+ unsigned long gp; /* Global pointer */
+ unsigned long estatus;
+ unsigned long ea; /* Exception return address (pc) */
+ unsigned long orig_r7;
+};
+
+#define TARGET_MINSIGSTKSZ 2048
+#define TARGET_MLOCKALL_MCL_CURRENT 1
+#define TARGET_MLOCKALL_MCL_FUTURE 2
+
+#endif /* TARGET_SYSCALL_H */
diff --git a/linux-user/nios2/termbits.h b/linux-user/nios2/termbits.h
new file mode 100644
index 0000000000..b64ba974cf
--- /dev/null
+++ b/linux-user/nios2/termbits.h
@@ -0,0 +1,220 @@
+/* from asm/termbits.h */
+/* NOTE: exactly the same as i386 */
+
+#define TARGET_NCCS 19
+
+struct target_termios {
+ unsigned int c_iflag; /* input mode flags */
+ unsigned int c_oflag; /* output mode flags */
+ unsigned int c_cflag; /* control mode flags */
+ unsigned int c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[TARGET_NCCS]; /* control characters */
+};
+
+/* c_iflag bits */
+#define TARGET_IGNBRK 0000001
+#define TARGET_BRKINT 0000002
+#define TARGET_IGNPAR 0000004
+#define TARGET_PARMRK 0000010
+#define TARGET_INPCK 0000020
+#define TARGET_ISTRIP 0000040
+#define TARGET_INLCR 0000100
+#define TARGET_IGNCR 0000200
+#define TARGET_ICRNL 0000400
+#define TARGET_IUCLC 0001000
+#define TARGET_IXON 0002000
+#define TARGET_IXANY 0004000
+#define TARGET_IXOFF 0010000
+#define TARGET_IMAXBEL 0020000
+#define TARGET_IUTF8 0040000
+
+/* c_oflag bits */
+#define TARGET_OPOST 0000001
+#define TARGET_OLCUC 0000002
+#define TARGET_ONLCR 0000004
+#define TARGET_OCRNL 0000010
+#define TARGET_ONOCR 0000020
+#define TARGET_ONLRET 0000040
+#define TARGET_OFILL 0000100
+#define TARGET_OFDEL 0000200
+#define TARGET_NLDLY 0000400
+#define TARGET_NL0 0000000
+#define TARGET_NL1 0000400
+#define TARGET_CRDLY 0003000
+#define TARGET_CR0 0000000
+#define TARGET_CR1 0001000
+#define TARGET_CR2 0002000
+#define TARGET_CR3 0003000
+#define TARGET_TABDLY 0014000
+#define TARGET_TAB0 0000000
+#define TARGET_TAB1 0004000
+#define TARGET_TAB2 0010000
+#define TARGET_TAB3 0014000
+#define TARGET_XTABS 0014000
+#define TARGET_BSDLY 0020000
+#define TARGET_BS0 0000000
+#define TARGET_BS1 0020000
+#define TARGET_VTDLY 0040000
+#define TARGET_VT0 0000000
+#define TARGET_VT1 0040000
+#define TARGET_FFDLY 0100000
+#define TARGET_FF0 0000000
+#define TARGET_FF1 0100000
+
+/* c_cflag bit meaning */
+#define TARGET_CBAUD 0010017
+#define TARGET_B0 0000000 /* hang up */
+#define TARGET_B50 0000001
+#define TARGET_B75 0000002
+#define TARGET_B110 0000003
+#define TARGET_B134 0000004
+#define TARGET_B150 0000005
+#define TARGET_B200 0000006
+#define TARGET_B300 0000007
+#define TARGET_B600 0000010
+#define TARGET_B1200 0000011
+#define TARGET_B1800 0000012
+#define TARGET_B2400 0000013
+#define TARGET_B4800 0000014
+#define TARGET_B9600 0000015
+#define TARGET_B19200 0000016
+#define TARGET_B38400 0000017
+#define TARGET_EXTA B19200
+#define TARGET_EXTB B38400
+#define TARGET_CSIZE 0000060
+#define TARGET_CS5 0000000
+#define TARGET_CS6 0000020
+#define TARGET_CS7 0000040
+#define TARGET_CS8 0000060
+#define TARGET_CSTOPB 0000100
+#define TARGET_CREAD 0000200
+#define TARGET_PARENB 0000400
+#define TARGET_PARODD 0001000
+#define TARGET_HUPCL 0002000
+#define TARGET_CLOCAL 0004000
+#define TARGET_CBAUDEX 0010000
+#define TARGET_B57600 0010001
+#define TARGET_B115200 0010002
+#define TARGET_B230400 0010003
+#define TARGET_B460800 0010004
+#define TARGET_CIBAUD 002003600000 /* input baud rate (not used) */
+#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */
+#define TARGET_CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define TARGET_ISIG 0000001
+#define TARGET_ICANON 0000002
+#define TARGET_XCASE 0000004
+#define TARGET_ECHO 0000010
+#define TARGET_ECHOE 0000020
+#define TARGET_ECHOK 0000040
+#define TARGET_ECHONL 0000100
+#define TARGET_NOFLSH 0000200
+#define TARGET_TOSTOP 0000400
+#define TARGET_ECHOCTL 0001000
+#define TARGET_ECHOPRT 0002000
+#define TARGET_ECHOKE 0004000
+#define TARGET_FLUSHO 0010000
+#define TARGET_PENDIN 0040000
+#define TARGET_IEXTEN 0100000
+
+/* c_cc character offsets */
+#define TARGET_VINTR 0
+#define TARGET_VQUIT 1
+#define TARGET_VERASE 2
+#define TARGET_VKILL 3
+#define TARGET_VEOF 4
+#define TARGET_VTIME 5
+#define TARGET_VMIN 6
+#define TARGET_VSWTC 7
+#define TARGET_VSTART 8
+#define TARGET_VSTOP 9
+#define TARGET_VSUSP 10
+#define TARGET_VEOL 11
+#define TARGET_VREPRINT 12
+#define TARGET_VDISCARD 13
+#define TARGET_VWERASE 14
+#define TARGET_VLNEXT 15
+#define TARGET_VEOL2 16
+
+/* ioctls */
+
+#define TARGET_TCGETS 0x5401
+#define TARGET_TCSETS 0x5402
+#define TARGET_TCSETSW 0x5403
+#define TARGET_TCSETSF 0x5404
+#define TARGET_TCGETA 0x5405
+#define TARGET_TCSETA 0x5406
+#define TARGET_TCSETAW 0x5407
+#define TARGET_TCSETAF 0x5408
+#define TARGET_TCSBRK 0x5409
+#define TARGET_TCXONC 0x540A
+#define TARGET_TCFLSH 0x540B
+
+#define TARGET_TIOCEXCL 0x540C
+#define TARGET_TIOCNXCL 0x540D
+#define TARGET_TIOCSCTTY 0x540E
+#define TARGET_TIOCGPGRP 0x540F
+#define TARGET_TIOCSPGRP 0x5410
+#define TARGET_TIOCOUTQ 0x5411
+#define TARGET_TIOCSTI 0x5412
+#define TARGET_TIOCGWINSZ 0x5413
+#define TARGET_TIOCSWINSZ 0x5414
+#define TARGET_TIOCMGET 0x5415
+#define TARGET_TIOCMBIS 0x5416
+#define TARGET_TIOCMBIC 0x5417
+#define TARGET_TIOCMSET 0x5418
+#define TARGET_TIOCGSOFTCAR 0x5419
+#define TARGET_TIOCSSOFTCAR 0x541A
+#define TARGET_FIONREAD 0x541B
+#define TARGET_TIOCINQ TARGET_FIONREAD
+#define TARGET_TIOCLINUX 0x541C
+#define TARGET_TIOCCONS 0x541D
+#define TARGET_TIOCGSERIAL 0x541E
+#define TARGET_TIOCSSERIAL 0x541F
+#define TARGET_TIOCPKT 0x5420
+#define TARGET_FIONBIO 0x5421
+#define TARGET_TIOCNOTTY 0x5422
+#define TARGET_TIOCSETD 0x5423
+#define TARGET_TIOCGETD 0x5424
+#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TARGET_TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */
+#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */
+#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TARGET_TIOCGPTN TARGET_IOR('T', 0x30, unsigned int)
+ /* Get Pty Number (of pty-mux device) */
+#define TARGET_TIOCSPTLCK TARGET_IOW('T', 0x31, int)
+ /* Lock/unlock Pty */
+
+#define TARGET_FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define TARGET_FIOCLEX 0x5451
+#define TARGET_FIOASYNC 0x5452
+#define TARGET_TIOCSERCONFIG 0x5453
+#define TARGET_TIOCSERGWILD 0x5454
+#define TARGET_TIOCSERSWILD 0x5455
+#define TARGET_TIOCGLCKTRMIOS 0x5456
+#define TARGET_TIOCSLCKTRMIOS 0x5457
+#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TARGET_TIOCMIWAIT 0x545C
+ /* wait for a change on serial input line(s) */
+#define TARGET_TIOCGICOUNT 0x545D
+ /* read serial port inline interrupt counts */
+#define TARGET_TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
+#define TARGET_TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
+
+/* Used for packet mode */
+#define TARGET_TIOCPKT_DATA 0
+#define TARGET_TIOCPKT_FLUSHREAD 1
+#define TARGET_TIOCPKT_FLUSHWRITE 2
+#define TARGET_TIOCPKT_STOP 4
+#define TARGET_TIOCPKT_START 8
+#define TARGET_TIOCPKT_NOSTOP 16
+#define TARGET_TIOCPKT_DOSTOP 32
+
+#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */
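Most of the ioctl numbers above are fixed hexadecimal constants, but TARGET_TIOCGPTN and TARGET_TIOCSPTLCK are built with the TARGET_IOR()/TARGET_IOW() helpers, which follow the asm-generic _IOC bit layout (8 nr bits, 8 type bits, 14 size bits, 2 dir bits) that the TARGET_IOC_* definitions added to syscall_defs.h later in this series select for nios2. A minimal, self-contained sketch of that encoding, assuming the generic layout and the generic read-direction value of 2:

/* Sketch of the asm-generic ioctl number encoding behind TARGET_IOR().
 * Assumes the generic layout (8 nr, 8 type, 14 size, 2 dir bits) and the
 * generic read direction value of 2; HPPA swaps READ and WRITE below.
 */
#include <stdio.h>

#define IOC_NRBITS    8
#define IOC_TYPEBITS  8
#define IOC_SIZEBITS  14
#define IOC_NRSHIFT   0
#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)     /* 8  */
#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS) /* 16 */
#define IOC_DIRSHIFT  (IOC_SIZESHIFT + IOC_SIZEBITS) /* 30 */
#define IOC_READ      2U

#define IOC(dir, type, nr, size) \
    (((dir) << IOC_DIRSHIFT) | ((unsigned)(type) << IOC_TYPESHIFT) | \
     ((nr) << IOC_NRSHIFT) | ((unsigned)(size) << IOC_SIZESHIFT))

int main(void)
{
    /* TARGET_TIOCGPTN = TARGET_IOR('T', 0x30, unsigned int) */
    unsigned int tiocgptn = IOC(IOC_READ, 'T', 0x30, sizeof(unsigned int));
    printf("TIOCGPTN = 0x%08x\n", tiocgptn); /* 0x80045430 */
    return 0;
}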
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index da73a01106..4edd7d0c08 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -48,6 +48,9 @@ struct image_info {
abi_ulong auxv_len;
abi_ulong arg_start;
abi_ulong arg_end;
+ abi_ulong arg_strings;
+ abi_ulong env_strings;
+ abi_ulong file_string;
uint32_t elf_flags;
int personality;
#ifdef CONFIG_USE_FDPIC
diff --git a/linux-user/signal.c b/linux-user/signal.c
index c750053edd..5064de0c08 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -254,7 +254,7 @@ int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
}
#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
- !defined(TARGET_X86_64)
+ !defined(TARGET_X86_64) && !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
* caller is assumed to have called block_signals() already.
*/
@@ -3922,6 +3922,240 @@ long do_rt_sigreturn(CPUCRISState *env)
return -TARGET_ENOSYS;
}
+#elif defined(TARGET_NIOS2)
+
+#define MCONTEXT_VERSION 2
+
+struct target_sigcontext {
+ int version;
+ unsigned long gregs[32];
+};
+
+struct target_ucontext {
+ abi_ulong tuc_flags;
+ abi_ulong tuc_link;
+ target_stack_t tuc_stack;
+ struct target_sigcontext tuc_mcontext;
+ target_sigset_t tuc_sigmask; /* mask last for extensibility */
+};
+
+struct target_rt_sigframe {
+ struct target_siginfo info;
+ struct target_ucontext uc;
+};
+
+static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
+{
+ if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
+#ifdef CONFIG_STACK_GROWSUP
+ return target_sigaltstack_used.ss_sp;
+#else
+ return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
+#endif
+ }
+ return sp;
+}
+
+static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
+{
+ unsigned long *gregs = uc->tuc_mcontext.gregs;
+
+ __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
+ __put_user(env->regs[1], &gregs[0]);
+ __put_user(env->regs[2], &gregs[1]);
+ __put_user(env->regs[3], &gregs[2]);
+ __put_user(env->regs[4], &gregs[3]);
+ __put_user(env->regs[5], &gregs[4]);
+ __put_user(env->regs[6], &gregs[5]);
+ __put_user(env->regs[7], &gregs[6]);
+ __put_user(env->regs[8], &gregs[7]);
+ __put_user(env->regs[9], &gregs[8]);
+ __put_user(env->regs[10], &gregs[9]);
+ __put_user(env->regs[11], &gregs[10]);
+ __put_user(env->regs[12], &gregs[11]);
+ __put_user(env->regs[13], &gregs[12]);
+ __put_user(env->regs[14], &gregs[13]);
+ __put_user(env->regs[15], &gregs[14]);
+ __put_user(env->regs[16], &gregs[15]);
+ __put_user(env->regs[17], &gregs[16]);
+ __put_user(env->regs[18], &gregs[17]);
+ __put_user(env->regs[19], &gregs[18]);
+ __put_user(env->regs[20], &gregs[19]);
+ __put_user(env->regs[21], &gregs[20]);
+ __put_user(env->regs[22], &gregs[21]);
+ __put_user(env->regs[23], &gregs[22]);
+ __put_user(env->regs[R_RA], &gregs[23]);
+ __put_user(env->regs[R_FP], &gregs[24]);
+ __put_user(env->regs[R_GP], &gregs[25]);
+ __put_user(env->regs[R_EA], &gregs[27]);
+ __put_user(env->regs[R_SP], &gregs[28]);
+
+ return 0;
+}
+
+static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
+ int *pr2)
+{
+ int temp;
+ abi_ulong off, frame_addr = env->regs[R_SP];
+ unsigned long *gregs = uc->tuc_mcontext.gregs;
+ int err;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ /* current->restart_block.fn = do_no_restart_syscall; */
+
+ __get_user(temp, &uc->tuc_mcontext.version);
+ if (temp != MCONTEXT_VERSION) {
+ return 1;
+ }
+
+ /* restore passed registers */
+ __get_user(env->regs[1], &gregs[0]);
+ __get_user(env->regs[2], &gregs[1]);
+ __get_user(env->regs[3], &gregs[2]);
+ __get_user(env->regs[4], &gregs[3]);
+ __get_user(env->regs[5], &gregs[4]);
+ __get_user(env->regs[6], &gregs[5]);
+ __get_user(env->regs[7], &gregs[6]);
+ __get_user(env->regs[8], &gregs[7]);
+ __get_user(env->regs[9], &gregs[8]);
+ __get_user(env->regs[10], &gregs[9]);
+ __get_user(env->regs[11], &gregs[10]);
+ __get_user(env->regs[12], &gregs[11]);
+ __get_user(env->regs[13], &gregs[12]);
+ __get_user(env->regs[14], &gregs[13]);
+ __get_user(env->regs[15], &gregs[14]);
+ __get_user(env->regs[16], &gregs[15]);
+ __get_user(env->regs[17], &gregs[16]);
+ __get_user(env->regs[18], &gregs[17]);
+ __get_user(env->regs[19], &gregs[18]);
+ __get_user(env->regs[20], &gregs[19]);
+ __get_user(env->regs[21], &gregs[20]);
+ __get_user(env->regs[22], &gregs[21]);
+ __get_user(env->regs[23], &gregs[22]);
+ /* gregs[23] is handled below */
+ /* Verify, should this be settable */
+ __get_user(env->regs[R_FP], &gregs[24]);
+ /* Verify, should this be settable */
+ __get_user(env->regs[R_GP], &gregs[25]);
+ /* Not really necessary no user settable bits */
+ __get_user(temp, &gregs[26]);
+ __get_user(env->regs[R_EA], &gregs[27]);
+
+ __get_user(env->regs[R_RA], &gregs[23]);
+ __get_user(env->regs[R_SP], &gregs[28]);
+
+ off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
+ err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
+ if (err == -EFAULT) {
+ return 1;
+ }
+
+ *pr2 = env->regs[2];
+ return 0;
+}
+
+static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
+ size_t frame_size)
+{
+ unsigned long usp;
+
+ /* Default to using normal stack. */
+ usp = env->regs[R_SP];
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ usp = sigsp(usp, ka);
+
+ /* Verify, is it 32 or 64 bit aligned */
+ return (void *)((usp - frame_size) & -8UL);
+}
+
+static void setup_rt_frame(int sig, struct target_sigaction *ka,
+ target_siginfo_t *info,
+ target_sigset_t *set,
+ CPUNios2State *env)
+{
+ struct target_rt_sigframe *frame;
+ int i, err = 0;
+
+ frame = get_sigframe(ka, env, sizeof(*frame));
+
+ if (ka->sa_flags & SA_SIGINFO) {
+ tswap_siginfo(&frame->info, info);
+ }
+
+ /* Create the ucontext. */
+ __put_user(0, &frame->uc.tuc_flags);
+ __put_user(0, &frame->uc.tuc_link);
+ __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
+ __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
+ __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
+ err |= rt_setup_ucontext(&frame->uc, env);
+ for (i = 0; i < TARGET_NSIG_WORDS; i++) {
+ __put_user((abi_ulong)set->sig[i],
+ (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
+ }
+
+ if (err) {
+ goto give_sigsegv;
+ }
+
+ /* Set up to return from userspace; jump to fixed address sigreturn
+ trampoline on kuser page. */
+ env->regs[R_RA] = (unsigned long) (0x1044);
+
+ /* Set up registers for signal handler */
+ env->regs[R_SP] = (unsigned long) frame;
+ env->regs[4] = (unsigned long) sig;
+ env->regs[5] = (unsigned long) &frame->info;
+ env->regs[6] = (unsigned long) &frame->uc;
+ env->regs[R_EA] = (unsigned long) ka->_sa_handler;
+ return;
+
+give_sigsegv:
+ if (sig == TARGET_SIGSEGV) {
+ ka->_sa_handler = TARGET_SIG_DFL;
+ }
+ force_sigsegv(sig);
+ return;
+}
+
+long do_sigreturn(CPUNios2State *env)
+{
+ trace_user_do_sigreturn(env, 0);
+ fprintf(stderr, "do_sigreturn: not implemented\n");
+ return -TARGET_ENOSYS;
+}
+
+long do_rt_sigreturn(CPUNios2State *env)
+{
+ /* Verify, can we follow the stack back */
+ abi_ulong frame_addr = env->regs[R_SP];
+ struct target_rt_sigframe *frame;
+ sigset_t set;
+ int rval;
+
+ if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
+ goto badframe;
+ }
+
+ target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
+ do_sigprocmask(SIG_SETMASK, &set, NULL);
+
+ if (rt_restore_ucontext(env, &frame->uc, &rval)) {
+ goto badframe;
+ }
+
+ unlock_user_struct(frame, frame_addr, 0);
+ return rval;
+
+badframe:
+ unlock_user_struct(frame, frame_addr, 0);
+ force_sig(TARGET_SIGSEGV);
+ return 0;
+}
+/* TARGET_NIOS2 */
+
#elif defined(TARGET_OPENRISC)
struct target_sigcontext {
@@ -5888,6 +6122,195 @@ long do_rt_sigreturn(CPUTLGState *env)
return -TARGET_QEMU_ESIGRETURN;
}
+#elif defined(TARGET_HPPA)
+
+struct target_sigcontext {
+ abi_ulong sc_flags;
+ abi_ulong sc_gr[32];
+ uint64_t sc_fr[32];
+ abi_ulong sc_iasq[2];
+ abi_ulong sc_iaoq[2];
+ abi_ulong sc_sar;
+};
+
+struct target_ucontext {
+ abi_uint tuc_flags;
+ abi_ulong tuc_link;
+ target_stack_t tuc_stack;
+ abi_uint pad[1];
+ struct target_sigcontext tuc_mcontext;
+ target_sigset_t tuc_sigmask;
+};
+
+struct target_rt_sigframe {
+ abi_uint tramp[9];
+ target_siginfo_t info;
+ struct target_ucontext uc;
+ /* hidden location of upper halves of pa2.0 64-bit gregs */
+};
+
+static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
+{
+ int flags = 0;
+ int i;
+
+ /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
+
+ if (env->iaoq_f < TARGET_PAGE_SIZE) {
+ /* In the gateway page, executing a syscall. */
+ flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
+ __put_user(env->gr[31], &sc->sc_iaoq[0]);
+ __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
+ } else {
+ __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
+ __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
+ }
+ __put_user(0, &sc->sc_iasq[0]);
+ __put_user(0, &sc->sc_iasq[1]);
+ __put_user(flags, &sc->sc_flags);
+
+ __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
+ for (i = 1; i < 32; ++i) {
+ __put_user(env->gr[i], &sc->sc_gr[i]);
+ }
+
+ __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
+ for (i = 1; i < 32; ++i) {
+ __put_user(env->fr[i], &sc->sc_fr[i]);
+ }
+
+ __put_user(env->sar, &sc->sc_sar);
+}
+
+static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
+{
+ target_ulong psw;
+ int i;
+
+ __get_user(psw, &sc->sc_gr[0]);
+ cpu_hppa_put_psw(env, psw);
+
+ for (i = 1; i < 32; ++i) {
+ __get_user(env->gr[i], &sc->sc_gr[i]);
+ }
+ for (i = 0; i < 32; ++i) {
+ __get_user(env->fr[i], &sc->sc_fr[i]);
+ }
+ cpu_hppa_loaded_fr0(env);
+
+ __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
+ __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
+ __get_user(env->sar, &sc->sc_sar);
+}
+
+/* No, this doesn't look right, but it's copied straight from the kernel. */
+#define PARISC_RT_SIGFRAME_SIZE32 \
+ ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
+
+static void setup_rt_frame(int sig, struct target_sigaction *ka,
+ target_siginfo_t *info,
+ target_sigset_t *set, CPUArchState *env)
+{
+ abi_ulong frame_addr, sp, haddr;
+ struct target_rt_sigframe *frame;
+ int i;
+
+ sp = env->gr[30];
+ if (ka->sa_flags & TARGET_SA_ONSTACK) {
+ if (sas_ss_flags(sp) == 0) {
+ sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
+ }
+ }
+ frame_addr = QEMU_ALIGN_UP(sp, 64);
+ sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
+
+ trace_user_setup_rt_frame(env, frame_addr);
+
+ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
+ goto give_sigsegv;
+ }
+
+ tswap_siginfo(&frame->info, info);
+ frame->uc.tuc_flags = 0;
+ frame->uc.tuc_link = 0;
+
+ __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
+ __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
+ &frame->uc.tuc_stack.ss_flags);
+ __put_user(target_sigaltstack_used.ss_size,
+ &frame->uc.tuc_stack.ss_size);
+
+ for (i = 0; i < TARGET_NSIG_WORDS; i++) {
+ __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
+ }
+
+ setup_sigcontext(&frame->uc.tuc_mcontext, env);
+
+ __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
+ __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
+ __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
+ __put_user(0x08000240, frame->tramp + 3); /* nop */
+
+ unlock_user_struct(frame, frame_addr, 1);
+
+ env->gr[2] = h2g(frame->tramp);
+ env->gr[30] = sp;
+ env->gr[26] = sig;
+ env->gr[25] = h2g(&frame->info);
+ env->gr[24] = h2g(&frame->uc);
+
+ haddr = ka->_sa_handler;
+ if (haddr & 2) {
+ /* Function descriptor. */
+ target_ulong *fdesc, dest;
+
+ haddr &= -4;
+ if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
+ goto give_sigsegv;
+ }
+ __get_user(dest, fdesc);
+ __get_user(env->gr[19], fdesc + 1);
+ unlock_user_struct(fdesc, haddr, 1);
+ haddr = dest;
+ }
+ env->iaoq_f = haddr;
+ env->iaoq_b = haddr + 4;
+ return;
+
+ give_sigsegv:
+ force_sigsegv(sig);
+}
+
+long do_rt_sigreturn(CPUArchState *env)
+{
+ abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
+ struct target_rt_sigframe *frame;
+ sigset_t set;
+
+ trace_user_do_rt_sigreturn(env, frame_addr);
+ if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
+ goto badframe;
+ }
+ target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
+ set_sigmask(&set);
+
+ restore_sigcontext(env, &frame->uc.tuc_mcontext);
+ unlock_user_struct(frame, frame_addr, 0);
+
+ if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
+ uc.tuc_stack),
+ 0, env->gr[30]) == -EFAULT) {
+ goto badframe;
+ }
+
+ unlock_user_struct(frame, frame_addr, 0);
+ return -TARGET_QEMU_ESIGRETURN;
+
+ badframe:
+ force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
+}
+
#else
static void setup_frame(int sig, struct target_sigaction *ka,
@@ -5989,7 +6412,8 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
/* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
|| defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
- || defined(TARGET_PPC64)
+ || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
+ || defined(TARGET_NIOS2)
/* These targets do not have traditional signals. */
setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else
diff --git a/linux-user/socket.h b/linux-user/socket.h
index 4dacae6127..7051cd2cf4 100644
--- a/linux-user/socket.h
+++ b/linux-user/socket.h
@@ -205,6 +205,8 @@
#define TARGET_SOCK_MAX (TARGET_SOCK_PACKET + 1)
#define TARGET_SOCK_TYPE_MASK 0xf /* Covers up to TARGET_SOCK_MAX-1. */
+#elif defined(TARGET_HPPA)
+#include <hppa/sockbits.h>
#else
#if defined(TARGET_SPARC)
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 7b77503f94..11a311f9db 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -798,6 +798,12 @@ static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
#ifdef ENOMSG
[ENOMSG] = TARGET_ENOMSG,
#endif
+#ifdef ERFKILL
+ [ERFKILL] = TARGET_ERFKILL,
+#endif
+#ifdef EHWPOISON
+ [EHWPOISON] = TARGET_EHWPOISON,
+#endif
};
static inline int host_to_target_errno(int err)
@@ -5453,6 +5459,8 @@ static IOCTLEntry ioctl_entries[] = {
{ TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
{ TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
+#define IOCTL_IGNORE(cmd) \
+ { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
{ 0, 0, },
};
@@ -5484,6 +5492,10 @@ static abi_long do_ioctl(int fd, int cmd, abi_long arg)
#endif
if (ie->do_ioctl) {
return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
+ } else if (!ie->host_cmd) {
+ /* Some architectures define BSD ioctls in their headers
+ that are not implemented in Linux. */
+ return -TARGET_ENOSYS;
}
switch(arg_type[0]) {
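The new IOCTL_IGNORE() entry type works together with the do_ioctl() change above: an entry whose host_cmd is zero is still matched by its target number, but instead of being forwarded to the host it now fails cleanly with -TARGET_ENOSYS. A hedged sketch of how such entries would look in linux-user/ioctls.h; TIOCSTART/TIOCSTOP are used purely as examples of BSD-only ioctls that some target headers define, and the exact entries are not part of this hunk:

/* Illustrative ioctls.h entries (assumed names): register target ioctls
 * that have no Linux host counterpart so the guest gets -TARGET_ENOSYS
 * instead of an "unsupported ioctl" warning. */
IOCTL_IGNORE(TIOCSTART)
IOCTL_IGNORE(TIOCSTOP)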
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 0b15466743..4442c22bc3 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -70,7 +70,8 @@
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \
|| defined(TARGET_M68K) || defined(TARGET_CRIS) \
|| defined(TARGET_UNICORE32) || defined(TARGET_S390X) \
- || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
+ || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
+ || defined(TARGET_NIOS2)
#define TARGET_IOC_SIZEBITS 14
#define TARGET_IOC_DIRBITS 2
@@ -90,6 +91,15 @@
#define TARGET_IOC_READ 2U
#define TARGET_IOC_WRITE 4U
+#elif defined(TARGET_HPPA)
+
+#define TARGET_IOC_SIZEBITS 14
+#define TARGET_IOC_DIRBITS 2
+
+#define TARGET_IOC_NONE 0U
+#define TARGET_IOC_WRITE 2U
+#define TARGET_IOC_READ 1U
+
#else
#error unsupported CPU
#endif
@@ -417,7 +427,7 @@ int do_sigaction(int sig, const struct target_sigaction *act,
|| defined(TARGET_M68K) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) \
|| defined(TARGET_MICROBLAZE) || defined(TARGET_UNICORE32) \
|| defined(TARGET_S390X) || defined(TARGET_OPENRISC) \
- || defined(TARGET_TILEGX)
+ || defined(TARGET_TILEGX) || defined(TARGET_HPPA) || defined(TARGET_NIOS2)
#if defined(TARGET_SPARC)
#define TARGET_SA_NOCLDSTOP 8u
@@ -587,6 +597,46 @@ int do_sigaction(int sig, const struct target_sigaction *act,
#define TARGET_SIG_UNBLOCK 2 /* for unblocking signals */
#define TARGET_SIG_SETMASK 3 /* for setting the signal mask */
+#elif defined(TARGET_HPPA)
+
+#define TARGET_SIGHUP 1
+#define TARGET_SIGINT 2
+#define TARGET_SIGQUIT 3
+#define TARGET_SIGILL 4
+#define TARGET_SIGTRAP 5
+#define TARGET_SIGABRT 6
+#define TARGET_SIGIOT 6
+#define TARGET_SIGSTKFLT 7
+#define TARGET_SIGFPE 8
+#define TARGET_SIGKILL 9
+#define TARGET_SIGBUS 10
+#define TARGET_SIGSEGV 11
+#define TARGET_SIGXCPU 12
+#define TARGET_SIGPIPE 13
+#define TARGET_SIGALRM 14
+#define TARGET_SIGTERM 15
+#define TARGET_SIGUSR1 16
+#define TARGET_SIGUSR2 17
+#define TARGET_SIGCHLD 18
+#define TARGET_SIGPWR 19
+#define TARGET_SIGVTALRM 20
+#define TARGET_SIGPROF 21
+#define TARGET_SIGIO 22
+#define TARGET_SIGPOLL TARGET_SIGIO
+#define TARGET_SIGWINCH 23
+#define TARGET_SIGSTOP 24
+#define TARGET_SIGTSTP 25
+#define TARGET_SIGCONT 26
+#define TARGET_SIGTTIN 27
+#define TARGET_SIGTTOU 28
+#define TARGET_SIGURG 29
+#define TARGET_SIGXFSZ 30
+#define TARGET_SIGSYS 31
+
+#define TARGET_SIG_BLOCK 0
+#define TARGET_SIG_UNBLOCK 1
+#define TARGET_SIG_SETMASK 2
+
#else
/* OpenRISC Using the general signals */
@@ -930,9 +980,13 @@ struct target_pollfd {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SH4)
#define TARGET_SIOCATMARK TARGET_IOR('s', 7, int)
+#define TARGET_SIOCGPGRP TARGET_IOR('s', 9, pid_t)
#else
#define TARGET_SIOCATMARK 0x8905
+#define TARGET_SIOCGPGRP 0x8904
#endif
+#define TARGET_SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
+#define TARGET_SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
/* Networking ioctls */
#define TARGET_SIOCADDRT 0x890B /* add routing table entry */
@@ -1275,6 +1329,16 @@ struct target_winsize {
#define TARGET_MAP_NORESERVE 0x10000 /* no check for reservations */
#define TARGET_MAP_POPULATE 0x20000 /* pop (prefault) pagetables */
#define TARGET_MAP_NONBLOCK 0x40000 /* do not block on IO */
+#elif defined(TARGET_HPPA)
+#define TARGET_MAP_ANONYMOUS 0x10 /* don't use a file */
+#define TARGET_MAP_FIXED 0x04 /* Interpret addr exactly */
+#define TARGET_MAP_GROWSDOWN 0x08000 /* stack-like segment */
+#define TARGET_MAP_DENYWRITE 0x00800 /* ETXTBSY */
+#define TARGET_MAP_EXECUTABLE 0x01000 /* mark it as an executable */
+#define TARGET_MAP_LOCKED 0x02000 /* lock the mapping */
+#define TARGET_MAP_NORESERVE 0x04000 /* no check for reservations */
+#define TARGET_MAP_POPULATE 0x10000 /* pop (prefault) pagetables */
+#define TARGET_MAP_NONBLOCK 0x20000 /* do not block on IO */
#else
#define TARGET_MAP_FIXED 0x10 /* Interpret addr exactly */
#define TARGET_MAP_ANONYMOUS 0x20 /* don't use a file */
@@ -1974,7 +2038,8 @@ struct target_stat {
abi_ulong target_st_ctime_nsec;
unsigned int __unused[2];
};
-#elif defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
+#elif defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) || \
+ defined(TARGET_NIOS2)
/* These are the asm-generic versions of the stat and stat64 structures */
@@ -2025,6 +2090,62 @@ struct target_stat64 {
unsigned int __unused5;
};
+#elif defined(TARGET_HPPA)
+
+struct target_stat {
+ abi_uint st_dev;
+ abi_uint st_ino;
+ abi_ushort st_mode;
+ abi_ushort st_nlink;
+ abi_ushort _res1;
+ abi_ushort _res2;
+ abi_uint st_rdev;
+ abi_int st_size;
+ abi_int target_st_atime;
+ abi_uint target_st_atime_nsec;
+ abi_int target_st_mtime;
+ abi_uint target_st_mtime_nsec;
+ abi_int target_st_ctime;
+ abi_uint target_st_ctime_nsec;
+ abi_int st_blksize;
+ abi_int st_blocks;
+ abi_uint _unused1;
+ abi_uint _unused2;
+ abi_uint _unused3;
+ abi_uint _unused4;
+ abi_ushort _unused5;
+ abi_short st_fstype;
+ abi_uint st_realdev;
+ abi_ushort st_basemode;
+ abi_ushort _unused6;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ abi_uint _unused7[3];
+};
+
+#define TARGET_HAS_STRUCT_STAT64
+struct target_stat64 {
+ uint64_t st_dev;
+ abi_uint _pad1;
+ abi_uint _res1;
+ abi_uint st_mode;
+ abi_uint st_nlink;
+ abi_uint st_uid;
+ abi_uint st_gid;
+ uint64_t st_rdev;
+ abi_uint _pad2;
+ int64_t st_size;
+ abi_int st_blksize;
+ int64_t st_blocks;
+ abi_int target_st_atime;
+ abi_uint target_st_atime_nsec;
+ abi_int target_st_mtime;
+ abi_uint target_st_mtime_nsec;
+ abi_int target_st_ctime;
+ abi_uint target_st_ctime_nsec;
+ uint64_t st_ino;
+};
+
#else
#error unsupported CPU
#endif
@@ -2195,6 +2316,12 @@ struct target_statfs64 {
#define TARGET_F_SETLKW 7
#define TARGET_F_SETOWN 24 /* for sockets. */
#define TARGET_F_GETOWN 23 /* for sockets. */
+#elif defined(TARGET_HPPA)
+#define TARGET_F_GETLK 5
+#define TARGET_F_SETLK 6
+#define TARGET_F_SETLKW 7
+#define TARGET_F_GETOWN 11 /* for sockets. */
+#define TARGET_F_SETOWN 12 /* for sockets. */
#else
#define TARGET_F_GETLK 5
#define TARGET_F_SETLK 6
@@ -2217,13 +2344,22 @@ struct target_statfs64 {
#endif
+#if defined(TARGET_HPPA)
+#define TARGET_F_SETSIG 13 /* for sockets. */
+#define TARGET_F_GETSIG 14 /* for sockets. */
+#else
#define TARGET_F_SETSIG 10 /* for sockets. */
#define TARGET_F_GETSIG 11 /* for sockets. */
+#endif
#if defined(TARGET_MIPS)
#define TARGET_F_GETLK64 33 /* using 'struct flock64' */
#define TARGET_F_SETLK64 34
#define TARGET_F_SETLKW64 35
+#elif defined(TARGET_HPPA)
+#define TARGET_F_GETLK64 8 /* using 'struct flock64' */
+#define TARGET_F_SETLK64 9
+#define TARGET_F_SETLKW64 10
#else
#define TARGET_F_GETLK64 12 /* using 'struct flock64' */
#define TARGET_F_SETLK64 13
@@ -2254,6 +2390,20 @@ struct target_statfs64 {
#define TARGET_O_CLOEXEC 010000000
#define TARGET___O_SYNC 020000000
#define TARGET_O_PATH 040000000
+#elif defined(TARGET_HPPA)
+#define TARGET_O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */
+#define TARGET_O_APPEND 000000010
+#define TARGET_O_CREAT 000000400 /* not fcntl */
+#define TARGET_O_EXCL 000002000 /* not fcntl */
+#define TARGET_O_NOCTTY 000400000 /* not fcntl */
+#define TARGET_O_DSYNC 001000000
+#define TARGET_O_LARGEFILE 000004000
+#define TARGET_O_DIRECTORY 000010000 /* must be a directory */
+#define TARGET_O_NOFOLLOW 000000200 /* don't follow links */
+#define TARGET_O_NOATIME 004000000
+#define TARGET_O_CLOEXEC 010000000
+#define TARGET___O_SYNC 000100000
+#define TARGET_O_PATH 020000000
#elif defined(TARGET_ARM) || defined(TARGET_M68K)
#define TARGET_O_DIRECTORY 040000 /* must be a directory */
#define TARGET_O_NOFOLLOW 0100000 /* don't follow links */
@@ -2375,8 +2525,8 @@ struct target_flock {
struct target_flock64 {
short l_type;
short l_whence;
-#if defined(TARGET_PPC) || defined(TARGET_X86_64) \
- || defined(TARGET_MIPS) || defined(TARGET_SPARC) \
+#if defined(TARGET_PPC) || defined(TARGET_X86_64) || defined(TARGET_MIPS) \
+ || defined(TARGET_SPARC) || defined(TARGET_HPPA) \
|| defined(TARGET_MICROBLAZE) || defined(TARGET_TILEGX)
int __pad;
#endif
diff --git a/linux-user/syscall_types.h b/linux-user/syscall_types.h
index af79fbf1de..2b8c0c6df6 100644
--- a/linux-user/syscall_types.h
+++ b/linux-user/syscall_types.h
@@ -14,6 +14,12 @@ STRUCT(serial_icounter_struct,
STRUCT(sockaddr,
TYPE_SHORT, MK_ARRAY(TYPE_CHAR, 14))
+STRUCT(timeval,
+ MK_ARRAY(TYPE_LONG, 2))
+
+STRUCT(timespec,
+ MK_ARRAY(TYPE_LONG, 2))
+
STRUCT(rtentry,
TYPE_ULONG, MK_STRUCT(STRUCT_sockaddr), MK_STRUCT(STRUCT_sockaddr), MK_STRUCT(STRUCT_sockaddr),
TYPE_SHORT, TYPE_SHORT, TYPE_ULONG, TYPE_PTRVOID, TYPE_SHORT, TYPE_PTRVOID,
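The new STRUCT(timeval) and STRUCT(timespec) descriptions give the ioctl thunking layer a generic way to byte-swap the struct timeval/timespec payloads of the TARGET_SIOCGSTAMP and TARGET_SIOCGSTAMPNS numbers added to syscall_defs.h below. A hedged sketch of how an ioctls.h entry would reference them (the actual entries live outside this hunk):

/* Illustrative only: return the socket timestamp to the guest, letting
 * the generic thunk code convert the timeval/timespec representation. */
IOCTL(SIOCGSTAMP,   IOC_R, MK_PTR(MK_STRUCT(STRUCT_timeval)))
IOCTL(SIOCGSTAMPNS, IOC_R, MK_PTR(MK_STRUCT(STRUCT_timespec)))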
diff --git a/memory.c b/memory.c
index 33110e9698..2bfc37f65c 100644
--- a/memory.c
+++ b/memory.c
@@ -1603,6 +1603,11 @@ static void memory_region_update_iommu_notify_flags(MemoryRegion *mr)
void memory_region_register_iommu_notifier(MemoryRegion *mr,
IOMMUNotifier *n)
{
+ if (mr->alias) {
+ memory_region_register_iommu_notifier(mr->alias, n);
+ return;
+ }
+
/* We need to register for at least one bitfield */
assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
QLIST_INSERT_HEAD(&mr->iommu_notify, n, node);
@@ -1643,6 +1648,10 @@ void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
IOMMUNotifier *n)
{
+ if (mr->alias) {
+ memory_region_unregister_iommu_notifier(mr->alias, n);
+ return;
+ }
QLIST_REMOVE(n, node);
memory_region_update_iommu_notify_flags(mr);
}
diff --git a/migration/Makefile.objs b/migration/Makefile.objs
index 3f3e237142..480dd493a9 100644
--- a/migration/Makefile.objs
+++ b/migration/Makefile.objs
@@ -1,7 +1,6 @@
common-obj-y += migration.o socket.o fd.o exec.o
common-obj-y += tls.o
-common-obj-y += colo-comm.o
-common-obj-$(CONFIG_COLO) += colo.o colo-failover.o
+common-obj-y += colo-comm.o colo.o colo-failover.o
common-obj-y += vmstate.o
common-obj-y += qemu-file.o
common-obj-y += qemu-file-channel.o
diff --git a/migration/migration.c b/migration/migration.c
index f498ab84f2..2766d2f586 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1006,6 +1006,16 @@ static void migrate_fd_cancel(MigrationState *s)
if (s->state == MIGRATION_STATUS_CANCELLING && f) {
qemu_file_shutdown(f);
}
+ if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
+ Error *local_err = NULL;
+
+ bdrv_invalidate_cache_all(&local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ } else {
+ s->block_inactive = false;
+ }
+ }
}
void add_migration_state_change_notifier(Notifier *notify)
@@ -1044,6 +1054,31 @@ bool migration_in_postcopy_after_devices(MigrationState *s)
return migration_in_postcopy(s) && s->postcopy_after_devices;
}
+bool migration_is_idle(MigrationState *s)
+{
+ if (!s) {
+ s = migrate_get_current();
+ }
+
+ switch (s->state) {
+ case MIGRATION_STATUS_NONE:
+ case MIGRATION_STATUS_CANCELLED:
+ case MIGRATION_STATUS_COMPLETED:
+ case MIGRATION_STATUS_FAILED:
+ return true;
+ case MIGRATION_STATUS_SETUP:
+ case MIGRATION_STATUS_CANCELLING:
+ case MIGRATION_STATUS_ACTIVE:
+ case MIGRATION_STATUS_POSTCOPY_ACTIVE:
+ case MIGRATION_STATUS_COLO:
+ return false;
+ case MIGRATION_STATUS__MAX:
+ g_assert_not_reached();
+ }
+
+ return false;
+}
+
MigrationState *migrate_init(const MigrationParams *params)
{
MigrationState *s = migrate_get_current();
@@ -1086,9 +1121,24 @@ MigrationState *migrate_init(const MigrationParams *params)
static GSList *migration_blockers;
-void migrate_add_blocker(Error *reason)
+int migrate_add_blocker(Error *reason, Error **errp)
{
- migration_blockers = g_slist_prepend(migration_blockers, reason);
+ if (only_migratable) {
+ error_propagate(errp, error_copy(reason));
+ error_prepend(errp, "disallowing migration blocker "
+ "(--only_migratable) for: ");
+ return -EACCES;
+ }
+
+ if (migration_is_idle(NULL)) {
+ migration_blockers = g_slist_prepend(migration_blockers, reason);
+ return 0;
+ }
+
+ error_propagate(errp, error_copy(reason));
+ error_prepend(errp, "disallowing migration blocker (migration in "
+ "progress) for: ");
+ return -EBUSY;
}
void migrate_del_blocker(Error *reason)
@@ -1705,6 +1755,7 @@ static void migration_completion(MigrationState *s, int current_active_state,
if (ret >= 0) {
qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
qemu_savevm_state_complete_precopy(s->to_dst_file, false);
+ s->block_inactive = true;
}
}
qemu_mutex_unlock_iothread();
@@ -1755,10 +1806,14 @@ fail_invalidate:
if (s->state == MIGRATION_STATUS_ACTIVE) {
Error *local_err = NULL;
+ qemu_mutex_lock_iothread();
bdrv_invalidate_cache_all(&local_err);
if (local_err) {
error_report_err(local_err);
+ } else {
+ s->block_inactive = false;
}
+ qemu_mutex_unlock_iothread();
}
fail:
@@ -1969,7 +2024,7 @@ void migrate_fd_connect(MigrationState *s)
}
migrate_compress_threads_create();
- qemu_thread_create(&s->thread, "migration", migration_thread, s,
+ qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
QEMU_THREAD_JOINABLE);
s->migration_thread_running = true;
}
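migrate_add_blocker() no longer succeeds unconditionally: it refuses the blocker with -EACCES when QEMU was started with -only-migratable and with -EBUSY while a migration is in flight, filling *errp in both cases. Callers are therefore expected to check the result and drop their Error reference on failure. A minimal caller sketch, with all names other than the migration API chosen for illustration:

/* Sketch of the new calling convention; my_dev_* names are illustrative. */
static Error *my_dev_mig_blocker;

static void my_dev_realize(DeviceState *dev, Error **errp)
{
    Error *local_err = NULL;

    error_setg(&my_dev_mig_blocker,
               "my-dev: live migration is not supported");
    if (migrate_add_blocker(my_dev_mig_blocker, &local_err) < 0) {
        /* Blocker refused: report why and release our reference. */
        error_propagate(errp, local_err);
        error_free(my_dev_mig_blocker);
        my_dev_mig_blocker = NULL;
        return;
    }
    /* ... rest of realize; call migrate_del_blocker() on unrealize ... */
}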
diff --git a/migration/ram.c b/migration/ram.c
index a1c8089010..ef8fadfe69 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -45,14 +45,6 @@
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
-#ifdef DEBUG_MIGRATION_RAM
-#define DPRINTF(fmt, ...) \
- do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
- do { } while (0)
-#endif
-
static int dirty_rate_high_cnt;
static uint64_t bitmap_sync_count;
@@ -507,10 +499,10 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
TARGET_PAGE_SIZE);
if (encoded_len == 0) {
- DPRINTF("Skipping unmodified page\n");
+ trace_save_xbzrle_page_skipping();
return 0;
} else if (encoded_len == -1) {
- DPRINTF("Overflow\n");
+ trace_save_xbzrle_page_overflow();
acct_info.xbzrle_overflows++;
/* update data in the cache */
if (!last_stage) {
@@ -2020,8 +2012,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
if ((i & 63) == 0) {
uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
if (t1 > MAX_WAIT) {
- DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
- t1, i);
+ trace_ram_save_iterate_big_wait(t1, i);
break;
}
}
@@ -2594,8 +2585,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
wait_for_decompress_done();
rcu_read_unlock();
- DPRINTF("Completed load of VM with exit code %d seq iteration "
- "%" PRIu64 "\n", ret, seq_iter);
+ trace_ram_load_complete(ret, seq_iter);
return ret;
}
diff --git a/migration/savevm.c b/migration/savevm.c
index 0363372acc..455d5bac1e 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -220,17 +220,20 @@ void timer_get(QEMUFile *f, QEMUTimer *ts)
* Not in vmstate.c to not add qemu-timer.c as dependency to vmstate.c
*/
-static int get_timer(QEMUFile *f, void *pv, size_t size)
+static int get_timer(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
QEMUTimer *v = pv;
timer_get(f, v);
return 0;
}
-static void put_timer(QEMUFile *f, void *pv, size_t size)
+static int put_timer(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
QEMUTimer *v = pv;
timer_put(f, v);
+
+ return 0;
}
const VMStateInfo vmstate_info_timer = {
@@ -532,6 +535,34 @@ static int calculate_compat_instance_id(const char *idstr)
return instance_id;
}
+static inline MigrationPriority save_state_priority(SaveStateEntry *se)
+{
+ if (se->vmsd) {
+ return se->vmsd->priority;
+ }
+ return MIG_PRI_DEFAULT;
+}
+
+static void savevm_state_handler_insert(SaveStateEntry *nse)
+{
+ MigrationPriority priority = save_state_priority(nse);
+ SaveStateEntry *se;
+
+ assert(priority <= MIG_PRI_MAX);
+
+ QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+ if (save_state_priority(se) < priority) {
+ break;
+ }
+ }
+
+ if (se) {
+ QTAILQ_INSERT_BEFORE(se, nse, entry);
+ } else {
+ QTAILQ_INSERT_TAIL(&savevm_state.handlers, nse, entry);
+ }
+}
+
/* TODO: Individual devices generally have very little idea about the rest
of the system, so instance_id should be removed/replaced.
Meanwhile pass -1 as instance_id if you do not already have a clearly
@@ -578,8 +609,7 @@ int register_savevm_live(DeviceState *dev,
se->instance_id = instance_id;
}
assert(!se->compat || se->instance_id == 0);
- /* add at the end of list */
- QTAILQ_INSERT_TAIL(&savevm_state.handlers, se, entry);
+ savevm_state_handler_insert(se);
return 0;
}
@@ -662,8 +692,7 @@ int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
se->instance_id = instance_id;
}
assert(!se->compat || se->instance_id == 0);
- /* add at the end of list */
- QTAILQ_INSERT_TAIL(&savevm_state.handlers, se, entry);
+ savevm_state_handler_insert(se);
return 0;
}
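savevm_state_handler_insert() keeps the handler list ordered by descending MigrationPriority instead of simple append order, so a device whose state must be restored before the default-priority devices can now just raise .priority in its VMStateDescription. A hedged sketch, assuming a priority level such as MIG_PRI_IOMMU exists above MIG_PRI_DEFAULT and a MyIOMMUState type with a cap field:

/* Illustrative VMSD that is saved/loaded ahead of MIG_PRI_DEFAULT
 * handlers, e.g. an IOMMU whose translations other devices replay
 * against during their own post_load. */
static const VMStateDescription vmstate_my_iommu = {
    .name = "my-iommu",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,   /* sorted before MIG_PRI_DEFAULT entries */
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(cap, MyIOMMUState),
        VMSTATE_END_OF_LIST()
    }
};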
diff --git a/migration/socket.c b/migration/socket.c
index 11f80b119b..13966f1d26 100644
--- a/migration/socket.c
+++ b/migration/socket.c
@@ -70,22 +70,23 @@ static void socket_connect_data_free(void *opaque)
g_free(data);
}
-static void socket_outgoing_migration(Object *src,
- Error *err,
+static void socket_outgoing_migration(QIOTask *task,
gpointer opaque)
{
struct SocketConnectData *data = opaque;
- QIOChannel *sioc = QIO_CHANNEL(src);
+ QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
+ Error *err = NULL;
- if (err) {
+ if (qio_task_propagate_error(task, &err)) {
trace_migration_socket_outgoing_error(error_get_pretty(err));
data->s->to_dst_file = NULL;
migrate_fd_error(data->s, err);
+ error_free(err);
} else {
trace_migration_socket_outgoing_connected(data->hostname);
migration_channel_connect(data->s, sioc, data->hostname);
}
- object_unref(src);
+ object_unref(OBJECT(sioc));
}
static void socket_start_outgoing_migration(MigrationState *s,
diff --git a/migration/tls.c b/migration/tls.c
index 49ca9a8930..203c11d025 100644
--- a/migration/tls.c
+++ b/migration/tls.c
@@ -61,15 +61,15 @@ migration_tls_get_creds(MigrationState *s,
}
-static void migration_tls_incoming_handshake(Object *src,
- Error *err,
+static void migration_tls_incoming_handshake(QIOTask *task,
gpointer opaque)
{
- QIOChannel *ioc = QIO_CHANNEL(src);
+ QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task));
+ Error *err = NULL;
- if (err) {
+ if (qio_task_propagate_error(task, &err)) {
trace_migration_tls_incoming_handshake_error(error_get_pretty(err));
- error_report("%s", error_get_pretty(err));
+ error_report_err(err);
} else {
trace_migration_tls_incoming_handshake_complete();
migration_channel_process_incoming(migrate_get_current(), ioc);
@@ -107,17 +107,18 @@ void migration_tls_channel_process_incoming(MigrationState *s,
}
-static void migration_tls_outgoing_handshake(Object *src,
- Error *err,
+static void migration_tls_outgoing_handshake(QIOTask *task,
gpointer opaque)
{
MigrationState *s = opaque;
- QIOChannel *ioc = QIO_CHANNEL(src);
+ QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task));
+ Error *err = NULL;
- if (err) {
+ if (qio_task_propagate_error(task, &err)) {
trace_migration_tls_outgoing_handshake_error(error_get_pretty(err));
s->to_dst_file = NULL;
migrate_fd_error(s, err);
+ error_free(err);
} else {
trace_migration_tls_outgoing_handshake_complete();
migration_channel_connect(s, ioc, NULL);
diff --git a/migration/trace-events b/migration/trace-events
index 94134f700b..48e531d3b8 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -40,6 +40,10 @@ savevm_state_iterate(void) ""
savevm_state_cleanup(void) ""
savevm_state_complete_precopy(void) ""
vmstate_save(const char *idstr, const char *vmsd_name) "%s, %s"
+vmstate_save_state_loop(const char *name, const char *field, int n_elems) "%s/%s[%d]"
+vmstate_save_state_top(const char *idstr) "%s"
+vmstate_subsection_save_loop(const char *name, const char *sub) "%s/%s"
+vmstate_subsection_save_top(const char *idstr) "%s"
vmstate_load(const char *idstr, const char *vmsd_name) "%s, %s"
qemu_announce_self_iter(const char *mac) "%s"
@@ -52,6 +56,10 @@ vmstate_n_elems(const char *name, int n_elems) "%s: %d"
vmstate_subsection_load(const char *parent) "%s"
vmstate_subsection_load_bad(const char *parent, const char *sub, const char *sub2) "%s: %s/%s"
vmstate_subsection_load_good(const char *parent) "%s"
+get_qtailq(const char *name, int version_id) "%s v%d"
+get_qtailq_end(const char *name, const char *reason, int val) "%s %s/%d"
+put_qtailq(const char *name, int version_id) "%s v%d"
+put_qtailq_end(const char *name, const char *reason) "%s %s"
# migration/qemu-file.c
qemu_file_fclose(void) ""
@@ -186,6 +194,10 @@ postcopy_ram_incoming_cleanup_closeuf(void) ""
postcopy_ram_incoming_cleanup_entry(void) ""
postcopy_ram_incoming_cleanup_exit(void) ""
postcopy_ram_incoming_cleanup_join(void) ""
+save_xbzrle_page_skipping(void) ""
+save_xbzrle_page_overflow(void) ""
+ram_save_iterate_big_wait(uint64_t milliseconds, int iterations) "big wait: %" PRIu64 " milliseconds, %d iterations"
+ram_load_complete(int ret, uint64_t seq_iter) "exit_code %d seq iteration %" PRIu64
# migration/exec.c
migration_exec_outgoing(const char *cmd) "cmd=%s"
diff --git a/migration/vmstate.c b/migration/vmstate.c
index 0bc9f35ef8..2b2b3a58e6 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -5,7 +5,9 @@
#include "migration/vmstate.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
+#include "qemu/queue.h"
#include "trace.h"
+#include "migration/qjson.h"
static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
void *opaque, QJSON *vmdesc);
@@ -83,6 +85,9 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
trace_vmstate_load_state(vmsd->name, version_id);
if (version_id > vmsd->version_id) {
+ error_report("%s: incoming version_id %d is too new "
+ "for local version_id %d",
+ vmsd->name, version_id, vmsd->version_id);
trace_vmstate_load_state_end(vmsd->name, "too new", -EINVAL);
return -EINVAL;
}
@@ -93,6 +98,9 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
trace_vmstate_load_state_end(vmsd->name, "old path", ret);
return ret;
}
+ error_report("%s: incoming version_id %d is too old "
+ "for local minimum version_id %d",
+ vmsd->name, version_id, vmsd->minimum_version_id);
trace_vmstate_load_state_end(vmsd->name, "too old", -EINVAL);
return -EINVAL;
}
@@ -122,8 +130,7 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd,
ret = vmstate_load_state(f, field->vmsd, addr,
field->vmsd->version_id);
} else {
- ret = field->info->get(f, addr, size);
-
+ ret = field->info->get(f, addr, size, field);
}
if (ret >= 0) {
ret = qemu_file_get_error(f);
@@ -299,6 +306,8 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
{
VMStateField *field = vmsd->fields;
+ trace_vmstate_save_state_top(vmsd->name);
+
if (vmsd->pre_save) {
vmsd->pre_save(opaque);
}
@@ -318,6 +327,7 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
int64_t old_offset, written_bytes;
QJSON *vmdesc_loop = vmdesc;
+ trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems);
for (i = 0; i < n_elems; i++) {
void *addr = base_addr + size * i;
@@ -330,7 +340,7 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
if (field->flags & VMS_STRUCT) {
vmstate_save_state(f, field->vmsd, addr, vmdesc_loop);
} else {
- field->info->put(f, addr, size);
+ field->info->put(f, addr, size, field, vmdesc_loop);
}
written_bytes = qemu_ftell_fast(f) - old_offset;
@@ -427,11 +437,13 @@ static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
const VMStateDescription **sub = vmsd->subsections;
bool subsection_found = false;
+ trace_vmstate_subsection_save_top(vmsd->name);
while (sub && *sub && (*sub)->needed) {
if ((*sub)->needed(opaque)) {
- const VMStateDescription *vmsd = *sub;
+ const VMStateDescription *vmsdsub = *sub;
uint8_t len;
+ trace_vmstate_subsection_save_loop(vmsd->name, vmsdsub->name);
if (vmdesc) {
/* Only create subsection array when we have any */
if (!subsection_found) {
@@ -443,11 +455,11 @@ static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
}
qemu_put_byte(f, QEMU_VM_SUBSECTION);
- len = strlen(vmsd->name);
+ len = strlen(vmsdsub->name);
qemu_put_byte(f, len);
- qemu_put_buffer(f, (uint8_t *)vmsd->name, len);
- qemu_put_be32(f, vmsd->version_id);
- vmstate_save_state(f, vmsd, opaque, vmdesc);
+ qemu_put_buffer(f, (uint8_t *)vmsdsub->name, len);
+ qemu_put_be32(f, vmsdsub->version_id);
+ vmstate_save_state(f, vmsdsub, opaque, vmdesc);
if (vmdesc) {
json_end_object(vmdesc);
@@ -463,17 +475,19 @@ static void vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd,
/* bool */
-static int get_bool(QEMUFile *f, void *pv, size_t size)
+static int get_bool(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
bool *v = pv;
*v = qemu_get_byte(f);
return 0;
}
-static void put_bool(QEMUFile *f, void *pv, size_t size)
+static int put_bool(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
bool *v = pv;
qemu_put_byte(f, *v);
+ return 0;
}
const VMStateInfo vmstate_info_bool = {
@@ -484,17 +498,19 @@ const VMStateInfo vmstate_info_bool = {
/* 8 bit int */
-static int get_int8(QEMUFile *f, void *pv, size_t size)
+static int get_int8(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
int8_t *v = pv;
qemu_get_s8s(f, v);
return 0;
}
-static void put_int8(QEMUFile *f, void *pv, size_t size)
+static int put_int8(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
int8_t *v = pv;
qemu_put_s8s(f, v);
+ return 0;
}
const VMStateInfo vmstate_info_int8 = {
@@ -505,17 +521,19 @@ const VMStateInfo vmstate_info_int8 = {
/* 16 bit int */
-static int get_int16(QEMUFile *f, void *pv, size_t size)
+static int get_int16(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
int16_t *v = pv;
qemu_get_sbe16s(f, v);
return 0;
}
-static void put_int16(QEMUFile *f, void *pv, size_t size)
+static int put_int16(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
int16_t *v = pv;
qemu_put_sbe16s(f, v);
+ return 0;
}
const VMStateInfo vmstate_info_int16 = {
@@ -526,17 +544,19 @@ const VMStateInfo vmstate_info_int16 = {
/* 32 bit int */
-static int get_int32(QEMUFile *f, void *pv, size_t size)
+static int get_int32(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
int32_t *v = pv;
qemu_get_sbe32s(f, v);
return 0;
}
-static void put_int32(QEMUFile *f, void *pv, size_t size)
+static int put_int32(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
int32_t *v = pv;
qemu_put_sbe32s(f, v);
+ return 0;
}
const VMStateInfo vmstate_info_int32 = {
@@ -548,7 +568,8 @@ const VMStateInfo vmstate_info_int32 = {
/* 32 bit int. See that the received value is the same than the one
in the field */
-static int get_int32_equal(QEMUFile *f, void *pv, size_t size)
+static int get_int32_equal(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
int32_t *v = pv;
int32_t v2;
@@ -571,7 +592,7 @@ const VMStateInfo vmstate_info_int32_equal = {
* and less than or equal to the one in the field.
*/
-static int get_int32_le(QEMUFile *f, void *pv, size_t size)
+static int get_int32_le(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
int32_t *cur = pv;
int32_t loaded;
@@ -595,17 +616,19 @@ const VMStateInfo vmstate_info_int32_le = {
/* 64 bit int */
-static int get_int64(QEMUFile *f, void *pv, size_t size)
+static int get_int64(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
int64_t *v = pv;
qemu_get_sbe64s(f, v);
return 0;
}
-static void put_int64(QEMUFile *f, void *pv, size_t size)
+static int put_int64(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
int64_t *v = pv;
qemu_put_sbe64s(f, v);
+ return 0;
}
const VMStateInfo vmstate_info_int64 = {
@@ -616,17 +639,19 @@ const VMStateInfo vmstate_info_int64 = {
/* 8 bit unsigned int */
-static int get_uint8(QEMUFile *f, void *pv, size_t size)
+static int get_uint8(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
uint8_t *v = pv;
qemu_get_8s(f, v);
return 0;
}
-static void put_uint8(QEMUFile *f, void *pv, size_t size)
+static int put_uint8(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
uint8_t *v = pv;
qemu_put_8s(f, v);
+ return 0;
}
const VMStateInfo vmstate_info_uint8 = {
@@ -637,17 +662,19 @@ const VMStateInfo vmstate_info_uint8 = {
/* 16 bit unsigned int */
-static int get_uint16(QEMUFile *f, void *pv, size_t size)
+static int get_uint16(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
uint16_t *v = pv;
qemu_get_be16s(f, v);
return 0;
}
-static void put_uint16(QEMUFile *f, void *pv, size_t size)
+static int put_uint16(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
uint16_t *v = pv;
qemu_put_be16s(f, v);
+ return 0;
}
const VMStateInfo vmstate_info_uint16 = {
@@ -658,17 +685,19 @@ const VMStateInfo vmstate_info_uint16 = {
/* 32 bit unsigned int */
-static int get_uint32(QEMUFile *f, void *pv, size_t size)
+static int get_uint32(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
uint32_t *v = pv;
qemu_get_be32s(f, v);
return 0;
}
-static void put_uint32(QEMUFile *f, void *pv, size_t size)
+static int put_uint32(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
uint32_t *v = pv;
qemu_put_be32s(f, v);
+ return 0;
}
const VMStateInfo vmstate_info_uint32 = {
@@ -680,7 +709,8 @@ const VMStateInfo vmstate_info_uint32 = {
/* 32 bit uint. See that the received value is the same than the one
in the field */
-static int get_uint32_equal(QEMUFile *f, void *pv, size_t size)
+static int get_uint32_equal(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
uint32_t *v = pv;
uint32_t v2;
@@ -701,17 +731,19 @@ const VMStateInfo vmstate_info_uint32_equal = {
/* 64 bit unsigned int */
-static int get_uint64(QEMUFile *f, void *pv, size_t size)
+static int get_uint64(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
uint64_t *v = pv;
qemu_get_be64s(f, v);
return 0;
}
-static void put_uint64(QEMUFile *f, void *pv, size_t size)
+static int put_uint64(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
uint64_t *v = pv;
qemu_put_be64s(f, v);
+ return 0;
}
const VMStateInfo vmstate_info_uint64 = {
@@ -723,7 +755,8 @@ const VMStateInfo vmstate_info_uint64 = {
/* 64 bit unsigned int. See that the received value is the same than the one
in the field */
-static int get_uint64_equal(QEMUFile *f, void *pv, size_t size)
+static int get_uint64_equal(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
uint64_t *v = pv;
uint64_t v2;
@@ -745,7 +778,8 @@ const VMStateInfo vmstate_info_uint64_equal = {
/* 8 bit int. See that the received value is the same than the one
in the field */
-static int get_uint8_equal(QEMUFile *f, void *pv, size_t size)
+static int get_uint8_equal(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
uint8_t *v = pv;
uint8_t v2;
@@ -767,7 +801,8 @@ const VMStateInfo vmstate_info_uint8_equal = {
/* 16 bit unsigned int int. See that the received value is the same than the one
in the field */
-static int get_uint16_equal(QEMUFile *f, void *pv, size_t size)
+static int get_uint16_equal(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
uint16_t *v = pv;
uint16_t v2;
@@ -788,7 +823,8 @@ const VMStateInfo vmstate_info_uint16_equal = {
/* floating point */
-static int get_float64(QEMUFile *f, void *pv, size_t size)
+static int get_float64(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
float64 *v = pv;
@@ -796,11 +832,13 @@ static int get_float64(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void put_float64(QEMUFile *f, void *pv, size_t size)
+static int put_float64(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
uint64_t *v = pv;
qemu_put_be64(f, float64_val(*v));
+ return 0;
}
const VMStateInfo vmstate_info_float64 = {
@@ -811,7 +849,8 @@ const VMStateInfo vmstate_info_float64 = {
/* CPU_DoubleU type */
-static int get_cpudouble(QEMUFile *f, void *pv, size_t size)
+static int get_cpudouble(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
CPU_DoubleU *v = pv;
qemu_get_be32s(f, &v->l.upper);
@@ -819,11 +858,13 @@ static int get_cpudouble(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void put_cpudouble(QEMUFile *f, void *pv, size_t size)
+static int put_cpudouble(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
CPU_DoubleU *v = pv;
qemu_put_be32s(f, &v->l.upper);
qemu_put_be32s(f, &v->l.lower);
+ return 0;
}
const VMStateInfo vmstate_info_cpudouble = {
@@ -834,17 +875,20 @@ const VMStateInfo vmstate_info_cpudouble = {
/* uint8_t buffers */
-static int get_buffer(QEMUFile *f, void *pv, size_t size)
+static int get_buffer(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
uint8_t *v = pv;
qemu_get_buffer(f, v, size);
return 0;
}
-static void put_buffer(QEMUFile *f, void *pv, size_t size)
+static int put_buffer(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
uint8_t *v = pv;
qemu_put_buffer(f, v, size);
+ return 0;
}
const VMStateInfo vmstate_info_buffer = {
@@ -856,7 +900,8 @@ const VMStateInfo vmstate_info_buffer = {
/* unused buffers: space that was used for some fields that are
not useful anymore */
-static int get_unused_buffer(QEMUFile *f, void *pv, size_t size)
+static int get_unused_buffer(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
uint8_t buf[1024];
int block_len;
@@ -869,7 +914,8 @@ static int get_unused_buffer(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void put_unused_buffer(QEMUFile *f, void *pv, size_t size)
+static int put_unused_buffer(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
static const uint8_t buf[1024];
int block_len;
@@ -879,6 +925,8 @@ static void put_unused_buffer(QEMUFile *f, void *pv, size_t size)
size -= block_len;
qemu_put_buffer(f, buf, block_len);
}
+
+ return 0;
}
const VMStateInfo vmstate_info_unused_buffer = {
@@ -894,7 +942,7 @@ const VMStateInfo vmstate_info_unused_buffer = {
*/
/* This is the number of 64 bit words sent over the wire */
#define BITS_TO_U64S(nr) DIV_ROUND_UP(nr, 64)
-static int get_bitmap(QEMUFile *f, void *pv, size_t size)
+static int get_bitmap(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
unsigned long *bmp = pv;
int i, idx = 0;
@@ -908,7 +956,8 @@ static int get_bitmap(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void put_bitmap(QEMUFile *f, void *pv, size_t size)
+static int put_bitmap(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
unsigned long *bmp = pv;
int i, idx = 0;
@@ -919,6 +968,8 @@ static void put_bitmap(QEMUFile *f, void *pv, size_t size)
}
qemu_put_be64(f, w);
}
+
+ return 0;
}
const VMStateInfo vmstate_info_bitmap = {
@@ -926,3 +977,71 @@ const VMStateInfo vmstate_info_bitmap = {
.get = get_bitmap,
.put = put_bitmap,
};
+
+/* get for QTAILQ
+ * meta data about the QTAILQ is encoded in a VMStateField structure
+ */
+static int get_qtailq(QEMUFile *f, void *pv, size_t unused_size,
+ VMStateField *field)
+{
+ int ret = 0;
+ const VMStateDescription *vmsd = field->vmsd;
+ /* size of a QTAILQ element */
+ size_t size = field->size;
+ /* offset of the QTAILQ entry in a QTAILQ element */
+ size_t entry_offset = field->start;
+ int version_id = field->version_id;
+ void *elm;
+
+ trace_get_qtailq(vmsd->name, version_id);
+ if (version_id > vmsd->version_id) {
+ error_report("%s %s", vmsd->name, "too new");
+ trace_get_qtailq_end(vmsd->name, "too new", -EINVAL);
+
+ return -EINVAL;
+ }
+ if (version_id < vmsd->minimum_version_id) {
+ error_report("%s %s", vmsd->name, "too old");
+ trace_get_qtailq_end(vmsd->name, "too old", -EINVAL);
+ return -EINVAL;
+ }
+
+ while (qemu_get_byte(f)) {
+ elm = g_malloc(size);
+ ret = vmstate_load_state(f, vmsd, elm, version_id);
+ if (ret) {
+ return ret;
+ }
+ QTAILQ_RAW_INSERT_TAIL(pv, elm, entry_offset);
+ }
+
+ trace_get_qtailq_end(vmsd->name, "end", ret);
+ return ret;
+}
+
+/* put for QTAILQ */
+static int put_qtailq(QEMUFile *f, void *pv, size_t unused_size,
+ VMStateField *field, QJSON *vmdesc)
+{
+ const VMStateDescription *vmsd = field->vmsd;
+ /* offset of the QTAILQ entry in a QTAILQ element */
+ size_t entry_offset = field->start;
+ void *elm;
+
+ trace_put_qtailq(vmsd->name, vmsd->version_id);
+
+ QTAILQ_RAW_FOREACH(elm, pv, entry_offset) {
+ qemu_put_byte(f, true);
+ vmstate_save_state(f, vmsd, elm, vmdesc);
+ }
+ qemu_put_byte(f, false);
+
+ trace_put_qtailq_end(vmsd->name, "end");
+
+ return 0;
+}
+const VMStateInfo vmstate_info_qtailq = {
+ .name = "qtailq",
+ .get = get_qtailq,
+ .put = put_qtailq,
+};
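
As a rough sketch of how this new VMStateInfo might be wired up (not part of the patch; names beginning with My are illustrative, and the exact convenience macros or flags a device would normally use are omitted), a device embedding a QTAILQ in its migration state could describe it with a VMStateField whose .vmsd, .size and .start members mirror what get_qtailq() and put_qtailq() read above:

    /* Hypothetical sketch only; assumes QEMU's "migration/vmstate.h" and
     * "qemu/queue.h" are in scope. */
    typedef struct MyElement {
        uint32_t value;
        QTAILQ_ENTRY(MyElement) next;
    } MyElement;

    typedef struct MyState {
        QTAILQ_HEAD(, MyElement) list;
    } MyState;

    static const VMStateDescription vmstate_my_element = {
        .name = "my-element",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(value, MyElement),
            VMSTATE_END_OF_LIST()
        }
    };

    static const VMStateDescription vmstate_my_state = {
        .name = "my-state",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            {
                .name       = "list",
                .version_id = 1,
                .vmsd       = &vmstate_my_element,       /* per-element VMSD */
                .size       = sizeof(MyElement),         /* field->size above */
                .start      = offsetof(MyElement, next), /* field->start above */
                .info       = &vmstate_info_qtailq,
                .offset     = offsetof(MyState, list),   /* pv = &MyState.list */
            },
            VMSTATE_END_OF_LIST()
        }
    };
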
diff --git a/monitor.c b/monitor.c
index 0841d436b0..8b06b63729 100644
--- a/monitor.c
+++ b/monitor.c
@@ -50,7 +50,7 @@
#include "sysemu/balloon.h"
#include "qemu/timer.h"
#include "migration/migration.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
#include "qemu/acl.h"
#include "sysemu/tpm.h"
#include "qapi/qmp/qerror.h"
@@ -3973,6 +3973,8 @@ void error_vprintf_unless_qmp(const char *fmt, va_list ap)
{
if (cur_mon && !monitor_cur_is_qmp()) {
monitor_vprintf(cur_mon, fmt, ap);
+ } else if (!cur_mon) {
+ vfprintf(stderr, fmt, ap);
}
}
diff --git a/nbd/common.c b/nbd/common.c
index b583a4f4cf..a5f39ea58e 100644
--- a/nbd/common.c
+++ b/nbd/common.c
@@ -78,15 +78,13 @@ ssize_t nbd_wr_syncv(QIOChannel *ioc,
}
-void nbd_tls_handshake(Object *src,
- Error *err,
+void nbd_tls_handshake(QIOTask *task,
void *opaque)
{
struct NBDTLSHandshakeData *data = opaque;
- if (err) {
- TRACE("TLS failed %s", error_get_pretty(err));
- data->error = error_copy(err);
+ if (qio_task_propagate_error(task, &data->error)) {
+ TRACE("TLS failed %s", error_get_pretty(data->error));
}
data->complete = true;
g_main_loop_quit(data->loop);
diff --git a/nbd/nbd-internal.h b/nbd/nbd-internal.h
index eee20abc25..f43d990a05 100644
--- a/nbd/nbd-internal.h
+++ b/nbd/nbd-internal.h
@@ -120,8 +120,7 @@ struct NBDTLSHandshakeData {
};
-void nbd_tls_handshake(Object *src,
- Error *err,
+void nbd_tls_handshake(QIOTask *task,
void *opaque);
#endif
diff --git a/nbd/server.c b/nbd/server.c
index 5b76261666..efe5cb82c9 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -1366,19 +1366,18 @@ static void nbd_restart_write(void *opaque)
static void nbd_set_handlers(NBDClient *client)
{
if (client->exp && client->exp->ctx) {
- aio_set_fd_handler(client->exp->ctx, client->sioc->fd,
- true,
+ aio_set_fd_handler(client->exp->ctx, client->sioc->fd, true,
client->can_read ? nbd_read : NULL,
client->send_coroutine ? nbd_restart_write : NULL,
- client);
+ NULL, client);
}
}
static void nbd_unset_handlers(NBDClient *client)
{
if (client->exp && client->exp->ctx) {
- aio_set_fd_handler(client->exp->ctx, client->sioc->fd,
- true, NULL, NULL, NULL);
+ aio_set_fd_handler(client->exp->ctx, client->sioc->fd, true, NULL,
+ NULL, NULL, NULL);
}
}
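
For orientation, the two hunks above only widen the aio_set_fd_handler() call: an extra argument is now passed just before the opaque pointer, presumably a new polling-handler slot in the AioContext API, which NBD leaves as NULL. The fragment below is an illustrative sketch of the new call shape, not code from this patch:

    /* Illustrative only: the widened aio_set_fd_handler() call shape as seen
     * in the NBD hunks above. The argument inserted before the opaque is
     * assumed to be a new io_poll handler slot; NBD passes NULL. */
    static void example_set_handlers(AioContext *ctx, int fd,
                                     IOHandler *io_read, IOHandler *io_write,
                                     void *opaque)
    {
        aio_set_fd_handler(ctx, fd, true,   /* is_external */
                           io_read,         /* read handler or NULL */
                           io_write,        /* write handler or NULL */
                           NULL,            /* new slot (io_poll?), unused here */
                           opaque);
    }
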
diff --git a/net/Makefile.objs b/net/Makefile.objs
index 2a80df5fa7..2e2fd43014 100644
--- a/net/Makefile.objs
+++ b/net/Makefile.objs
@@ -19,3 +19,4 @@ common-obj-y += filter-mirror.o
common-obj-y += colo-compare.o
common-obj-y += colo.o
common-obj-y += filter-rewriter.o
+common-obj-y += filter-replay.o
diff --git a/net/checksum.c b/net/checksum.c
index 23323b0760..4da72a6a6c 100644
--- a/net/checksum.c
+++ b/net/checksum.c
@@ -22,17 +22,22 @@
uint32_t net_checksum_add_cont(int len, uint8_t *buf, int seq)
{
- uint32_t sum = 0;
+ uint32_t sum1 = 0, sum2 = 0;
int i;
- for (i = seq; i < seq + len; i++) {
- if (i & 1) {
- sum += (uint32_t)buf[i - seq];
- } else {
- sum += (uint32_t)buf[i - seq] << 8;
- }
+ for (i = 0; i < len - 1; i += 2) {
+ sum1 += (uint32_t)buf[i];
+ sum2 += (uint32_t)buf[i + 1];
+ }
+ if (i < len) {
+ sum1 += (uint32_t)buf[i];
+ }
+
+ if (seq & 1) {
+ return sum1 + (sum2 << 8);
+ } else {
+ return sum2 + (sum1 << 8);
}
- return sum;
}
uint16_t net_checksum_finish(uint32_t sum)
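
The rewritten accumulation is easier to follow as two running sums, one over even-indexed and one over odd-indexed bytes, combined with a swap that depends on the parity of the starting offset. The standalone program below (illustrative only, not QEMU code; the sample bytes are arbitrary) checks that the split form matches the original byte-at-a-time loop for both even and odd seq:

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Original formulation: walk bytes one at a time, shifting bytes that
     * land on even absolute positions. */
    static uint32_t sum_reference(int len, const uint8_t *buf, int seq)
    {
        uint32_t sum = 0;
        for (int i = seq; i < seq + len; i++) {
            sum += (i & 1) ? (uint32_t)buf[i - seq]
                           : (uint32_t)buf[i - seq] << 8;
        }
        return sum;
    }

    /* Rewritten formulation: separate even/odd sums, swapped by seq parity. */
    static uint32_t sum_split(int len, const uint8_t *buf, int seq)
    {
        uint32_t sum1 = 0, sum2 = 0;
        int i;

        for (i = 0; i < len - 1; i += 2) {
            sum1 += buf[i];
            sum2 += buf[i + 1];
        }
        if (i < len) {          /* trailing byte when len is odd */
            sum1 += buf[i];
        }
        return (seq & 1) ? sum1 + (sum2 << 8) : sum2 + (sum1 << 8);
    }

    int main(void)
    {
        const uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40 };

        for (int seq = 0; seq < 2; seq++) {
            printf("seq=%d reference=%" PRIu32 " split=%" PRIu32 "\n", seq,
                   sum_reference((int)sizeof(pkt), pkt, seq),
                   sum_split((int)sizeof(pkt), pkt, seq));
        }
        return 0;
    }
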
diff --git a/net/filter-replay.c b/net/filter-replay.c
new file mode 100644
index 0000000000..cff65f86e5
--- /dev/null
+++ b/net/filter-replay.c
@@ -0,0 +1,92 @@
+/*
+ * filter-replay.c
+ *
+ * Copyright (c) 2010-2016 Institute for System Programming
+ * of the Russian Academy of Sciences.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "clients.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+#include "qemu/iov.h"
+#include "qemu/log.h"
+#include "qemu/timer.h"
+#include "qapi/visitor.h"
+#include "net/filter.h"
+#include "sysemu/replay.h"
+
+#define TYPE_FILTER_REPLAY "filter-replay"
+
+#define FILTER_REPLAY(obj) \
+ OBJECT_CHECK(NetFilterReplayState, (obj), TYPE_FILTER_REPLAY)
+
+struct NetFilterReplayState {
+ NetFilterState nfs;
+ ReplayNetState *rns;
+};
+typedef struct NetFilterReplayState NetFilterReplayState;
+
+static ssize_t filter_replay_receive_iov(NetFilterState *nf,
+ NetClientState *sndr,
+ unsigned flags,
+ const struct iovec *iov,
+ int iovcnt, NetPacketSent *sent_cb)
+{
+ NetFilterReplayState *nfrs = FILTER_REPLAY(nf);
+ switch (replay_mode) {
+ case REPLAY_MODE_RECORD:
+ if (nf->netdev == sndr) {
+ replay_net_packet_event(nfrs->rns, flags, iov, iovcnt);
+ return iov_size(iov, iovcnt);
+ }
+ return 0;
+ case REPLAY_MODE_PLAY:
+ /* Drop all packets in replay mode.
+ Packets from the log will be injected by the replay module. */
+ return iov_size(iov, iovcnt);
+ default:
+ /* Pass all the packets. */
+ return 0;
+ }
+}
+
+static void filter_replay_instance_init(Object *obj)
+{
+ NetFilterReplayState *nfrs = FILTER_REPLAY(obj);
+ nfrs->rns = replay_register_net(&nfrs->nfs);
+}
+
+static void filter_replay_instance_finalize(Object *obj)
+{
+ NetFilterReplayState *nfrs = FILTER_REPLAY(obj);
+ replay_unregister_net(nfrs->rns);
+}
+
+static void filter_replay_class_init(ObjectClass *oc, void *data)
+{
+ NetFilterClass *nfc = NETFILTER_CLASS(oc);
+
+ nfc->receive_iov = filter_replay_receive_iov;
+}
+
+static const TypeInfo filter_replay_info = {
+ .name = TYPE_FILTER_REPLAY,
+ .parent = TYPE_NETFILTER,
+ .class_init = filter_replay_class_init,
+ .instance_init = filter_replay_instance_init,
+ .instance_finalize = filter_replay_instance_finalize,
+ .instance_size = sizeof(NetFilterReplayState),
+};
+
+static void filter_replay_register_types(void)
+{
+ type_register_static(&filter_replay_info);
+}
+
+type_init(filter_replay_register_types);
diff --git a/net/tap.c b/net/tap.c
index b6896a7b7c..979e622e60 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -696,6 +696,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer,
"tap: open vhost char device failed");
return;
}
+ fcntl(vhostfd, F_SETFL, O_NONBLOCK);
}
options.opaque = (void *)(uintptr_t)vhostfd;
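
One detail worth noting about the new fcntl() call above: F_SETFL with a bare O_NONBLOCK replaces the descriptor's file status flags rather than adding to them, and its return value is not checked. If the intent is merely to add O_NONBLOCK while preserving whatever flags the fd already carries, the usual read-modify-write idiom looks like the sketch below (illustrative POSIX code, not part of the patch; QEMU may already provide a helper such as qemu_set_nonblock() for this):

    #include <fcntl.h>
    #include <errno.h>

    /* Add O_NONBLOCK without clobbering the descriptor's existing flags. */
    static int set_nonblock(int fd)
    {
        int flags = fcntl(fd, F_GETFL);

        if (flags < 0) {
            return -errno;
        }
        if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
            return -errno;
        }
        return 0;
    }
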
@@ -788,8 +789,8 @@ int net_init_tap(const Netdev *netdev, const char *name,
return -1;
}
} else if (tap->has_fds) {
- char **fds = g_new0(char *, MAX_TAP_QUEUES);
- char **vhost_fds = g_new0(char *, MAX_TAP_QUEUES);
+ char **fds;
+ char **vhost_fds;
int nfds, nvhosts;
if (tap->has_ifname || tap->has_script || tap->has_downscript ||
@@ -801,6 +802,9 @@ int net_init_tap(const Netdev *netdev, const char *name,
return -1;
}
+ fds = g_new0(char *, MAX_TAP_QUEUES);
+ vhost_fds = g_new0(char *, MAX_TAP_QUEUES);
+
nfds = get_fds(tap->fds, fds, MAX_TAP_QUEUES);
if (tap->has_vhostfds) {
nvhosts = get_fds(tap->vhostfds, vhost_fds, MAX_TAP_QUEUES);
diff --git a/numa.c b/numa.c
index 9c09e45e7d..9f56be960f 100644
--- a/numa.c
+++ b/numa.c
@@ -25,6 +25,7 @@
#include "qemu/osdep.h"
#include "sysemu/numa.h"
#include "exec/cpu-common.h"
+#include "exec/ramlist.h"
#include "qemu/bitmap.h"
#include "qom/cpu.h"
#include "qemu/error-report.h"
@@ -266,20 +267,19 @@ static char *enumerate_cpus(unsigned long *cpus, int max_cpus)
static void validate_numa_cpus(void)
{
int i;
- DECLARE_BITMAP(seen_cpus, MAX_CPUMASK_BITS);
+ unsigned long *seen_cpus = bitmap_new(max_cpus);
- bitmap_zero(seen_cpus, MAX_CPUMASK_BITS);
for (i = 0; i < nb_numa_nodes; i++) {
- if (bitmap_intersects(seen_cpus, numa_info[i].node_cpu,
- MAX_CPUMASK_BITS)) {
+ if (bitmap_intersects(seen_cpus, numa_info[i].node_cpu, max_cpus)) {
bitmap_and(seen_cpus, seen_cpus,
- numa_info[i].node_cpu, MAX_CPUMASK_BITS);
+ numa_info[i].node_cpu, max_cpus);
error_report("CPU(s) present in multiple NUMA nodes: %s",
enumerate_cpus(seen_cpus, max_cpus));
+ g_free(seen_cpus);
exit(EXIT_FAILURE);
}
bitmap_or(seen_cpus, seen_cpus,
- numa_info[i].node_cpu, MAX_CPUMASK_BITS);
+ numa_info[i].node_cpu, max_cpus);
}
if (!bitmap_full(seen_cpus, max_cpus)) {
@@ -291,12 +291,17 @@ static void validate_numa_cpus(void)
"in NUMA config");
g_free(msg);
}
+ g_free(seen_cpus);
}
void parse_numa_opts(MachineClass *mc)
{
int i;
+ for (i = 0; i < MAX_NODES; i++) {
+ numa_info[i].node_cpu = bitmap_new(max_cpus);
+ }
+
if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, NULL, NULL)) {
exit(1);
}
@@ -362,7 +367,7 @@ void parse_numa_opts(MachineClass *mc)
numa_set_mem_ranges();
for (i = 0; i < nb_numa_nodes; i++) {
- if (!bitmap_empty(numa_info[i].node_cpu, MAX_CPUMASK_BITS)) {
+ if (!bitmap_empty(numa_info[i].node_cpu, max_cpus)) {
break;
}
}
@@ -397,6 +402,7 @@ void numa_post_machine_init(void)
CPU_FOREACH(cpu) {
for (i = 0; i < nb_numa_nodes; i++) {
+ assert(cpu->cpu_index < max_cpus);
if (test_bit(cpu->cpu_index, numa_info[i].node_cpu)) {
cpu->numa_node = i;
}
@@ -518,6 +524,9 @@ static int query_memdev(Object *obj, void *opaque)
m->value = g_malloc0(sizeof(*m->value));
+ m->value->id = object_property_get_str(obj, "id", NULL);
+ m->value->has_id = !!m->value->id;
+
m->value->size = object_property_get_int(obj, "size",
&error_abort);
m->value->merge = object_property_get_bool(obj, "merge",
@@ -555,6 +564,8 @@ int numa_get_node_for_cpu(int idx)
{
int i;
+ assert(idx < max_cpus);
+
for (i = 0; i < nb_numa_nodes; i++) {
if (test_bit(idx, numa_info[i].node_cpu)) {
break;
@@ -562,3 +573,31 @@ int numa_get_node_for_cpu(int idx)
}
return i;
}
+
+void ram_block_notifier_add(RAMBlockNotifier *n)
+{
+ QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);
+}
+
+void ram_block_notifier_remove(RAMBlockNotifier *n)
+{
+ QLIST_REMOVE(n, next);
+}
+
+void ram_block_notify_add(void *host, size_t size)
+{
+ RAMBlockNotifier *notifier;
+
+ QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
+ notifier->ram_block_added(notifier, host, size);
+ }
+}
+
+void ram_block_notify_remove(void *host, size_t size)
+{
+ RAMBlockNotifier *notifier;
+
+ QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
+ notifier->ram_block_removed(notifier, host, size);
+ }
+}
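
The helpers above are a thin callback list: a consumer registers a RAMBlockNotifier and is called back with the host pointer and size of each RAM block as it appears and disappears. A minimal hypothetical consumer (all my_* names are illustrative; the callback signatures simply mirror the notifier->ram_block_added()/ram_block_removed() calls above) might look like:

    static void my_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
    {
        /* e.g. register [host, host + size) with an accelerator backend */
    }

    static void my_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size)
    {
        /* undo whatever my_ram_block_added() set up for this range */
    }

    static RAMBlockNotifier my_ram_notifier = {
        .ram_block_added   = my_ram_block_added,
        .ram_block_removed = my_ram_block_removed,
    };

    static void my_backend_init(void)
    {
        ram_block_notifier_add(&my_ram_notifier);
    }
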
diff --git a/qapi-schema.json b/qapi-schema.json
index a0d3b5d7c5..ac55f4a41b 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -1,6 +1,53 @@
# -*- Mode: Python -*-
+##
+# = Introduction
+#
+# This document describes all commands currently supported by QMP.
+#
+# Most of the time their usage is exactly the same as in the user Monitor; this
+# means that any other document which also describes commands (the manpage,
+# QEMU's manual, etc.) can and should be consulted.
+#
+# QMP has two types of commands: regular and query commands. Regular commands
+# usually change the Virtual Machine's state in some way, while query commands just
+# return information. The sections below are divided accordingly.
+#
+# It's important to observe that all communication examples are formatted in
+# a reader-friendly way, so that they're easier to understand. However, in real
+# protocol usage, they're emitted as a single line.
+#
+# Also, the following notation is used to denote data flow:
+#
+# Example:
+#
+# | -> data issued by the Client
+# | <- Server data response
+#
+# Please refer to the QMP specification (docs/qmp-spec.txt) for
+# detailed information on the Server command and response formats.
+#
+# = Stability Considerations
+#
+# The current QMP command set (described in this file) may be useful for a
+# number of use cases; however, it is limited and several commands have badly
+# defined semantics, especially with regard to command completion.
+#
+# These problems are going to be solved incrementally in the next QEMU releases
+# and we're going to establish a deprecation policy for badly defined commands.
+#
+# If you're planning to adopt QMP, please observe the following:
+#
+# 1. The deprecation policy will take effect and be documented soon; please
+#    check the documentation of each command you use as soon as a new release
+#    of QEMU is available
#
-# QAPI Schema
+# 2. DO NOT rely on anything which is not explicitly documented
+#
+# 3. Errors, in particular, are not documented. Applications should NOT check
+#    for specific error classes or data (it's strongly recommended to only
+#    check for the "error" key)
+#
+##
# QAPI common definitions
{ 'include': 'qapi/common.json' }
@@ -21,6 +68,10 @@
{ 'include': 'qapi/introspect.json' }
##
+# = QMP commands
+##
+
+##
# @qmp_capabilities:
#
# Enable QMP capabilities.
@@ -85,6 +136,13 @@
# Returns: nothing on success.
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "add_client", "arguments": { "protocol": "vnc",
+# "fdname": "myclient" } }
+# <- { "return": {} }
+#
##
{ 'command': 'add_client',
'data': { 'protocol': 'str', 'fdname': 'str', '*skipauth': 'bool',
@@ -109,6 +167,12 @@
# Returns: @NameInfo of the guest
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-name" }
+# <- { "return": { "name": "qemu-name" } }
+#
##
{ 'command': 'query-name', 'returns': 'NameInfo' }
@@ -133,6 +197,12 @@
# Returns: @KvmInfo
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-kvm" }
+# <- { "return": { "enabled": true, "present": true } }
+#
##
{ 'command': 'query-kvm', 'returns': 'KvmInfo' }
@@ -213,13 +283,21 @@
# Returns: @StatusInfo reflecting all VCPUs
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-status" }
+# <- { "return": { "running": true,
+# "singlestep": false,
+# "status": "running" } }
+#
##
{ 'command': 'query-status', 'returns': 'StatusInfo' }
##
# @UuidInfo:
#
-# Guest UUID information.
+# Guest UUID information (Universally Unique Identifier).
#
# @UUID: the UUID of the guest
#
@@ -237,6 +315,12 @@
# Returns: The @UuidInfo for the guest
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-uuid" }
+# <- { "return": { "UUID": "550e8400-e29b-41d4-a716-446655440000" } }
+#
##
{ 'command': 'query-uuid', 'returns': 'UuidInfo' }
@@ -270,6 +354,30 @@
# Returns: a list of @ChardevInfo
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-chardev" }
+# <- {
+# "return": [
+# {
+# "label": "charchannel0",
+# "filename": "unix:/var/lib/libvirt/qemu/seabios.rhel6.agent,server",
+# "frontend-open": false
+# },
+# {
+# "label": "charmonitor",
+# "filename": "unix:/var/lib/libvirt/qemu/seabios.rhel6.monitor,server",
+# "frontend-open": true
+# },
+# {
+# "label": "charserial0",
+# "filename": "pty:/dev/pts/2",
+# "frontend-open": true
+# }
+# ]
+# }
+#
##
{ 'command': 'query-chardev', 'returns': ['ChardevInfo'] }
@@ -292,6 +400,27 @@
# Returns: a list of @ChardevBackendInfo
#
# Since: 2.0
+#
+# Example:
+#
+# -> { "execute": "query-chardev-backends" }
+# <- {
+# "return":[
+# {
+# "name":"udp"
+# },
+# {
+# "name":"tcp"
+# },
+# {
+# "name":"unix"
+# },
+# {
+# "name":"spiceport"
+# }
+# ]
+# }
+#
##
{ 'command': 'query-chardev-backends', 'returns': ['ChardevBackendInfo'] }
@@ -328,6 +457,15 @@
# Returns: Nothing on success
#
# Since: 1.4
+#
+# Example:
+#
+# -> { "execute": "ringbuf-write",
+# "arguments": { "device": "foo",
+# "data": "abcdefgh",
+# "format": "utf8" } }
+# <- { "return": {} }
+#
##
{ 'command': 'ringbuf-write',
'data': {'device': 'str', 'data': 'str',
@@ -355,6 +493,15 @@
# Returns: data read from the device
#
# Since: 1.4
+#
+# Example:
+#
+# -> { "execute": "ringbuf-read",
+# "arguments": { "device": "foo",
+# "size": 1000,
+# "format": "utf8" } }
+# <- { "return": "abcdefgh" }
+#
##
{ 'command': 'ringbuf-read',
'data': {'device': 'str', 'size': 'int', '*format': 'DataFormat'},
@@ -379,6 +526,23 @@
# Returns: A list of @EventInfo for all supported events
#
# Since: 1.2.0
+#
+# Example:
+#
+# -> { "execute": "query-events" }
+# <- {
+# "return": [
+# {
+# "name":"SHUTDOWN"
+# },
+# {
+# "name":"RESET"
+# }
+# ]
+# }
+#
+# Note: This example has been shortened as the real response is too long.
+#
##
{ 'command': 'query-events', 'returns': ['EventInfo'] }
@@ -538,11 +702,124 @@
##
# @query-migrate:
#
-# Returns information about current migration process.
+# Returns information about the current migration process. If migration
+# is active, there will be another json-object with RAM migration
+# status, and if block migration is active, another one with block
+# migration status.
#
# Returns: @MigrationInfo
#
# Since: 0.14.0
+#
+# Example:
+#
+# 1. Before the first migration
+#
+# -> { "execute": "query-migrate" }
+# <- { "return": {} }
+#
+# 2. Migration is done and has succeeded
+#
+# -> { "execute": "query-migrate" }
+# <- { "return": {
+# "status": "completed",
+# "ram":{
+# "transferred":123,
+# "remaining":123,
+# "total":246,
+# "total-time":12345,
+# "setup-time":12345,
+# "downtime":12345,
+# "duplicate":123,
+# "normal":123,
+# "normal-bytes":123456,
+# "dirty-sync-count":15
+# }
+# }
+# }
+#
+# 3. Migration is done and has failed
+#
+# -> { "execute": "query-migrate" }
+# <- { "return": { "status": "failed" } }
+#
+# 4. Migration is being performed and is not a block migration:
+#
+# -> { "execute": "query-migrate" }
+# <- {
+# "return":{
+# "status":"active",
+# "ram":{
+# "transferred":123,
+# "remaining":123,
+# "total":246,
+# "total-time":12345,
+# "setup-time":12345,
+# "expected-downtime":12345,
+# "duplicate":123,
+# "normal":123,
+# "normal-bytes":123456,
+# "dirty-sync-count":15
+# }
+# }
+# }
+#
+# 5. Migration is being performed and is a block migration:
+#
+# -> { "execute": "query-migrate" }
+# <- {
+# "return":{
+# "status":"active",
+# "ram":{
+# "total":1057024,
+# "remaining":1053304,
+# "transferred":3720,
+# "total-time":12345,
+# "setup-time":12345,
+# "expected-downtime":12345,
+# "duplicate":123,
+# "normal":123,
+# "normal-bytes":123456,
+# "dirty-sync-count":15
+# },
+# "disk":{
+# "total":20971520,
+# "remaining":20880384,
+# "transferred":91136
+# }
+# }
+# }
+#
+# 6. Migration is being performed and XBZRLE is active:
+#
+# -> { "execute": "query-migrate" }
+# <- {
+# "return":{
+# "status":"active",
+# "capabilities" : [ { "capability": "xbzrle", "state" : true } ],
+# "ram":{
+# "total":1057024,
+# "remaining":1053304,
+# "transferred":3720,
+# "total-time":12345,
+# "setup-time":12345,
+# "expected-downtime":12345,
+# "duplicate":10,
+# "normal":3333,
+# "normal-bytes":3412992,
+# "dirty-sync-count":15
+# },
+# "xbzrle-cache":{
+# "cache-size":67108864,
+# "bytes":20971520,
+# "pages":2444343,
+# "cache-miss":2244,
+# "cache-miss-rate":0.123,
+# "overflow":34434
+# }
+# }
+# }
+#
##
{ 'command': 'query-migrate', 'returns': 'MigrationInfo' }
@@ -616,6 +893,12 @@
# @capabilities: json array of capability modifications to make
#
# Since: 1.2
+#
+# Example:
+#
+# -> { "execute": "migrate-set-capabilities" , "arguments":
+# { "capabilities": [ { "capability": "xbzrle", "state": true } ] } }
+#
##
{ 'command': 'migrate-set-capabilities',
'data': { 'capabilities': ['MigrationCapabilityStatus'] } }
@@ -628,6 +911,21 @@
# Returns: @MigrationCapabilitiesStatus
#
# Since: 1.2
+#
+# Example:
+#
+# -> { "execute": "query-migrate-capabilities" }
+# <- { "return": [
+# {"state": false, "capability": "xbzrle"},
+# {"state": false, "capability": "rdma-pin-all"},
+# {"state": false, "capability": "auto-converge"},
+# {"state": false, "capability": "zero-blocks"},
+# {"state": false, "capability": "compress"},
+# {"state": true, "capability": "events"},
+# {"state": false, "capability": "postcopy-ram"},
+# {"state": false, "capability": "x-colo"}
+# ]}
+#
##
{ 'command': 'query-migrate-capabilities', 'returns': ['MigrationCapabilityStatus']}
@@ -696,6 +994,12 @@
# Set various migration parameters. See MigrationParameters for details.
#
# Since: 2.4
+#
+# Example:
+#
+# -> { "execute": "migrate-set-parameters" ,
+# "arguments": { "compress-level": 1 } }
+#
##
{ 'command': 'migrate-set-parameters', 'boxed': true,
'data': 'MigrationParameters' }
@@ -767,6 +1071,21 @@
# Returns: @MigrationParameters
#
# Since: 2.4
+#
+# Example:
+#
+# -> { "execute": "query-migrate-parameters" }
+# <- { "return": {
+# "decompress-threads": 2,
+# "cpu-throttle-increment": 10,
+# "compress-threads": 8,
+# "compress-level": 1,
+# "cpu-throttle-initial": 20,
+# "max-bandwidth": 33554432,
+# "downtime-limit": 300
+# }
+# }
+#
##
{ 'command': 'query-migrate-parameters',
'returns': 'MigrationParameters' }
@@ -785,6 +1104,15 @@
# @cert-subject: #optional server certificate subject
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "client_migrate_info",
+# "arguments": { "protocol": "spice",
+# "hostname": "virt42.lab.kraxel.org",
+# "port": 1234 } }
+# <- { "return": {} }
+#
##
{ 'command': 'client_migrate_info',
'data': { 'protocol': 'str', 'hostname': 'str', '*port': 'int',
@@ -798,6 +1126,12 @@
# command.
#
# Since: 2.5
+#
+# Example:
+#
+# -> { "execute": "migrate-start-postcopy" }
+# <- { "return": {} }
+#
##
{ 'command': 'migrate-start-postcopy' }
@@ -870,6 +1204,12 @@
# then takes over server operation to become the service VM.
#
# Since: 2.8
+#
+# Example:
+#
+# -> { "execute": "x-colo-lost-heartbeat" }
+# <- { "return": {} }
+#
##
{ 'command': 'x-colo-lost-heartbeat' }
@@ -900,6 +1240,26 @@
# Returns: a list of @MouseInfo for each device
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-mice" }
+# <- { "return": [
+# {
+# "name":"QEMU Microsoft Mouse",
+# "index":0,
+# "current":false,
+# "absolute":false
+# },
+# {
+# "name":"QEMU PS/2 Mouse",
+# "index":1,
+# "current":true,
+# "absolute":true
+# }
+# ]
+# }
+#
##
{ 'command': 'query-mice', 'returns': ['MouseInfo'] }
@@ -1024,6 +1384,32 @@
# Returns: a list of @CpuInfo for each virtual CPU
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-cpus" }
+# <- { "return": [
+# {
+# "CPU":0,
+# "current":true,
+# "halted":false,
+# "qom_path":"/machine/unattached/device[0]",
+# "arch":"x86",
+# "pc":3227107138,
+# "thread_id":3134
+# },
+# {
+# "CPU":1,
+# "current":false,
+# "halted":true,
+# "qom_path":"/machine/unattached/device[2]",
+# "arch":"x86",
+# "pc":7108165,
+# "thread_id":3135
+# }
+# ]
+# }
+#
##
{ 'command': 'query-cpus', 'returns': ['CpuInfo'] }
@@ -1053,6 +1439,22 @@
# Returns: a list of @IOThreadInfo for each iothread
#
# Since: 2.0
+#
+# Example:
+#
+# -> { "execute": "query-iothreads" }
+# <- { "return": [
+# {
+# "id":"iothread0",
+# "thread-id":3134
+# },
+# {
+# "id":"iothread1",
+# "thread-id":3135
+# }
+# ]
+# }
+#
##
{ 'command': 'query-iothreads', 'returns': ['IOThreadInfo'] }
@@ -1235,6 +1637,26 @@
# Returns: @VncInfo
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-vnc" }
+# <- { "return": {
+# "enabled":true,
+# "host":"0.0.0.0",
+# "service":"50402",
+# "auth":"vnc",
+# "family":"ipv4",
+# "clients":[
+# {
+# "host":"127.0.0.1",
+# "service":"50401",
+# "family":"ipv4"
+# }
+# ]
+# }
+# }
+#
##
{ 'command': 'query-vnc', 'returns': 'VncInfo' }
@@ -1332,7 +1754,7 @@
# @enabled: true if the SPICE server is enabled, false otherwise
#
# @migrated: true if the last guest migration completed and spice
-# migration had completed as well. false otherwise.
+# migration had completed as well. false otherwise. (since 1.4)
#
# @host: #optional The hostname the SPICE server is bound to. This depends on
# the name resolution on the host and may be an IP address.
@@ -1350,9 +1772,7 @@
#
# @mouse-mode: The mode in which the mouse cursor is displayed currently. Can
# be determined by the client or the server, or unknown if spice
-# server doesn't provide this information.
-#
-# Since: 1.1
+# server doesn't provide this information. (since: 1.1)
#
# @channels: a list of @SpiceChannel for each active spice channel
#
@@ -1371,6 +1791,40 @@
# Returns: @SpiceInfo
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-spice" }
+# <- { "return": {
+# "enabled": true,
+# "auth": "spice",
+# "port": 5920,
+# "tls-port": 5921,
+# "host": "0.0.0.0",
+# "channels": [
+# {
+# "port": "54924",
+# "family": "ipv4",
+# "channel-type": 1,
+# "connection-id": 1804289383,
+# "host": "127.0.0.1",
+# "channel-id": 0,
+# "tls": true
+# },
+# {
+# "port": "36710",
+# "family": "ipv4",
+# "channel-type": 4,
+# "connection-id": 1804289383,
+# "host": "127.0.0.1",
+# "channel-id": 0,
+# "tls": false
+# },
+# [ ... more channels follow ... ]
+# ]
+# }
+# }
+#
##
{ 'command': 'query-spice', 'returns': 'SpiceInfo' }
@@ -1392,11 +1846,22 @@
# Return information about the balloon device.
#
# Returns: @BalloonInfo on success
+#
# If the balloon driver is enabled but not functional because the KVM
# kernel module cannot support it, KvmMissingCap
+#
# If no balloon device is present, DeviceNotActive
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-balloon" }
+# <- { "return": {
+#          "actual": 1073741824
+# }
+# }
+#
##
{ 'command': 'query-balloon', 'returns': 'BalloonInfo' }
@@ -1423,6 +1888,8 @@
# @type: 'io' if the region is a PIO region
# 'memory' if the region is a MMIO region
#
+# @size: memory size
+#
# @prefetch: #optional if @type is 'memory', true if the memory is prefetchable
#
# @mem_type_64: #optional if @type is 'memory', true if the BAR is 64-bit
@@ -1556,9 +2023,144 @@
#
# Return information about the PCI bus topology of the guest.
#
-# Returns: a list of @PciInfo for each PCI bus
+# Returns: a list of @PciInfo for each PCI bus. Each bus is
+# represented by a json-object, which has a key with a json-array of
+# all PCI devices attached to it. Each device is represented by a
+# json-object.
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-pci" }
+# <- { "return": [
+# {
+# "bus": 0,
+# "devices": [
+# {
+# "bus": 0,
+# "qdev_id": "",
+# "slot": 0,
+# "class_info": {
+# "class": 1536,
+# "desc": "Host bridge"
+# },
+# "id": {
+# "device": 32902,
+# "vendor": 4663
+# },
+# "function": 0,
+# "regions": [
+# ]
+# },
+# {
+# "bus": 0,
+# "qdev_id": "",
+# "slot": 1,
+# "class_info": {
+# "class": 1537,
+# "desc": "ISA bridge"
+# },
+# "id": {
+# "device": 32902,
+# "vendor": 28672
+# },
+# "function": 0,
+# "regions": [
+# ]
+# },
+# {
+# "bus": 0,
+# "qdev_id": "",
+# "slot": 1,
+# "class_info": {
+# "class": 257,
+# "desc": "IDE controller"
+# },
+# "id": {
+# "device": 32902,
+# "vendor": 28688
+# },
+# "function": 1,
+# "regions": [
+# {
+# "bar": 4,
+# "size": 16,
+# "address": 49152,
+# "type": "io"
+# }
+# ]
+# },
+# {
+# "bus": 0,
+# "qdev_id": "",
+# "slot": 2,
+# "class_info": {
+# "class": 768,
+# "desc": "VGA controller"
+# },
+# "id": {
+# "device": 4115,
+# "vendor": 184
+# },
+# "function": 0,
+# "regions": [
+# {
+# "prefetch": true,
+# "mem_type_64": false,
+# "bar": 0,
+# "size": 33554432,
+# "address": 4026531840,
+# "type": "memory"
+# },
+# {
+# "prefetch": false,
+# "mem_type_64": false,
+# "bar": 1,
+# "size": 4096,
+# "address": 4060086272,
+# "type": "memory"
+# },
+# {
+# "prefetch": false,
+# "mem_type_64": false,
+# "bar": 6,
+# "size": 65536,
+# "address": -1,
+# "type": "memory"
+# }
+# ]
+# },
+# {
+# "bus": 0,
+# "qdev_id": "",
+# "irq": 11,
+# "slot": 4,
+# "class_info": {
+# "class": 1280,
+# "desc": "RAM controller"
+# },
+# "id": {
+# "device": 6900,
+# "vendor": 4098
+# },
+# "function": 0,
+# "regions": [
+# {
+# "bar": 0,
+# "size": 32,
+# "address": 49280,
+# "type": "io"
+# }
+# ]
+# }
+# ]
+# }
+# ]
+# }
+#
+# Note: This example has been shortened as the real response is too long.
+#
##
{ 'command': 'query-pci', 'returns': ['PciInfo'] }
@@ -1571,6 +2173,11 @@
# unexpected.
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "quit" }
+# <- { "return": {} }
##
{ 'command': 'quit' }
@@ -1585,6 +2192,12 @@
# state. In "inmigrate" state, it will ensure that the guest
# remains paused once migration finishes, as if the -S option was
# passed on the command line.
+#
+# Example:
+#
+# -> { "execute": "stop" }
+# <- { "return": {} }
+#
##
{ 'command': 'stop' }
@@ -1594,6 +2207,12 @@
# Performs a hard reset of a guest.
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "system_reset" }
+# <- { "return": {} }
+#
##
{ 'command': 'system_reset' }
@@ -1608,6 +2227,11 @@
# returning does not indicate that a guest has accepted the request or
# that it has shut down. Many guests will respond to this command by
# prompting the user in some way.
+#
+# Example:
+#
+# -> { "execute": "system_powerdown" }
+# <- { "return": {} }
+#
##
{ 'command': 'system_powerdown' }
@@ -1632,6 +2256,12 @@
# Returns: Nothing on success
#
# Since: 1.5
+#
+# Example:
+#
+# -> { "execute": "cpu-add", "arguments": { "id": 2 } }
+# <- { "return": {} }
+#
##
{ 'command': 'cpu-add', 'data': {'id': 'int'} }
@@ -1654,6 +2284,15 @@
# Since: 0.14.0
#
# Notes: Errors were not reliably returned until 1.1
+#
+# Example:
+#
+# -> { "execute": "memsave",
+# "arguments": { "val": 10,
+# "size": 100,
+# "filename": "/tmp/virtual-mem-dump" } }
+# <- { "return": {} }
+#
##
{ 'command': 'memsave',
'data': {'val': 'int', 'size': 'int', 'filename': 'str', '*cpu-index': 'int'} }
@@ -1674,6 +2313,15 @@
# Since: 0.14.0
#
# Notes: Errors were not reliably returned until 1.1
+#
+# Example:
+#
+# -> { "execute": "pmemsave",
+# "arguments": { "val": 10,
+# "size": 100,
+# "filename": "/tmp/physical-mem-dump" } }
+# <- { "return": {} }
+#
##
{ 'command': 'pmemsave',
'data': {'val': 'int', 'size': 'int', 'filename': 'str'} }
@@ -1694,6 +2342,12 @@
# this case, the effect of the command is to make sure the guest
# starts once migration finishes, removing the effect of the -S
# command line option if it was passed.
+#
+# Example:
+#
+# -> { "execute": "cont" }
+# <- { "return": {} }
+#
##
{ 'command': 'cont' }
@@ -1705,6 +2359,12 @@
# Since: 1.1
#
# Returns: nothing.
+#
+# Example:
+#
+# -> { "execute": "system_wakeup" }
+# <- { "return": {} }
+#
##
{ 'command': 'system_wakeup' }
@@ -1712,12 +2372,19 @@
# @inject-nmi:
#
# Injects a Non-Maskable Interrupt into the default CPU (x86/s390) or all CPUs (ppc64).
+# The command fails when the guest doesn't support injecting NMIs.
#
# Returns: If successful, nothing
#
# Since: 0.14.0
#
# Note: prior to 2.1, this command was only supported for x86 and s390 VMs
+#
+# Example:
+#
+# -> { "execute": "inject-nmi" }
+# <- { "return": {} }
+#
##
{ 'command': 'inject-nmi' }
@@ -1738,6 +2405,13 @@
# Notes: Not all network adapters support setting link status. This command
# will succeed even if the network adapter does not support link status
# notification.
+#
+# Example:
+#
+# -> { "execute": "set_link",
+# "arguments": { "name": "e1000.0", "up": false } }
+# <- { "return": {} }
+#
##
{ 'command': 'set_link', 'data': {'name': 'str', 'up': 'bool'} }
@@ -1758,6 +2432,12 @@
# size independent of this command.
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "balloon", "arguments": { "value": 536870912 } }
+# <- { "return": {} }
+#
##
{ 'command': 'balloon', 'data': {'value': 'int'} }
@@ -1795,28 +2475,29 @@
# @TransactionAction:
#
# A discriminated record of operations that can be performed with
-# @transaction.
+# @transaction. Action @type can be:
#
-# Since: 1.1
+# - @abort: since 1.6
+# - @block-dirty-bitmap-add: since 2.5
+# - @block-dirty-bitmap-clear: since 2.5
+# - @blockdev-backup: since 2.3
+# - @blockdev-snapshot: since 2.5
+# - @blockdev-snapshot-internal-sync: since 1.7
+# - @blockdev-snapshot-sync: since 1.1
+# - @drive-backup: since 1.6
#
-# drive-backup since 1.6
-# abort since 1.6
-# blockdev-snapshot-internal-sync since 1.7
-# blockdev-backup since 2.3
-# blockdev-snapshot since 2.5
-# block-dirty-bitmap-add since 2.5
-# block-dirty-bitmap-clear since 2.5
+# Since: 1.1
##
{ 'union': 'TransactionAction',
'data': {
- 'blockdev-snapshot': 'BlockdevSnapshot',
- 'blockdev-snapshot-sync': 'BlockdevSnapshotSync',
- 'drive-backup': 'DriveBackup',
- 'blockdev-backup': 'BlockdevBackup',
'abort': 'Abort',
- 'blockdev-snapshot-internal-sync': 'BlockdevSnapshotInternal',
'block-dirty-bitmap-add': 'BlockDirtyBitmapAdd',
- 'block-dirty-bitmap-clear': 'BlockDirtyBitmap'
+ 'block-dirty-bitmap-clear': 'BlockDirtyBitmap',
+ 'blockdev-backup': 'BlockdevBackup',
+ 'blockdev-snapshot': 'BlockdevSnapshot',
+ 'blockdev-snapshot-internal-sync': 'BlockdevSnapshotInternal',
+ 'blockdev-snapshot-sync': 'BlockdevSnapshotSync',
+ 'drive-backup': 'DriveBackup'
} }
##
@@ -1843,6 +2524,28 @@
# operation fails, then the entire set of actions will be abandoned and the
# appropriate error returned.
#
+# For external snapshots, the dictionary contains the device, the file to use for
+# the new snapshot, and the format. The default format, if not specified, is
+# qcow2.
+#
+# Each new snapshot defaults to being created by QEMU (wiping any
+# contents if the file already exists), but it is also possible to reuse
+# an externally-created file. In the latter case, you should ensure that
+# the new image file has the same contents as the current one; QEMU cannot
+# perform any meaningful check. Typically this is achieved by using the
+# current image file as the backing file for the new image.
+#
+# On failure, the original disks' pre-snapshot state will be used.
+#
+# For internal snapshots, the dictionary contains the device and the snapshot's
+# name. If an internal snapshot with a matching name already exists, the
+# request will be rejected. Only some image formats support it, for example
+# qcow2, rbd, and sheepdog.
+#
+# On failure, qemu will try to delete the newly created internal snapshot in the
+# transaction. When an I/O error occurs during deletion, the user needs to fix
+# it later with qemu-img or another command.
+#
# @actions: List of @TransactionAction;
# information needed for the respective operations.
#
@@ -1851,6 +2554,7 @@
# for additional detail.
#
# Returns: nothing on success
+#
# Errors depend on the operations of the transaction
#
# Note: The transaction aborts on the first failure. Therefore, there will be
@@ -1858,6 +2562,28 @@
# subsequent actions will not have been attempted.
#
# Since: 1.1
+#
+# Example:
+#
+# -> { "execute": "transaction",
+# "arguments": { "actions": [
+# { "type": "blockdev-snapshot-sync", "data" : { "device": "ide-hd0",
+# "snapshot-file": "/some/place/my-image",
+# "format": "qcow2" } },
+# { "type": "blockdev-snapshot-sync", "data" : { "node-name": "myfile",
+# "snapshot-file": "/some/place/my-image2",
+# "snapshot-node-name": "node3432",
+# "mode": "existing",
+# "format": "qcow2" } },
+# { "type": "blockdev-snapshot-sync", "data" : { "device": "ide-hd1",
+# "snapshot-file": "/some/place/my-image2",
+# "mode": "existing",
+# "format": "qcow2" } },
+# { "type": "blockdev-snapshot-internal-sync", "data" : {
+# "device": "ide-hd2",
+# "name": "snapshot0" } } ] } }
+# <- { "return": {} }
+#
##
{ 'command': 'transaction',
'data': { 'actions': [ 'TransactionAction' ],
@@ -1879,15 +2605,26 @@
# Since: 0.14.0
#
# Notes: This command only exists as a stop-gap. Its use is highly
-# discouraged. The semantics of this command are not guaranteed.
+# discouraged. The semantics of this command are not
+# guaranteed: this means that command names, arguments and
+# responses can change or be removed at ANY time. Applications
+# that rely on long term stability guarantees should NOT
+# use this command.
#
# Known limitations:
#
-# o This command is stateless, this means that commands that depend
+# * This command is stateless, this means that commands that depend
# on state information (such as getfd) might not work
#
-# o Commands that prompt the user for data (eg. 'cont' when the block
-# device is encrypted) don't currently work
+# * Commands that prompt the user for data (eg. 'cont' when the block
+# device is encrypted) don't currently work
+#
+# Example:
+#
+# -> { "execute": "human-monitor-command",
+# "arguments": { "command-line": "info kvm" } }
+# <- { "return": "kvm support: enabled\r\n" }
+#
##
{ 'command': 'human-monitor-command',
'data': {'command-line': 'str', '*cpu-index': 'int'},
@@ -1903,6 +2640,12 @@
# Notes: This command succeeds even if there is no migration process running.
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "migrate_cancel" }
+# <- { "return": {} }
+#
##
{ 'command': 'migrate_cancel' }
@@ -1918,6 +2661,12 @@
# Notes: This command is deprecated in favor of 'migrate-set-parameters'
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "migrate_set_downtime", "arguments": { "value": 0.1 } }
+# <- { "return": {} }
+#
##
{ 'command': 'migrate_set_downtime', 'data': {'value': 'number'} }
@@ -1926,20 +2675,26 @@
#
# Set maximum speed for migration.
#
-# @value: maximum speed in bytes.
+# @value: maximum speed in bytes per second.
#
# Returns: nothing on success
#
# Notes: This command is deprecated in favor of 'migrate-set-parameters'
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "migrate_set_speed", "arguments": { "value": 1024 } }
+# <- { "return": {} }
+#
##
{ 'command': 'migrate_set_speed', 'data': {'value': 'int'} }
##
# @migrate-set-cache-size:
#
-# Set XBZRLE cache size
+# Set cache size to be used by XBZRLE migration
#
# @value: cache size in bytes
#
@@ -1949,17 +2704,30 @@
# Returns: nothing on success
#
# Since: 1.2
+#
+# Example:
+#
+# -> { "execute": "migrate-set-cache-size",
+# "arguments": { "value": 536870912 } }
+# <- { "return": {} }
+#
##
{ 'command': 'migrate-set-cache-size', 'data': {'value': 'int'} }
##
# @query-migrate-cache-size:
#
-# query XBZRLE cache size
+# Query migration XBZRLE cache size
#
# Returns: XBZRLE cache size in bytes
#
# Since: 1.2
+#
+# Example:
+#
+# -> { "execute": "query-migrate-cache-size" }
+# <- { "return": 67108864 }
+#
##
{ 'command': 'query-migrate-cache-size', 'returns': 'int' }
@@ -2076,6 +2844,13 @@
# If Spice is not enabled, DeviceNotFound
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "set_password", "arguments": { "protocol": "vnc",
+# "password": "secret" } }
+# <- { "return": {} }
+#
##
{ 'command': 'set_password',
'data': {'protocol': 'str', 'password': 'str', '*connected': 'str'} }
@@ -2102,6 +2877,13 @@
# coordinate server time with client time. It is not recommended to
# use the absolute time version of the @time parameter unless you're
# sure you are on the same machine as the QEMU instance.
+#
+# Example:
+#
+# -> { "execute": "expire_password", "arguments": { "protocol": "vnc",
+# "time": "+60" } }
+# <- { "return": {} }
+#
##
{ 'command': 'expire_password', 'data': {'protocol': 'str', 'time': 'str'} }
@@ -2152,6 +2934,23 @@
# change-vnc-password.
#
# Since: 0.14.0
+#
+# Example:
+#
+# 1. Change a removable medium
+#
+# -> { "execute": "change",
+# "arguments": { "device": "ide1-cd0",
+# "target": "/srv/images/Fedora-12-x86_64-DVD.iso" } }
+# <- { "return": {} }
+#
+# 2. Change VNC password
+#
+# -> { "execute": "change",
+# "arguments": { "device": "vnc", "target": "password",
+# "arg": "foobar1" } }
+# <- { "return": {} }
+#
##
{ 'command': 'change',
'data': {'device': 'str', 'target': 'str', '*arg': 'str'} }
@@ -2234,6 +3033,22 @@
# Returns: nothing on success
#
# Since: 0.14.0
+#
+# Notes:
+#
+# 1. The 'query-migrate' command should be used to check migration's progress
+# and final result (this information is provided by the 'status' member)
+#
+# 2. All boolean arguments default to false
+#
+# 3. The user Monitor's "detach" argument is invalid in QMP and should not
+# be used
+#
+# Example:
+#
+# -> { "execute": "migrate", "arguments": { "uri": "tcp:0:4446" } }
+# <- { "return": {} }
+#
##
{ 'command': 'migrate',
'data': {'uri': 'str', '*blk': 'bool', '*inc': 'bool', '*detach': 'bool' } }
@@ -2250,9 +3065,24 @@
# Returns: nothing on success
#
# Since: 2.3
-# Note: It's a bad idea to use a string for the uri, but it needs to stay
-# compatible with -incoming and the format of the uri is already exposed
-# above libvirt
+#
+# Notes:
+#
+# 1. It's a bad idea to use a string for the uri, but it needs to stay
+# compatible with -incoming and the format of the uri is already exposed
+# above libvirt.
+#
+# 2. QEMU must be started with -incoming defer to allow migrate-incoming to
+# be used.
+#
+# 3. The uri format is the same as for -incoming
+#
+# Example:
+#
+# -> { "execute": "migrate-incoming",
+# "arguments": { "uri": "tcp::4446" } }
+# <- { "return": {} }
+#
##
{ 'command': 'migrate-incoming', 'data': {'uri': 'str' } }
@@ -2269,6 +3099,13 @@
# Returns: Nothing on success
#
# Since: 1.1
+#
+# Example:
+#
+# -> { "execute": "xen-save-devices-state",
+# "arguments": { "filename": "/tmp/save" } }
+# <- { "return": {} }
+#
##
{ 'command': 'xen-save-devices-state', 'data': {'filename': 'str'} }
@@ -2282,6 +3119,13 @@
# Returns: nothing
#
# Since: 1.3
+#
+# Example:
+#
+# -> { "execute": "xen-set-global-dirty-log",
+# "arguments": { "enable": true } }
+# <- { "return": {} }
+#
##
{ 'command': 'xen-set-global-dirty-log', 'data': { 'enable': 'bool' } }
@@ -2292,7 +3136,7 @@
#
# @bus: #optional the device's parent bus (device tree path)
#
-# @id: the device's ID, must be unique
+# @id: #optional the device's ID, must be unique
#
# Additional arguments depend on the type.
#
@@ -2314,7 +3158,7 @@
# "mac": "52:54:00:12:34:56" } }
# <- { "return": {} }
#
-# TODO This command effectively bypasses QAPI completely due to its
+# TODO: This command effectively bypasses QAPI completely due to its
# "additional arguments" business. It shouldn't have been added to
# the schema in this form. It should be qapified properly, or
# replaced by a properly qapified command.
@@ -2322,7 +3166,7 @@
# Since: 0.13
##
{ 'command': 'device_add',
- 'data': {'driver': 'str', 'id': 'str'},
+ 'data': {'driver': 'str', '*bus': 'str', '*id': 'str'},
'gen': false } # so we can get the additional arguments
##
@@ -2330,7 +3174,7 @@
#
# Remove a device from a guest
#
-# @id: the name or QOM path of the device
+# @id: the device's ID or QOM path
#
# Returns: Nothing on success
# If @id is not a valid device, DeviceNotFound
@@ -2343,6 +3187,17 @@
# for all devices.
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "device_del",
+# "arguments": { "id": "net1" } }
+# <- { "return": {} }
+#
+# -> { "execute": "device_del",
+# "arguments": { "id": "/machine/peripheral-anon/device[0]" } }
+# <- { "return": {} }
+#
##
{ 'command': 'device_del', 'data': {'id': 'str'} }
@@ -2409,9 +3264,18 @@
# @length is not allowed to be specified with non-elf @format at the
# same time (since 2.0)
#
+# Note: All boolean arguments default to false
+#
# Returns: nothing on success
#
# Since: 1.2
+#
+# Example:
+#
+# -> { "execute": "dump-guest-memory",
+# "arguments": { "protocol": "fd:dump" } }
+# <- { "return": {} }
+#
##
{ 'command': 'dump-guest-memory',
'data': { 'paging': 'bool', 'protocol': 'str', '*detach': 'bool',
@@ -2462,6 +3326,13 @@
# Returns: A @DumpStatus object showing the dump status.
#
# Since: 2.6
+#
+# Example:
+#
+# -> { "execute": "query-dump" }
+# <- { "return": { "status": "active", "completed": 1024000,
+# "total": 2048000 } }
+#
##
{ 'command': 'query-dump', 'returns': 'DumpQueryResult' }
@@ -2485,6 +3356,13 @@
# dump-guest-memory
#
# Since: 2.0
+#
+# Example:
+#
+# -> { "execute": "query-dump-guest-memory-capability" }
+# <- { "return": { "formats":
+#                    ["elf", "kdump-zlib", "kdump-lzo", "kdump-snappy"] } }
+#
##
{ 'command': 'query-dump-guest-memory-capability',
'returns': 'DumpGuestMemoryCapability' }
@@ -2499,6 +3377,13 @@
# This command is only supported on s390 architecture.
#
# Since: 2.5
+#
+# Example:
+#
+# -> { "execute": "dump-skeys",
+# "arguments": { "filename": "/tmp/skeys" } }
+# <- { "return": {} }
+#
##
{ 'command': 'dump-skeys',
'data': { 'filename': 'str' } }
@@ -2515,7 +3400,7 @@
#
# Additional arguments depend on the type.
#
-# TODO This command effectively bypasses QAPI completely due to its
+# TODO: This command effectively bypasses QAPI completely due to its
# "additional arguments" business. It shouldn't have been added to
# the schema in this form. It should be qapified properly, or
# replaced by a properly qapified command.
@@ -2524,6 +3409,14 @@
#
# Returns: Nothing on success
# If @type is not a valid network backend, DeviceNotFound
+#
+# Example:
+#
+# -> { "execute": "netdev_add",
+# "arguments": { "type": "user", "id": "netdev1",
+# "dnssearch": "example.org" } }
+# <- { "return": {} }
+#
##
{ 'command': 'netdev_add',
'data': {'type': 'str', 'id': 'str'},
@@ -2540,6 +3433,12 @@
# If @id is not a valid network backend, DeviceNotFound
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "netdev_del", "arguments": { "id": "netdev1" } }
+# <- { "return": {} }
+#
##
{ 'command': 'netdev_del', 'data': {'id': 'str'} }
@@ -2558,6 +3457,14 @@
# Error if @qom-type is not a valid class name
#
# Since: 2.0
+#
+# Example:
+#
+# -> { "execute": "object-add",
+# "arguments": { "qom-type": "rng-random", "id": "rng1",
+# "props": { "filename": "/dev/hwrng" } } }
+# <- { "return": {} }
+#
##
{ 'command': 'object-add',
'data': {'qom-type': 'str', 'id': 'str', '*props': 'any'} }
@@ -2573,6 +3480,12 @@
# Error if @id is not a valid id for a QOM object
#
# Since: 2.0
+#
+# Example:
+#
+# -> { "execute": "object-del", "arguments": { "id": "rng1" } }
+# <- { "return": {} }
+#
##
{ 'command': 'object-del', 'data': {'id': 'str'} }
@@ -3070,6 +3983,10 @@
#
# @port: port part of the address, or lowest port if @to is present
#
+# @numeric: #optional true if the host/port are guaranteed to be numeric,
+# false if name resolution should be attempted. Defaults to false.
+# (Since 2.9)
+#
# @to: highest port to try
#
# @ipv4: whether to accept IPv4 addresses, default try both IPv4 and IPv6
@@ -3084,6 +4001,7 @@
'data': {
'host': 'str',
'port': 'str',
+ '*numeric': 'bool',
'*to': 'uint16',
'*ipv4': 'bool',
'*ipv6': 'bool' } }
@@ -3147,8 +4065,15 @@
# Notes: If @fdname already exists, the file descriptor assigned to
# it will be closed and replaced by the received file
# descriptor.
+#
# The 'closefd' command can be used to explicitly close the
# file descriptor when it is no longer needed.
+#
+# Example:
+#
+# -> { "execute": "getfd", "arguments": { "fdname": "fd1" } }
+# <- { "return": {} }
+#
##
{ 'command': 'getfd', 'data': {'fdname': 'str'} }
@@ -3162,6 +4087,12 @@
# Returns: Nothing on success
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "closefd", "arguments": { "fdname": "fd1" } }
+# <- { "return": {} }
+#
##
{ 'command': 'closefd', 'data': {'fdname': 'str'} }
@@ -3221,6 +4152,9 @@
# @unavailable-features: #optional List of properties that prevent
# the CPU model from running in the current
# host. (since 2.8)
+# @typename: Type name that can be used as argument to @device-list-properties,
+# to introspect properties configurable using -cpu or -global.
+# (since 2.9)
#
# @unavailable-features is a list of QOM property names that
# represent CPU model attributes that prevent the CPU from running.
@@ -3242,7 +4176,7 @@
##
{ 'struct': 'CpuDefinitionInfo',
'data': { 'name': 'str', '*migration-safe': 'bool', 'static': 'bool',
- '*unavailable-features': [ 'str' ] } }
+ '*unavailable-features': [ 'str' ], 'typename': 'str' } }
##
# @query-cpu-definitions:
@@ -3511,7 +4445,9 @@
# @opaque: #optional A free-form string that can be used to describe the fd.
#
# Returns: @AddfdInfo on success
+#
# If file descriptor was not received, FdNotSupplied
+#
# If @fdset-id is a negative value, InvalidParameterValue
#
# Notes: The list of fd sets is shared by all monitor connections.
@@ -3519,6 +4455,12 @@
# If @fdset-id is not specified, a new fd set will be created.
#
# Since: 1.2.0
+#
+# Example:
+#
+# -> { "execute": "add-fd", "arguments": { "fdset-id": 1 } }
+# <- { "return": { "fdset-id": 1, "fd": 3 } }
+#
##
{ 'command': 'add-fd', 'data': {'*fdset-id': 'int', '*opaque': 'str'},
'returns': 'AddfdInfo' }
@@ -3541,6 +4483,12 @@
#
# If @fd is not specified, all file descriptors in @fdset-id
# will be removed.
+#
+# Example:
+#
+# -> { "execute": "remove-fd", "arguments": { "fdset-id": 1, "fd": 3 } }
+# <- { "return": {} }
+#
##
{ 'command': 'remove-fd', 'data': {'fdset-id': 'int', '*fd': 'int'} }
@@ -3583,6 +4531,37 @@
#
# Note: The list of fd sets is shared by all monitor connections.
#
+# Example:
+#
+# -> { "execute": "query-fdsets" }
+# <- { "return": [
+# {
+# "fds": [
+# {
+# "fd": 30,
+# "opaque": "rdonly:/path/to/file"
+# },
+# {
+# "fd": 24,
+# "opaque": "rdwr:/path/to/file"
+# }
+# ],
+# "fdset-id": 1
+# },
+# {
+# "fds": [
+# {
+# "fd": 28
+# },
+# {
+# "fd": 29
+# }
+# ],
+# "fdset-id": 0
+# }
+# ]
+# }
+#
##
{ 'command': 'query-fdsets', 'returns': ['FdsetInfo'] }
@@ -3612,16 +4591,19 @@
##
# @QKeyCode:
#
+# An enumeration of key names.
+#
+# This is used by the @send-key command.
+#
# @unmapped: since 2.0
# @pause: since 2.0
# @ro: since 2.4
# @kp_comma: since 2.4
# @kp_equals: since 2.6
# @power: since 2.6
-#
-# An enumeration of key name.
-#
-# This is used by the send-key command.
+# @hiragana: since 2.9
+# @henkan: since 2.9
+# @yen: since 2.9
#
# Since: 1.3.0
#
@@ -3642,7 +4624,8 @@
'kp_9', 'less', 'f11', 'f12', 'print', 'home', 'pgup', 'pgdn', 'end',
'left', 'up', 'down', 'right', 'insert', 'delete', 'stop', 'again',
'props', 'undo', 'front', 'copy', 'open', 'paste', 'find', 'cut',
- 'lf', 'help', 'meta_l', 'meta_r', 'compose', 'pause', 'ro',
+ 'lf', 'help', 'meta_l', 'meta_r', 'compose', 'pause',
+ 'ro', 'hiragana', 'henkan', 'yen',
'kp_comma', 'kp_equals', 'power' ] }
##
@@ -3675,6 +4658,14 @@
#
# Since: 1.3.0
#
+# Example:
+#
+# -> { "execute": "send-key",
+# "arguments": { "keys": [ { "type": "qcode", "data": "ctrl" },
+# { "type": "qcode", "data": "alt" },
+# { "type": "qcode", "data": "delete" } ] } }
+# <- { "return": {} }
+#
##
{ 'command': 'send-key',
'data': { 'keys': ['KeyValue'], '*hold-time': 'int' } }
@@ -3689,6 +4680,13 @@
# Returns: Nothing on success
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "screendump",
+# "arguments": { "filename": "/tmp/image" } }
+# <- { "return": {} }
+#
##
{ 'command': 'screendump', 'data': {'filename': 'str'} }
@@ -3913,6 +4911,25 @@
# Returns: ChardevReturn.
#
# Since: 1.4
+#
+# Example:
+#
+# -> { "execute" : "chardev-add",
+# "arguments" : { "id" : "foo",
+# "backend" : { "type" : "null", "data" : {} } } }
+# <- { "return": {} }
+#
+# -> { "execute" : "chardev-add",
+# "arguments" : { "id" : "bar",
+# "backend" : { "type" : "file",
+# "data" : { "out" : "/tmp/bar.log" } } } }
+# <- { "return": {} }
+#
+# -> { "execute" : "chardev-add",
+# "arguments" : { "id" : "baz",
+# "backend" : { "type" : "pty", "data" : {} } } }
+# <- { "return": { "pty" : "/dev/pty/42" } }
+#
##
{ 'command': 'chardev-add', 'data': {'id' : 'str',
'backend' : 'ChardevBackend' },
@@ -3928,6 +4945,12 @@
# Returns: Nothing on success
#
# Since: 1.4
+#
+# Example:
+#
+# -> { "execute": "chardev-remove", "arguments": { "id" : "foo" } }
+# <- { "return": {} }
+#
##
{ 'command': 'chardev-remove', 'data': {'id': 'str'} }
@@ -3950,6 +4973,12 @@
# Returns: a list of TpmModel
#
# Since: 1.5
+#
+# Example:
+#
+# -> { "execute": "query-tpm-models" }
+# <- { "return": [ "tpm-tis" ] }
+#
##
{ 'command': 'query-tpm-models', 'returns': ['TpmModel'] }
@@ -3972,6 +5001,12 @@
# Returns: a list of TpmType
#
# Since: 1.5
+#
+# Example:
+#
+# -> { "execute": "query-tpm-types" }
+# <- { "return": [ "passthrough" ] }
+#
##
{ 'command': 'query-tpm-types', 'returns': ['TpmType'] }
@@ -4028,6 +5063,25 @@
# Returns: @TPMInfo on success
#
# Since: 1.5
+#
+# Example:
+#
+# -> { "execute": "query-tpm" }
+# <- { "return":
+# [
+# { "model": "tpm-tis",
+# "options":
+# { "type": "passthrough",
+# "data":
+# { "cancel-path": "/sys/class/misc/tpm0/device/cancel",
+# "path": "/dev/tpm0"
+# }
+# },
+# "id": "tpm0"
+# }
+# ]
+# }
+#
##
{ 'command': 'query-tpm', 'returns': ['TPMInfo'] }
@@ -4154,6 +5208,28 @@
# @option). Returns an error if the given @option doesn't exist.
#
# Since: 1.5
+#
+# Example:
+#
+# -> { "execute": "query-command-line-options",
+# "arguments": { "option": "option-rom" } }
+# <- { "return": [
+# {
+# "parameters": [
+# {
+# "name": "romfile",
+# "type": "string"
+# },
+# {
+# "name": "bootindex",
+# "type": "number"
+# }
+# ],
+# "option": "option-rom"
+# }
+# ]
+# }
+#
##
{'command': 'query-command-line-options', 'data': { '*option': 'str' },
'returns': ['CommandLineOptionInfo'] }
@@ -4275,6 +5351,36 @@
# isn't a NIC.
#
# Since: 1.6
+#
+# Example:
+#
+# -> { "execute": "query-rx-filter", "arguments": { "name": "vnet0" } }
+# <- { "return": [
+# {
+# "promiscuous": true,
+# "name": "vnet0",
+# "main-mac": "52:54:00:12:34:56",
+# "unicast": "normal",
+# "vlan": "normal",
+# "vlan-table": [
+# 4,
+# 0
+# ],
+# "unicast-table": [
+# ],
+# "multicast": "normal",
+# "multicast-overflow": false,
+# "unicast-overflow": false,
+# "multicast-table": [
+# "01:00:5e:00:00:01",
+# "33:33:00:00:00:01",
+# "33:33:ff:12:34:56"
+# ],
+# "broadcast-allowed": false
+# }
+# ]
+# }
+#
##
{ 'command': 'query-rx-filter', 'data': { '*name': 'str' },
'returns': ['RxFilterInfo'] }
@@ -4373,9 +5479,9 @@
#
# Returns: Nothing on success.
#
-# The @display and @head parameters can be used to send the input
-# event to specific input devices in case (a) multiple input devices
-# of the same kind are added to the virtual machine and (b) you have
+# The @device and @head parameters can be used to send the input event
+# to specific input devices in case (a) multiple input devices of the
+# same kind are added to the virtual machine and (b) you have
# configured input routing (see docs/multiseat.txt) for those input
# devices. The parameters work exactly like the device and head
# properties of input devices. If @device is missing, only devices
@@ -4385,6 +5491,48 @@
# precedence.
#
# Since: 2.6
+#
+# Note: The consoles are visible in the qom tree, under
+# /backend/console[$index]. They have a device link and head property,
+# so it is possible to map which console belongs to which device and
+# display.
+#
+# Example:
+#
+# 1. Press left mouse button.
+#
+# -> { "execute": "input-send-event",
+# "arguments": { "device": "video0",
+# "events": [ { "type": "btn",
+# "data" : { "down": true, "button": "left" } } ] } }
+# <- { "return": {} }
+#
+# -> { "execute": "input-send-event",
+# "arguments": { "device": "video0",
+# "events": [ { "type": "btn",
+# "data" : { "down": false, "button": "left" } } ] } }
+# <- { "return": {} }
+#
+# 2. Press ctrl-alt-del.
+#
+# -> { "execute": "input-send-event",
+# "arguments": { "events": [
+# { "type": "key", "data" : { "down": true,
+# "key": {"type": "qcode", "data": "ctrl" } } },
+# { "type": "key", "data" : { "down": true,
+# "key": {"type": "qcode", "data": "alt" } } },
+# { "type": "key", "data" : { "down": true,
+# "key": {"type": "qcode", "data": "delete" } } } ] } }
+# <- { "return": {} }
+#
+# 3. Move mouse pointer to absolute coordinates (20000, 400).
+#
+# -> { "execute": "input-send-event" ,
+# "arguments": { "events": [
+# { "type": "abs", "data" : { "axis": "x", "value" : 20000 } },
+# { "type": "abs", "data" : { "axis": "y", "value" : 400 } } ] } }
+# <- { "return": {} }
+#
##
{ 'command': 'input-send-event',
'data': { '*device': 'str',
@@ -4453,6 +5601,8 @@
#
# Information about memory backend
#
+# @id: #optional backend's ID if backend has 'id' property (since 2.9)
+#
# @size: memory backend size
#
# @merge: enables or disables memory merge support
@@ -4469,6 +5619,7 @@
##
{ 'struct': 'Memdev',
'data': {
+ '*id': 'str',
'size': 'size',
'merge': 'bool',
'dump': 'bool',
@@ -4484,6 +5635,31 @@
# Returns: a list of @Memdev.
#
# Since: 2.1
+#
+# Example:
+#
+# -> { "execute": "query-memdev" }
+# <- { "return": [
+# {
+# "id": "mem1",
+# "size": 536870912,
+# "merge": false,
+# "dump": true,
+# "prealloc": false,
+# "host-nodes": [0, 1],
+# "policy": "bind"
+# },
+# {
+# "size": 536870912,
+# "merge": false,
+# "dump": true,
+# "prealloc": true,
+# "host-nodes": [2, 3],
+# "policy": "preferred"
+# }
+# ]
+# }
+#
##
{ 'command': 'query-memdev', 'returns': ['Memdev'] }
@@ -4537,6 +5713,22 @@
# Lists available memory devices and their state
#
# Since: 2.1
+#
+# Example:
+#
+# -> { "execute": "query-memory-devices" }
+# <- { "return": [ { "data":
+# { "addr": 5368709120,
+# "hotpluggable": true,
+# "hotplugged": true,
+# "id": "d1",
+# "memdev": "/objects/memX",
+# "node": 0,
+# "size": 1073741824,
+# "slot": 0},
+# "type": "dimm"
+# } ] }
+#
##
{ 'command': 'query-memory-devices', 'returns': ['MemoryDeviceInfo'] }
@@ -4577,10 +5769,20 @@
##
# @query-acpi-ospm-status:
#
-# Lists ACPI OSPM status of ACPI device objects,
-# which might be reported via _OST method
+# Return a list of ACPIOSTInfo for devices that support status
+# reporting via the ACPI _OST method.
#
# Since: 2.1
+#
+# Example:
+#
+# -> { "execute": "query-acpi-ospm-status" }
+# <- { "return": [ { "device": "d1", "slot": "0", "slot-type": "DIMM", "source": 1, "status": 0},
+# { "slot": "1", "slot-type": "DIMM", "source": 0, "status": 0},
+# { "slot": "2", "slot-type": "DIMM", "source": 0, "status": 0},
+# { "slot": "3", "slot-type": "DIMM", "source": 0, "status": 0}
+# ]}
+#
##
{ 'command': 'query-acpi-ospm-status', 'returns': ['ACPIOSTInfo'] }
@@ -4647,6 +5849,12 @@
# command.
#
# Since: 2.1
+#
+# Example:
+#
+# -> { "execute": "rtc-reset-reinjection" }
+# <- { "return": {} }
+#
##
{ 'command': 'rtc-reset-reinjection' }
@@ -4682,6 +5890,13 @@
# format.
#
# Since: 2.7
+#
+# Example:
+#
+# -> { "execute": "xen-load-devices-state",
+# "arguments": { "filename": "/tmp/resume" } }
+# <- { "return": {} }
+#
##
{ 'command': 'xen-load-devices-state', 'data': {'filename': 'str'} }
@@ -4718,6 +5933,13 @@
# Returns: a list of GICCapability objects.
#
# Since: 2.6
+#
+# Example:
+#
+# -> { "execute": "query-gic-capabilities" }
+# <- { "return": [{ "version": 2, "emulated": true, "kernel": false },
+# { "version": 3, "emulated": false, "kernel": true } ] }
+#
##
{ 'command': 'query-gic-capabilities', 'returns': ['GICCapability'] }
@@ -4728,17 +5950,17 @@
# it should be passed by management with device_add command when
# a CPU is being hotplugged.
#
+# @node-id: #optional NUMA node ID the CPU belongs to
+# @socket-id: #optional socket number within node/board the CPU belongs to
+# @core-id: #optional core number within socket the CPU belongs to
+# @thread-id: #optional thread number within core the CPU belongs to
+#
# Note: currently there are 4 properties that could be present,
#       but management should be prepared to pass through other
#       properties with the device_add command to allow for future
#       interface extension. This also requires the field names to be kept in
#       sync with the properties passed to -device/device_add.
#
-# @node-id: #optional NUMA node ID the CPU belongs to
-# @socket-id: #optional socket number within node/board the CPU belongs to
-# @core-id: #optional core number within socket the CPU belongs to
-# @thread-id: #optional thread number within core the CPU belongs to
-#
# Since: 2.7
##
{ 'struct': 'CpuInstanceProperties',
@@ -4774,5 +5996,33 @@
# Returns: a list of HotpluggableCPU objects.
#
# Since: 2.7
+#
+# Example:
+#
+# For pseries machine type started with -smp 2,cores=2,maxcpus=4 -cpu POWER8:
+#
+# -> { "execute": "query-hotpluggable-cpus" }
+# <- {"return": [
+# { "props": { "core": 8 }, "type": "POWER8-spapr-cpu-core",
+# "vcpus-count": 1 },
+# { "props": { "core": 0 }, "type": "POWER8-spapr-cpu-core",
+# "vcpus-count": 1, "qom-path": "/machine/unattached/device[0]"}
+# ]}'
+#
+# For pc machine type started with -smp 1,maxcpus=2:
+#
+# -> { "execute": "query-hotpluggable-cpus" }
+# <- {"return": [
+# {
+# "type": "qemu64-x86_64-cpu", "vcpus-count": 1,
+# "props": {"core-id": 0, "socket-id": 1, "thread-id": 0}
+# },
+# {
+# "qom-path": "/machine/unattached/device[0]",
+# "type": "qemu64-x86_64-cpu", "vcpus-count": 1,
+# "props": {"core-id": 0, "socket-id": 0, "thread-id": 0}
+# }
+# ]}
+#
##
{ 'command': 'query-hotpluggable-cpus', 'returns': ['HotpluggableCPU'] }
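
(A rough usage sketch, not part of the schema text above: the props returned by
query-hotpluggable-cpus are the ones management passes back to device_add when
hot-plugging the missing CPU. For the pc example it could look as follows; the
device id "cpu-1" is purely illustrative.)

-> { "execute": "device_add",
     "arguments": { "driver": "qemu64-x86_64-cpu", "id": "cpu-1",
                    "socket-id": 1, "core-id": 0, "thread-id": 0 } }
<- { "return": {} }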
diff --git a/qapi/block-core.json b/qapi/block-core.json
index 6b42216960..1b3e6eb0e8 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -1,6 +1,8 @@
# -*- Mode: Python -*-
-#
-# QAPI block core definitions (vm unrelated)
+
+##
+# == QAPI block core definitions (vm unrelated)
+##
# QAPI common definitions
{ 'include': 'common.json' }
@@ -467,6 +469,87 @@
# Returns: a list of @BlockInfo describing each virtual block device
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-block" }
+# <- {
+# "return":[
+# {
+# "io-status": "ok",
+# "device":"ide0-hd0",
+# "locked":false,
+# "removable":false,
+# "inserted":{
+# "ro":false,
+# "drv":"qcow2",
+# "encrypted":false,
+# "file":"disks/test.qcow2",
+# "backing_file_depth":1,
+# "bps":1000000,
+# "bps_rd":0,
+# "bps_wr":0,
+# "iops":1000000,
+# "iops_rd":0,
+# "iops_wr":0,
+# "bps_max": 8000000,
+# "bps_rd_max": 0,
+# "bps_wr_max": 0,
+# "iops_max": 0,
+# "iops_rd_max": 0,
+# "iops_wr_max": 0,
+# "iops_size": 0,
+# "detect_zeroes": "on",
+# "write_threshold": 0,
+# "image":{
+# "filename":"disks/test.qcow2",
+# "format":"qcow2",
+# "virtual-size":2048000,
+# "backing_file":"base.qcow2",
+# "full-backing-filename":"disks/base.qcow2",
+# "backing-filename-format":"qcow2",
+# "snapshots":[
+# {
+# "id": "1",
+# "name": "snapshot1",
+# "vm-state-size": 0,
+# "date-sec": 10000200,
+# "date-nsec": 12,
+# "vm-clock-sec": 206,
+# "vm-clock-nsec": 30
+# }
+# ],
+# "backing-image":{
+# "filename":"disks/base.qcow2",
+# "format":"qcow2",
+# "virtual-size":2048000
+# }
+# }
+# },
+# "type":"unknown"
+# },
+# {
+# "io-status": "ok",
+# "device":"ide1-cd0",
+# "locked":false,
+# "removable":true,
+# "type":"unknown"
+# },
+# {
+# "device":"floppy0",
+# "locked":false,
+# "removable":true,
+# "type":"unknown"
+# },
+# {
+# "device":"sd0",
+# "locked":false,
+# "removable":true,
+# "type":"unknown"
+# }
+# ]
+# }
+#
##
{ 'command': 'query-block', 'returns': ['BlockInfo'] }
@@ -614,6 +697,9 @@
# @stats: A @BlockDeviceStats for the device.
#
# @parent: #optional This describes the file block device if it has one.
+# Contains recursively the statistics of the underlying
+# protocol (e.g. the host file for a qcow2 image). If there is
+# no underlying protocol, this field is omitted
#
# @backing: #optional This describes the backing block device if it has one.
# (Since 2.0)
@@ -641,6 +727,106 @@
# Returns: A list of @BlockStats for each virtual block devices.
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-blockstats" }
+# <- {
+# "return":[
+# {
+# "device":"ide0-hd0",
+# "parent":{
+# "stats":{
+# "wr_highest_offset":3686448128,
+# "wr_bytes":9786368,
+# "wr_operations":751,
+# "rd_bytes":122567168,
+# "rd_operations":36772
+# "wr_total_times_ns":313253456
+# "rd_total_times_ns":3465673657
+# "flush_total_times_ns":49653
+# "flush_operations":61,
+# "rd_merged":0,
+# "wr_merged":0,
+# "idle_time_ns":2953431879,
+# "account_invalid":true,
+# "account_failed":false
+# }
+# },
+# "stats":{
+# "wr_highest_offset":2821110784,
+# "wr_bytes":9786368,
+# "wr_operations":692,
+# "rd_bytes":122739200,
+# "rd_operations":36604
+# "flush_operations":51,
+# "wr_total_times_ns":313253456
+# "rd_total_times_ns":3465673657
+# "flush_total_times_ns":49653,
+# "rd_merged":0,
+# "wr_merged":0,
+# "idle_time_ns":2953431879,
+# "account_invalid":true,
+# "account_failed":false
+# }
+# },
+# {
+# "device":"ide1-cd0",
+# "stats":{
+# "wr_highest_offset":0,
+# "wr_bytes":0,
+# "wr_operations":0,
+# "rd_bytes":0,
+# "rd_operations":0
+# "flush_operations":0,
+# "wr_total_times_ns":0
+# "rd_total_times_ns":0
+# "flush_total_times_ns":0,
+# "rd_merged":0,
+# "wr_merged":0,
+# "account_invalid":false,
+# "account_failed":false
+# }
+# },
+# {
+# "device":"floppy0",
+# "stats":{
+# "wr_highest_offset":0,
+# "wr_bytes":0,
+# "wr_operations":0,
+# "rd_bytes":0,
+# "rd_operations":0
+# "flush_operations":0,
+# "wr_total_times_ns":0
+# "rd_total_times_ns":0
+# "flush_total_times_ns":0,
+# "rd_merged":0,
+# "wr_merged":0,
+# "account_invalid":false,
+# "account_failed":false
+# }
+# },
+# {
+# "device":"sd0",
+# "stats":{
+# "wr_highest_offset":0,
+# "wr_bytes":0,
+# "wr_operations":0,
+# "rd_bytes":0,
+# "rd_operations":0
+# "flush_operations":0,
+# "wr_total_times_ns":0
+# "rd_total_times_ns":0
+# "flush_total_times_ns":0,
+# "rd_merged":0,
+# "wr_merged":0,
+# "account_invalid":false,
+# "account_failed":false
+# }
+# }
+# ]
+# }
+#
##
{ 'command': 'query-blockstats',
'data': { '*query-nodes': 'bool' },
@@ -785,6 +971,13 @@
# occur if an invalid password is specified.
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "block_passwd", "arguments": { "device": "ide0-hd0",
+# "password": "12345" } }
+# <- { "return": {} }
+#
##
{ 'command': 'block_passwd', 'data': {'*device': 'str',
'*node-name': 'str', 'password': 'str'} }
@@ -806,6 +999,13 @@
# If @device is not a valid block device, DeviceNotFound
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "block_resize",
+# "arguments": { "device": "scratch", "size": 1073741824 } }
+# <- { "return": {} }
+#
##
{ 'command': 'block_resize', 'data': { '*device': 'str',
'*node-name': 'str',
@@ -837,7 +1037,9 @@
#
# @node-name: #optional graph node name to generate the snapshot from (Since 2.0)
#
-# @snapshot-file: the target of the new image. A new file will be created.
+# @snapshot-file: the target of the new image. If the file exists, or
+# if it is a device, the snapshot will be created in the existing
+# file/device. Otherwise, a new file will be created.
#
# @snapshot-node-name: #optional the graph node name of the new image (Since 2.0)
#
@@ -971,6 +1173,16 @@
# If @device is not a valid block device, DeviceNotFound
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "blockdev-snapshot-sync",
+# "arguments": { "device": "ide-hd0",
+# "snapshot-file":
+# "/some/place/my-image",
+# "format": "qcow2" } }
+# <- { "return": {} }
+#
##
{ 'command': 'blockdev-snapshot-sync',
'data': 'BlockdevSnapshotSync' }
@@ -981,9 +1193,31 @@
#
# Generates a snapshot of a block device.
#
+# Create a snapshot by installing 'node' as the backing image of
+# 'overlay'. Additionally, if 'node' is associated with a block
+# device, the block device changes to using 'overlay' as its new active
+# image.
+#
# For the arguments, see the documentation of BlockdevSnapshot.
#
# Since: 2.5
+#
+# Example:
+#
+# -> { "execute": "blockdev-add",
+# "arguments": { "options": { "driver": "qcow2",
+# "node-name": "node1534",
+# "file": { "driver": "file",
+# "filename": "hd1.qcow2" },
+# "backing": "" } } }
+#
+# <- { "return": {} }
+#
+# -> { "execute": "blockdev-snapshot",
+# "arguments": { "node": "ide-hd0",
+# "overlay": "node1534" } }
+# <- { "return": {} }
+#
##
{ 'command': 'blockdev-snapshot',
'data': 'BlockdevSnapshot' }
@@ -999,7 +1233,9 @@
# updated.
#
# @image-node-name: The name of the block driver state node of the
-# image to modify.
+# image to modify. The "device" argument is used
+# to verify "image-node-name" is in the chain
+# described by "device".
#
# @device: The device name or node-name of the root node that owns
# image-node-name.
@@ -1009,6 +1245,10 @@
# when specifying the string or the image chain may
# not be able to be reopened again.
#
+# Returns: Nothing on success
+#
+# If "device" does not exist or cannot be determined, DeviceNotFound
+#
# Since: 2.1
##
{ 'command': 'change-backing-file',
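
(A rough invocation sketch, with illustrative device and node names; as noted
above, the backing file string itself is not validated, so it must be chosen
with care.)

-> { "execute": "change-backing-file",
     "arguments": { "device": "virtio0",
                    "image-node-name": "node0",
                    "backing-file": "/some/place/new-backing.qcow2" } }
<- { "return": {} }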
@@ -1027,7 +1267,7 @@
# @device: the device name or node-name of a root node
#
# @base: #optional The file name of the backing image to write data into.
-# If not specified, this is the deepest backing image
+# If not specified, this is the deepest backing image.
#
# @top: #optional The file name of the backing image within the image chain,
# which contains the topmost data to be committed down. If
@@ -1073,6 +1313,13 @@
#
# Since: 1.3
#
+# Example:
+#
+# -> { "execute": "block-commit",
+# "arguments": { "device": "virtio0",
+# "top": "/tmp/snap1.qcow2" } }
+# <- { "return": {} }
+#
##
{ 'command': 'block-commit',
'data': { '*job-id': 'str', 'device': 'str', '*base': 'str', '*top': 'str',
@@ -1093,6 +1340,15 @@
# If @device is not a valid block device, GenericError
#
# Since: 1.6
+#
+# Example:
+#
+# -> { "execute": "drive-backup",
+# "arguments": { "device": "drive0",
+# "sync": "full",
+# "target": "backup.img" } }
+# <- { "return": {} }
+#
##
{ 'command': 'drive-backup', 'boxed': true,
'data': 'DriveBackup' }
@@ -1112,6 +1368,14 @@
# If @device is not a valid block device, DeviceNotFound
#
# Since: 2.3
+#
+# Example:
+#
+# -> { "execute": "blockdev-backup",
+# "arguments": { "device": "src-id",
+# "sync": "full",
+# "target": "tgt-id" } }
+# <- { "return": {} }
+#
##
{ 'command': 'blockdev-backup', 'boxed': true,
'data': 'BlockdevBackup' }
@@ -1125,13 +1389,67 @@
# Returns: the list of BlockDeviceInfo
#
# Since: 2.0
+#
+# Example:
+#
+# -> { "execute": "query-named-block-nodes" }
+# <- { "return": [ { "ro":false,
+# "drv":"qcow2",
+# "encrypted":false,
+# "file":"disks/test.qcow2",
+# "node-name": "my-node",
+# "backing_file_depth":1,
+# "bps":1000000,
+# "bps_rd":0,
+# "bps_wr":0,
+# "iops":1000000,
+# "iops_rd":0,
+# "iops_wr":0,
+# "bps_max": 8000000,
+# "bps_rd_max": 0,
+# "bps_wr_max": 0,
+# "iops_max": 0,
+# "iops_rd_max": 0,
+# "iops_wr_max": 0,
+# "iops_size": 0,
+# "write_threshold": 0,
+# "image":{
+# "filename":"disks/test.qcow2",
+# "format":"qcow2",
+# "virtual-size":2048000,
+# "backing_file":"base.qcow2",
+# "full-backing-filename":"disks/base.qcow2",
+# "backing-filename-format":"qcow2",
+# "snapshots":[
+# {
+# "id": "1",
+# "name": "snapshot1",
+# "vm-state-size": 0,
+# "date-sec": 10000200,
+# "date-nsec": 12,
+# "vm-clock-sec": 206,
+# "vm-clock-nsec": 30
+# }
+# ],
+# "backing-image":{
+# "filename":"disks/base.qcow2",
+# "format":"qcow2",
+# "virtual-size":2048000
+# }
+# } } ] }
+#
##
{ 'command': 'query-named-block-nodes', 'returns': [ 'BlockDeviceInfo' ] }
##
# @drive-mirror:
#
-# Start mirroring a block device's writes to a new destination.
+# Start mirroring a block device's writes to a new destination. @target
+# specifies the target of the new image. If the file exists, or if it
+# is a device, it will be used as the new destination for writes. If
+# it does not exist, a new file will be created. @format specifies the
+# format of the mirror image; the default is to probe if mode='existing',
+# else to use the format of the source.
#
# See DriveMirror for parameter descriptions
#
@@ -1139,6 +1457,16 @@
# If @device is not a valid block device, GenericError
#
# Since: 1.3
+#
+# Example:
+#
+# -> { "execute": "drive-mirror",
+# "arguments": { "device": "ide-hd0",
+# "target": "/some/place/my-image",
+# "sync": "full",
+# "format": "qcow2" } }
+# <- { "return": {} }
+#
##
{ 'command': 'drive-mirror', 'boxed': true,
'data': 'DriveMirror' }
@@ -1239,13 +1567,20 @@
##
# @block-dirty-bitmap-add:
#
-# Create a dirty bitmap with a name on the node
+# Create a dirty bitmap with a name on the node, and start tracking writes to it.
#
# Returns: nothing on success
# If @node is not a valid block device or node, DeviceNotFound
# If @name is already taken, GenericError with an explanation
#
# Since: 2.4
+#
+# Example:
+#
+# -> { "execute": "block-dirty-bitmap-add",
+# "arguments": { "node": "drive0", "name": "bitmap0" } }
+# <- { "return": {} }
+#
##
{ 'command': 'block-dirty-bitmap-add',
'data': 'BlockDirtyBitmapAdd' }
@@ -1253,7 +1588,8 @@
##
# @block-dirty-bitmap-remove:
#
-# Remove a dirty bitmap on the node
+# Stop write tracking and remove the dirty bitmap that was created
+# with block-dirty-bitmap-add.
#
# Returns: nothing on success
# If @node is not a valid block device or node, DeviceNotFound
@@ -1261,6 +1597,13 @@
# if @name is frozen by an operation, GenericError
#
# Since: 2.4
+#
+# Example:
+#
+# -> { "execute": "block-dirty-bitmap-remove",
+# "arguments": { "node": "drive0", "name": "bitmap0" } }
+# <- { "return": {} }
+#
##
{ 'command': 'block-dirty-bitmap-remove',
'data': 'BlockDirtyBitmap' }
@@ -1268,13 +1611,22 @@
##
# @block-dirty-bitmap-clear:
#
-# Clear (reset) a dirty bitmap on the device
+# Clear (reset) a dirty bitmap on the device, so that an incremental
+# backup from this point in time forward will only back up clusters
+# modified after this clear operation.
#
# Returns: nothing on success
# If @node is not a valid block device, DeviceNotFound
# If @name is not found, GenericError with an explanation
#
# Since: 2.4
+#
+# Example:
+#
+# -> { "execute": "block-dirty-bitmap-clear",
+# "arguments": { "node": "drive0", "name": "bitmap0" } }
+# <- { "return": {} }
+#
##
{ 'command': 'block-dirty-bitmap-clear',
'data': 'BlockDirtyBitmap' }
@@ -1322,6 +1674,15 @@
# Returns: nothing on success.
#
# Since: 2.6
+#
+# Example:
+#
+# -> { "execute": "blockdev-mirror",
+# "arguments": { "device": "ide-hd0",
+# "target": "target0",
+# "sync": "full" } }
+# <- { "return": {} }
+#
##
{ 'command': 'blockdev-mirror',
'data': { '*job-id': 'str', 'device': 'str', 'target': 'str',
@@ -1363,6 +1724,26 @@
# If @device is not a valid block device, DeviceNotFound
#
# Since: 1.1
+#
+# Example:
+#
+# -> { "execute": "block_set_io_throttle",
+# "arguments": { "id": "ide0-1-0",
+# "bps": 1000000,
+# "bps_rd": 0,
+# "bps_wr": 0,
+# "iops": 0,
+# "iops_rd": 0,
+# "iops_wr": 0,
+# "bps_max": 8000000,
+# "bps_rd_max": 0,
+# "bps_wr_max": 0,
+# "iops_max": 0,
+# "iops_rd_max": 0,
+# "iops_wr_max": 0,
+# "bps_max_length": 60,
+# "iops_size": 0 } }
+# <- { "return": {} }
##
{ 'command': 'block_set_io_throttle', 'boxed': true,
'data': 'BlockIOThrottle' }
@@ -1511,7 +1892,17 @@
# 'stop' and 'enospc' can only be used if the block device
# supports io-status (see BlockInfo). Since 1.3.
#
+# Returns: Nothing on success. If @device does not exist, DeviceNotFound.
+#
# Since: 1.1
+#
+# Example:
+#
+# -> { "execute": "block-stream",
+# "arguments": { "device": "virtio0",
+# "base": "/tmp/master.qcow2" } }
+# <- { "return": {} }
+#
##
{ 'command': 'block-stream',
'data': { '*job-id': 'str', 'device': 'str', '*base': 'str',
@@ -2441,13 +2832,52 @@
# BlockBackend will be created; otherwise, @node-name is mandatory at the top
# level and no BlockBackend will be created.
#
-# This command is still a work in progress. It doesn't support all
+# For the arguments, see the documentation of BlockdevOptions.
+#
+# Note: This command is still a work in progress. It doesn't support all
# block drivers among other things. Stay away from it unless you want
# to help with its development.
#
-# For the arguments, see the documentation of BlockdevOptions.
-#
# Since: 1.7
+#
+# Example:
+#
+# 1.
+# -> { "execute": "blockdev-add",
+# "arguments": {
+# "options" : { "driver": "qcow2",
+# "file": { "driver": "file",
+# "filename": "test.qcow2" } } } }
+# <- { "return": {} }
+#
+# 2.
+# -> { "execute": "blockdev-add",
+# "arguments": {
+# "options": {
+# "driver": "qcow2",
+# "node-name": "node0",
+# "discard": "unmap",
+# "cache": {
+# "direct": true,
+# "writeback": true
+# },
+# "file": {
+# "driver": "file",
+# "filename": "/tmp/test.qcow2"
+# },
+# "backing": {
+# "driver": "raw",
+# "file": {
+# "driver": "file",
+# "filename": "/dev/fdset/4"
+# }
+# }
+# }
+# }
+# }
+#
+# <- { "return": {} }
+#
##
{ 'command': 'blockdev-add', 'data': 'BlockdevOptions', 'boxed': true }
@@ -2458,13 +2888,35 @@
# The command will fail if the node is attached to a device or is
# otherwise being used.
#
-# This command is still a work in progress and is considered
+# @node-name: Name of the graph node to delete.
+#
+# Note: This command is still a work in progress and is considered
# experimental. Stay away from it unless you want to help with its
# development.
#
-# @node-name: Name of the graph node to delete.
-#
# Since: 2.5
+#
+# Example:
+#
+# -> { "execute": "blockdev-add",
+# "arguments": {
+# "options": {
+# "driver": "qcow2",
+# "node-name": "node0",
+# "file": {
+# "driver": "file",
+# "filename": "test.qcow2"
+# }
+# }
+# }
+# }
+# <- { "return": {} }
+#
+# -> { "execute": "x-blockdev-del",
+# "arguments": { "node-name": "node0" }
+# }
+# <- { "return": {} }
+#
##
{ 'command': 'x-blockdev-del', 'data': { 'node-name': 'str' } }
@@ -2496,6 +2948,21 @@
# it is locked
#
# Since: 2.5
+#
+# Example:
+#
+# -> { "execute": "blockdev-open-tray",
+# "arguments": { "id": "ide0-1-0" } }
+#
+# <- { "timestamp": { "seconds": 1418751016,
+# "microseconds": 716996 },
+# "event": "DEVICE_TRAY_MOVED",
+# "data": { "device": "ide1-cd0",
+# "id": "ide0-1-0",
+# "tray-open": true } }
+#
+# <- { "return": {} }
+#
##
{ 'command': 'blockdev-open-tray',
'data': { '*device': 'str',
@@ -2516,6 +2983,21 @@
# @id: #optional The name or QOM path of the guest device (since: 2.8)
#
# Since: 2.5
+#
+# Example:
+#
+# -> { "execute": "blockdev-close-tray",
+# "arguments": { "id": "ide0-1-0" } }
+#
+# <- { "timestamp": { "seconds": 1418751345,
+# "microseconds": 272147 },
+# "event": "DEVICE_TRAY_MOVED",
+# "data": { "device": "ide1-cd0",
+# "id": "ide0-1-0",
+# "tray-open": false } }
+#
+# <- { "return": {} }
+#
##
{ 'command': 'blockdev-close-tray',
'data': { '*device': 'str',
@@ -2530,14 +3012,40 @@
#
# If the tray is open and there is no medium inserted, this will be a no-op.
#
-# This command is still a work in progress and is considered experimental.
-# Stay away from it unless you want to help with its development.
-#
# @device: #optional Block device name (deprecated, use @id instead)
#
# @id: #optional The name or QOM path of the guest device (since: 2.8)
#
+# Note: This command is still a work in progress and is considered experimental.
+# Stay away from it unless you want to help with its development.
+#
# Since: 2.5
+#
+# Example:
+#
+# -> { "execute": "x-blockdev-remove-medium",
+# "arguments": { "id": "ide0-1-0" } }
+#
+# <- { "error": { "class": "GenericError",
+# "desc": "Tray of device 'ide0-1-0' is not open" } }
+#
+# -> { "execute": "blockdev-open-tray",
+# "arguments": { "id": "ide0-1-0" } }
+#
+# <- { "timestamp": { "seconds": 1418751627,
+# "microseconds": 549958 },
+# "event": "DEVICE_TRAY_MOVED",
+# "data": { "device": "ide1-cd0",
+# "id": "ide0-1-0",
+# "tray-open": true } }
+#
+# <- { "return": {} }
+#
+# -> { "execute": "x-blockdev-remove-medium",
+# "arguments": { "device": "ide0-1-0" } }
+#
+# <- { "return": {} }
+#
##
{ 'command': 'x-blockdev-remove-medium',
'data': { '*device': 'str',
@@ -2550,16 +3058,33 @@
# device's tray must currently be open (unless there is no attached guest
# device) and there must be no medium inserted already.
#
-# This command is still a work in progress and is considered experimental.
-# Stay away from it unless you want to help with its development.
-#
# @device: #optional Block device name (deprecated, use @id instead)
#
# @id: #optional The name or QOM path of the guest device (since: 2.8)
#
# @node-name: name of a node in the block driver state graph
#
+# Note: This command is still a work in progress and is considered experimental.
+# Stay away from it unless you want to help with its development.
+#
# Since: 2.5
+#
+# Example:
+#
+# -> { "execute": "blockdev-add",
+# "arguments": {
+# "options": { "node-name": "node0",
+# "driver": "raw",
+# "file": { "driver": "file",
+# "filename": "fedora.iso" } } } }
+# <- { "return": {} }
+#
+# -> { "execute": "x-blockdev-insert-medium",
+# "arguments": { "id": "ide0-1-0",
+# "node-name": "node0" } }
+#
+# <- { "return": {} }
+#
##
{ 'command': 'x-blockdev-insert-medium',
'data': { '*device': 'str',
@@ -2580,6 +3105,7 @@
# @read-write: Makes the device writable
#
# Since: 2.3
+#
##
{ 'enum': 'BlockdevChangeReadOnlyMode',
'data': ['retain', 'read-only', 'read-write'] }
@@ -2607,6 +3133,37 @@
# to 'retain'
#
# Since: 2.5
+#
+# Examples:
+#
+# 1. Change a removable medium
+#
+# -> { "execute": "blockdev-change-medium",
+# "arguments": { "id": "ide0-1-0",
+# "filename": "/srv/images/Fedora-12-x86_64-DVD.iso",
+# "format": "raw" } }
+# <- { "return": {} }
+#
+# 2. Load a read-only medium into a writable drive
+#
+# -> { "execute": "blockdev-change-medium",
+# "arguments": { "id": "floppyA",
+# "filename": "/srv/images/ro.img",
+# "format": "raw",
+# "read-only-mode": "retain" } }
+#
+# <- { "error":
+# { "class": "GenericError",
+# "desc": "Could not open '/srv/images/ro.img': Permission denied" } }
+#
+# -> { "execute": "blockdev-change-medium",
+# "arguments": { "id": "floppyA",
+# "filename": "/srv/images/ro.img",
+# "format": "raw",
+# "read-only-mode": "read-only" } }
+#
+# <- { "return": {} }
+#
##
{ 'command': 'blockdev-change-medium',
'data': { '*device': 'str',
@@ -2636,7 +3193,10 @@
##
# @BLOCK_IMAGE_CORRUPTED:
#
-# Emitted when a corruption has been detected in a disk image
+# Emitted when a disk image is being marked corrupt. The image can be
+# identified by its device or node name. The 'device' field is always
+# present for compatibility reasons, but it can be empty ("") if the
+# image does not have a device name associated.
#
# @device: device name. This is always present for compatibility
# reasons, but it can be empty ("") if the image does not
@@ -2654,10 +3214,21 @@
# @size: #optional, if the corruption resulted from an image access, this is
# the access size
#
-# fatal: if set, the image is marked corrupt and therefore unusable after this
+# @fatal: if set, the image is marked corrupt and therefore unusable after this
# event and must be repaired (Since 2.2; before, every
# BLOCK_IMAGE_CORRUPTED event was fatal)
#
+# Note: If action is "stop", a STOP event will eventually follow the
+# BLOCK_IO_ERROR event.
+#
+# Example:
+#
+# <- { "event": "BLOCK_IMAGE_CORRUPTED",
+# "data": { "device": "ide0-hd0", "node-name": "node0",
+# "msg": "Prevented active L1 table overwrite", "offset": 196608,
+# "size": 65536 },
+# "timestamp": { "seconds": 1378126126, "microseconds": 966463 } }
+#
# Since: 1.7
##
{ 'event': 'BLOCK_IMAGE_CORRUPTED',
@@ -2698,6 +3269,16 @@
# BLOCK_IO_ERROR event
#
# Since: 0.13.0
+#
+# Note: If action is "stop", a STOP event will eventually follow the
+# BLOCK_IO_ERROR event.
+#
+# Example:
+#
+# <- { "event": "BLOCK_IO_ERROR",
+# "data": { "device": "ide0-hd1",
+# "node-name": "#block212",
+# "operation": "write",
+# "action": "stop" },
+# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
+#
##
{ 'event': 'BLOCK_IO_ERROR',
'data': { 'device': 'str', 'node-name': 'str', 'operation': 'IoOperationType',
@@ -2727,6 +3308,15 @@
# interpret the error string
#
# Since: 1.1
+#
+# Example:
+#
+# <- { "event": "BLOCK_JOB_COMPLETED",
+# "data": { "type": "stream", "device": "virtio-disk0",
+# "len": 10737418240, "offset": 10737418240,
+# "speed": 0 },
+# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } }
+#
##
{ 'event': 'BLOCK_JOB_COMPLETED',
'data': { 'type' : 'BlockJobType',
@@ -2754,6 +3344,15 @@
# @speed: rate limit, bytes per second
#
# Since: 1.1
+#
+# Example:
+#
+# <- { "event": "BLOCK_JOB_CANCELLED",
+# "data": { "type": "stream", "device": "virtio-disk0",
+# "len": 10737418240, "offset": 134217728,
+# "speed": 0 },
+# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } }
+#
##
{ 'event': 'BLOCK_JOB_CANCELLED',
'data': { 'type' : 'BlockJobType',
@@ -2775,6 +3374,15 @@
# @action: action that has been taken
#
# Since: 1.3
+#
+# Example:
+#
+# <- { "event": "BLOCK_JOB_ERROR",
+# "data": { "device": "ide0-hd1",
+# "operation": "write",
+# "action": "stop" },
+# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
+#
##
{ 'event': 'BLOCK_JOB_ERROR',
'data': { 'device' : 'str',
@@ -2802,6 +3410,14 @@
# event
#
# Since: 1.3
+#
+# Example:
+#
+# <- { "event": "BLOCK_JOB_READY",
+# "data": { "device": "drive0", "type": "mirror", "speed": 0,
+# "len": 2097152, "offset": 2097152 }
+# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
+#
##
{ 'event': 'BLOCK_JOB_READY',
'data': { 'type' : 'BlockJobType',
@@ -2854,8 +3470,12 @@
##
# @block-set-write-threshold:
#
-# Change the write threshold for a block drive. An event will be delivered
-# if a write to this block drive crosses the configured threshold.
+# Change the write threshold for a block drive. An event will be
+# delivered if a write to this block drive crosses the configured
+# threshold. The threshold is an offset and thus must be
+# non-negative. The default is no write threshold; setting the
+# threshold to zero disables it.
+#
# This is useful to transparently resize thin-provisioned drives without
# the guest OS noticing.
#
@@ -2865,6 +3485,14 @@
# Use 0 to disable the threshold.
#
# Since: 2.3
+#
+# Example:
+#
+# -> { "execute": "block-set-write-threshold",
+# "arguments": { "node-name": "mydev",
+# "write-threshold": 17179869184 } }
+# <- { "return": {} }
+#
##
{ 'command': 'block-set-write-threshold',
'data': { 'node-name': 'str', 'write-threshold': 'uint64' } }
@@ -2895,6 +3523,28 @@
# the rest of the array.
#
# Since: 2.7
+#
+# Example:
+#
+# 1. Add a new node to a quorum
+# -> { "execute": "blockdev-add",
+# "arguments": {
+# "options": { "driver": "raw",
+# "node-name": "new_node",
+# "file": { "driver": "file",
+# "filename": "test.raw" } } } }
+# <- { "return": {} }
+# -> { "execute": "x-blockdev-change",
+# "arguments": { "parent": "disk1",
+# "node": "new_node" } }
+# <- { "return": {} }
+#
+# 2. Delete a quorum's node
+# -> { "execute": "x-blockdev-change",
+# "arguments": { "parent": "disk1",
+# "child": "children.1" } }
+# <- { "return": {} }
+#
##
{ 'command': 'x-blockdev-change',
'data' : { 'parent': 'str',
diff --git a/qapi/block.json b/qapi/block.json
index 8e9f59019a..22da91441b 100644
--- a/qapi/block.json
+++ b/qapi/block.json
@@ -1,11 +1,17 @@
# -*- Mode: Python -*-
-#
-# QAPI block definitions (vm related)
+
+##
+# = QAPI block definitions
+##
# QAPI block core definitions
{ 'include': 'block-core.json' }
##
+# == QAPI block definitions (vm related)
+##
+
+##
# @BiosAtaTranslation:
#
# Policy that BIOS should use to interpret cylinder/head/sector
@@ -75,19 +81,33 @@
##
# @blockdev-snapshot-internal-sync:
#
-# Synchronously take an internal snapshot of a block device, when the format
-# of the image used supports it.
+# Synchronously take an internal snapshot of a block device, when the
+# format of the image used supports it. If the name is an empty
+# string, or a snapshot with the same name already exists, the
+# operation will fail.
#
# For the arguments, see the documentation of BlockdevSnapshotInternal.
#
# Returns: nothing on success
+#
# If @device is not a valid block device, GenericError
+#
# If any snapshot matching @name exists, or @name is empty,
# GenericError
+#
# If the format of the image used does not support it,
# BlockFormatFeatureNotSupported
#
# Since: 1.7
+#
+# Example:
+#
+# -> { "execute": "blockdev-snapshot-internal-sync",
+# "arguments": { "device": "ide-hd0",
+# "name": "snapshot0" }
+# }
+# <- { "return": {} }
+#
##
{ 'command': 'blockdev-snapshot-internal-sync',
'data': 'BlockdevSnapshotInternal' }
@@ -115,6 +135,24 @@
# If @id and @name are both not specified, GenericError
#
# Since: 1.7
+#
+# Example:
+#
+# -> { "execute": "blockdev-snapshot-delete-internal-sync",
+# "arguments": { "device": "ide-hd0",
+# "name": "snapshot0" }
+# }
+# <- { "return": {
+# "id": "1",
+# "name": "snapshot0",
+# "vm-state-size": 0,
+# "date-sec": 1000012,
+# "date-nsec": 10,
+# "vm-clock-sec": 100,
+# "vm-clock-nsec": 20
+# }
+# }
+#
##
{ 'command': 'blockdev-snapshot-delete-internal-sync',
'data': { 'device': 'str', '*id': 'str', '*name': 'str'},
@@ -129,15 +167,21 @@
#
# @id: #optional The name or QOM path of the guest device (since: 2.8)
#
-# @force: @optional If true, eject regardless of whether the drive is locked.
+# @force: #optional If true, eject regardless of whether the drive is locked.
# If not specified, the default value is false.
#
# Returns: Nothing on success
+#
# If @device is not a valid block device, DeviceNotFound
#
-# Notes: Ejecting a device will no media results in success
+# Notes: Ejecting a device with no media results in success
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "eject", "arguments": { "device": "ide1-0-1" } }
+# <- { "return": {} }
##
{ 'command': 'eject',
'data': { '*device': 'str',
@@ -204,6 +248,16 @@
# @tray-open: true if the tray has been opened or false if it has been closed
#
# Since: 1.1
+#
+# Example:
+#
+# <- { "event": "DEVICE_TRAY_MOVED",
+# "data": { "device": "ide1-cd0",
+# "id": "/machine/unattached/device[22]",
+# "tray-open": true
+# },
+# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
+#
##
{ 'event': 'DEVICE_TRAY_MOVED',
'data': { 'device': 'str', 'id': 'str', 'tray-open': 'bool' } }
diff --git a/qapi/common.json b/qapi/common.json
index 624a8619c8..b626647b2f 100644
--- a/qapi/common.json
+++ b/qapi/common.json
@@ -1,6 +1,8 @@
# -*- Mode: Python -*-
-#
-# QAPI common definitions
+
+##
+# = QAPI common definitions
+##
##
# @QapiErrorClass:
@@ -75,6 +77,21 @@
# Returns: A @VersionInfo object describing the current version of QEMU.
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-version" }
+# <- {
+# "return":{
+# "qemu":{
+# "major":0,
+# "minor":11,
+# "micro":5
+# },
+# "package":""
+# }
+# }
+#
##
{ 'command': 'query-version', 'returns': 'VersionInfo' }
@@ -97,6 +114,23 @@
# Returns: A list of @CommandInfo for all supported commands
#
# Since: 0.14.0
+#
+# Example:
+#
+# -> { "execute": "query-commands" }
+# <- {
+# "return":[
+# {
+# "name":"query-balloon"
+# },
+# {
+# "name":"system_powerdown"
+# }
+# ]
+# }
+#
+# Note: This example has been shortened as the real response is too long.
+#
##
{ 'command': 'query-commands', 'returns': ['CommandInfo'] }
diff --git a/qapi/crypto.json b/qapi/crypto.json
index f4fd93b813..93a04743ea 100644
--- a/qapi/crypto.json
+++ b/qapi/crypto.json
@@ -1,6 +1,9 @@
# -*- Mode: Python -*-
#
-# QAPI crypto definitions
+
+##
+# = QAPI crypto definitions
+##
##
# @QCryptoTLSCredsEndpoint:
diff --git a/qapi/event.json b/qapi/event.json
index 37bf34ed6d..f3737b771f 100644
--- a/qapi/event.json
+++ b/qapi/event.json
@@ -1,3 +1,9 @@
+# -*- Mode: Python -*-
+
+##
+# = Other events
+##
+
##
# @SHUTDOWN:
#
@@ -8,6 +14,12 @@
# not exit, and a STOP event will eventually follow the SHUTDOWN event
#
# Since: 0.12.0
+#
+# Example:
+#
+# <- { "event": "SHUTDOWN",
+# "timestamp": { "seconds": 1267040730, "microseconds": 682951 } }
+#
##
{ 'event': 'SHUTDOWN' }
@@ -18,6 +30,12 @@
# system, such as via ACPI.
#
# Since: 0.12.0
+#
+# Example:
+#
+# <- { "event": "POWERDOWN",
+# "timestamp": { "seconds": 1267040730, "microseconds": 682951 } }
+#
##
{ 'event': 'POWERDOWN' }
@@ -27,6 +45,12 @@
# Emitted when the virtual machine is reset
#
# Since: 0.12.0
+#
+# Example:
+#
+# <- { "event": "RESET",
+# "timestamp": { "seconds": 1267041653, "microseconds": 9518 } }
+#
##
{ 'event': 'RESET' }
@@ -36,6 +60,12 @@
# Emitted when the virtual machine is stopped
#
# Since: 0.12.0
+#
+# Example:
+#
+# <- { "event": "STOP",
+# "timestamp": { "seconds": 1267041730, "microseconds": 281295 } }
+#
##
{ 'event': 'STOP' }
@@ -45,6 +75,12 @@
# Emitted when the virtual machine resumes execution
#
# Since: 0.12.0
+#
+# Example:
+#
+# <- { "event": "RESUME",
+# "timestamp": { "seconds": 1271770767, "microseconds": 582542 } }
+#
##
{ 'event': 'RESUME' }
@@ -55,6 +91,12 @@
# which is sometimes called standby state
#
# Since: 1.1
+#
+# Example:
+#
+# <- { "event": "SUSPEND",
+# "timestamp": { "seconds": 1344456160, "microseconds": 309119 } }
+#
##
{ 'event': 'SUSPEND' }
@@ -67,6 +109,12 @@
# Note: QEMU shuts down (similar to event @SHUTDOWN) when entering this state
#
# Since: 1.2
+#
+# Example:
+#
+# <- { "event": "SUSPEND_DISK",
+# "timestamp": { "seconds": 1344456160, "microseconds": 309119 } }
+#
##
{ 'event': 'SUSPEND_DISK' }
@@ -76,6 +124,12 @@
# Emitted when the guest has woken up from suspend state and is running
#
# Since: 1.1
+#
+# Example:
+#
+# <- { "event": "WAKEUP",
+# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } }
+#
##
{ 'event': 'WAKEUP' }
@@ -87,7 +141,16 @@
# @offset: offset between base RTC clock (as specified by -rtc base), and
# new RTC clock value
#
+# Note: This event is rate-limited.
+#
# Since: 0.13.0
+#
+# Example:
+#
+# <- { "event": "RTC_CHANGE",
+# "data": { "offset": 78 },
+# "timestamp": { "seconds": 1267020223, "microseconds": 435656 } }
+#
##
{ 'event': 'RTC_CHANGE',
'data': { 'offset': 'int' } }
@@ -102,7 +165,16 @@
# Note: If action is "reset", "shutdown", or "pause" the WATCHDOG event is
# followed respectively by the RESET, SHUTDOWN, or STOP events
#
+# Note: This event is rate-limited.
+#
# Since: 0.13.0
+#
+# Example:
+#
+# <- { "event": "WATCHDOG",
+# "data": { "action": "reset" },
+# "timestamp": { "seconds": 1267061043, "microseconds": 959568 } }
+#
##
{ 'event': 'WATCHDOG',
'data': { 'action': 'WatchdogExpirationAction' } }
@@ -119,6 +191,14 @@
# @path: device path
#
# Since: 1.5
+#
+# Example:
+#
+# <- { "event": "DEVICE_DELETED",
+# "data": { "device": "virtio-net-pci-0",
+# "path": "/machine/peripheral/virtio-net-pci-0" },
+# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
+#
##
{ 'event': 'DEVICE_DELETED',
'data': { '*device': 'str', 'path': 'str' } }
@@ -134,6 +214,15 @@
# @path: device path
#
# Since: 1.6
+#
+# Example:
+#
+# <- { "event": "NIC_RX_FILTER_CHANGED",
+# "data": { "name": "vnet0",
+# "path": "/machine/peripheral/vnet0/virtio-backend" },
+# "timestamp": { "seconds": 1368697518, "microseconds": 326866 } }
+#
##
{ 'event': 'NIC_RX_FILTER_CHANGED',
'data': { '*name': 'str', 'path': 'str' } }
@@ -151,6 +240,17 @@
# the authentication ID is not provided
#
# Since: 0.13.0
+#
+# Example:
+#
+# <- { "event": "VNC_CONNECTED",
+# "data": {
+# "server": { "auth": "sasl", "family": "ipv4",
+# "service": "5901", "host": "0.0.0.0" },
+# "client": { "family": "ipv4", "service": "58425",
+# "host": "127.0.0.1" } },
+# "timestamp": { "seconds": 1262976601, "microseconds": 975795 } }
+#
##
{ 'event': 'VNC_CONNECTED',
'data': { 'server': 'VncServerInfo',
@@ -167,6 +267,17 @@
# @client: client information
#
# Since: 0.13.0
+#
+# Example:
+#
+# <- { "event": "VNC_INITIALIZED",
+# "data": {
+# "server": { "auth": "sasl", "family": "ipv4",
+# "service": "5901", "host": "0.0.0.0"},
+# "client": { "family": "ipv4", "service": "46089",
+# "host": "127.0.0.1", "sasl_username": "luiz" } },
+# "timestamp": { "seconds": 1263475302, "microseconds": 150772 } }
+#
##
{ 'event': 'VNC_INITIALIZED',
'data': { 'server': 'VncServerInfo',
@@ -182,6 +293,17 @@
# @client: client information
#
# Since: 0.13.0
+#
+# Example:
+#
+# <- { "event": "VNC_DISCONNECTED",
+# "data": {
+# "server": { "auth": "sasl", "family": "ipv4",
+# "service": "5901", "host": "0.0.0.0" },
+# "client": { "family": "ipv4", "service": "58425",
+# "host": "127.0.0.1", "sasl_username": "luiz" } },
+# "timestamp": { "seconds": 1262976601, "microseconds": 975795 } }
+#
##
{ 'event': 'VNC_DISCONNECTED',
'data': { 'server': 'VncServerInfo',
@@ -197,6 +319,16 @@
# @client: client information
#
# Since: 0.14.0
+#
+# Example:
+#
+# <- { "timestamp": {"seconds": 1290688046, "microseconds": 388707},
+# "event": "SPICE_CONNECTED",
+# "data": {
+# "server": { "port": "5920", "family": "ipv4", "host": "127.0.0.1"},
+# "client": {"port": "52873", "family": "ipv4", "host": "127.0.0.1"}
+# }}
+#
##
{ 'event': 'SPICE_CONNECTED',
'data': { 'server': 'SpiceBasicInfo',
@@ -213,6 +345,18 @@
# @client: client information
#
# Since: 0.14.0
+#
+# Example:
+#
+# <- { "timestamp": {"seconds": 1290688046, "microseconds": 417172},
+# "event": "SPICE_INITIALIZED",
+# "data": {"server": {"auth": "spice", "port": "5921",
+# "family": "ipv4", "host": "127.0.0.1"},
+# "client": {"port": "49004", "family": "ipv4", "channel-type": 3,
+# "connection-id": 1804289383, "host": "127.0.0.1",
+# "channel-id": 0, "tls": true}
+# }}
+#
##
{ 'event': 'SPICE_INITIALIZED',
'data': { 'server': 'SpiceServerInfo',
@@ -228,6 +372,16 @@
# @client: client information
#
# Since: 0.14.0
+#
+# Example:
+#
+# <- { "timestamp": {"seconds": 1290688046, "microseconds": 388707},
+# "event": "SPICE_DISCONNECTED",
+# "data": {
+# "server": { "port": "5920", "family": "ipv4", "host": "127.0.0.1"},
+# "client": {"port": "52873", "family": "ipv4", "host": "127.0.0.1"}
+# }}
+#
##
{ 'event': 'SPICE_DISCONNECTED',
'data': { 'server': 'SpiceBasicInfo',
@@ -239,6 +393,12 @@
# Emitted when SPICE migration has completed
#
# Since: 1.3
+#
+# Example:
+#
+# <- { "timestamp": {"seconds": 1290688046, "microseconds": 417172},
+# "event": "SPICE_MIGRATE_COMPLETED" }
+#
##
{ 'event': 'SPICE_MIGRATE_COMPLETED' }
@@ -250,6 +410,13 @@
# @status: @MigrationStatus describing the current migration status.
#
# Since: 2.4
+#
+# Example:
+#
+# <- {"timestamp": {"seconds": 1432121972, "microseconds": 744001},
+# "event": "MIGRATION",
+# "data": {"status": "completed"} }
+#
##
{ 'event': 'MIGRATION',
'data': {'status': 'MigrationStatus'}}
@@ -263,6 +430,12 @@
# @pass: An incrementing count (starting at 1 on the first pass)
#
# Since: 2.6
+#
+# Example:
+#
+# { "timestamp": {"seconds": 1449669631, "microseconds": 239225},
+# "event": "MIGRATION_PASS", "data": {"pass": 2} }
+#
##
{ 'event': 'MIGRATION_PASS',
'data': { 'pass': 'int' } }
@@ -272,9 +445,16 @@
#
# Emitted when guest executes ACPI _OST method.
#
+# @info: ACPIOSTInfo type as described in qapi-schema.json
+#
# Since: 2.1
#
-# @info: ACPIOSTInfo type as described in qapi-schema.json
+# Example:
+#
+# <- { "event": "ACPI_DEVICE_OST",
+# "data": { "device": "d1", "slot": "0",
+# "slot-type": "DIMM", "source": 1, "status": 0 } }
+#
##
{ 'event': 'ACPI_DEVICE_OST',
'data': { 'info': 'ACPIOSTInfo' } }
@@ -287,7 +467,16 @@
#
# @actual: actual level of the guest memory balloon in bytes
#
+# Note: This event is rate-limited.
+#
# Since: 1.2
+#
+# Example:
+#
+# <- { "event": "BALLOON_CHANGE",
+# "data": { "actual": 944766976 },
+# "timestamp": { "seconds": 1267020223, "microseconds": 435656 } }
+#
##
{ 'event': 'BALLOON_CHANGE',
'data': { 'actual': 'int' } }
@@ -300,6 +489,12 @@
# @action: action that has been taken, currently always "pause"
#
# Since: 1.5
+#
+# Example:
+#
+# <- { "event": "GUEST_PANICKED",
+# "data": { "action": "pause" } }
+#
##
{ 'event': 'GUEST_PANICKED',
'data': { 'action': 'GuestPanicAction' } }
@@ -315,7 +510,16 @@
#
# @sectors-count: failed read operation sector count
#
+# Note: This event is rate-limited.
+#
# Since: 2.0
+#
+# Example:
+#
+# <- { "event": "QUORUM_FAILURE",
+# "data": { "reference": "usr1", "sector-num": 345435, "sectors-count": 5 },
+# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } }
+#
##
{ 'event': 'QUORUM_FAILURE',
'data': { 'reference': 'str', 'sector-num': 'int', 'sectors-count': 'int' } }
@@ -338,7 +542,26 @@
#
# @sectors-count: failed read operation sector count
#
+# Note: This event is rate-limited.
+#
# Since: 2.0
+#
+# Example:
+#
+# 1. Read operation
+#
+# { "event": "QUORUM_REPORT_BAD",
+# "data": { "node-name": "node0", "sector-num": 345435, "sectors-count": 5,
+# "type": "read" },
+# "timestamp": { "seconds": 1344522075, "microseconds": 745528 } }
+#
+# 2. Flush operation
+#
+# { "event": "QUORUM_REPORT_BAD",
+# "data": { "node-name": "node0", "sector-num": 0, "sectors-count": 2097120,
+# "type": "flush", "error": "Broken pipe" },
+# "timestamp": { "seconds": 1456406829, "microseconds": 291763 } }
+#
##
{ 'event': 'QUORUM_REPORT_BAD',
'data': { 'type': 'QuorumOpType', '*error': 'str', 'node-name': 'str',
@@ -354,6 +577,13 @@
# @open: true if the guest has opened the virtio-serial port
#
# Since: 2.1
+#
+# Example:
+#
+# <- { "event": "VSERPORT_CHANGE",
+# "data": { "id": "channel0", "open": true },
+# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } }
+#
##
{ 'event': 'VSERPORT_CHANGE',
'data': { 'id': 'str', 'open': 'bool' } }
@@ -368,6 +598,15 @@
# @msg: Informative message
#
# Since: 2.4
+#
+# Example:
+#
+# <- { "event": "MEM_UNPLUG_ERROR"
+# "data": { "device": "dimm1",
+# "msg": "acpi: device unplug for unsupported device"
+# },
+# "timestamp": { "seconds": 1265044230, "microseconds": 450486 } }
+#
##
{ 'event': 'MEM_UNPLUG_ERROR',
'data': { 'device': 'str', 'msg': 'str' } }
@@ -384,6 +623,13 @@
# user should not try to interpret the error string.
#
# Since: 2.6
+#
+# Example:
+#
+# { "event": "DUMP_COMPLETED",
+# "data": {"result": {"total": 1090650112, "status": "completed",
+# "completed": 1090650112} } }
+#
##
{ 'event': 'DUMP_COMPLETED' ,
'data': { 'result': 'DumpQueryResult', '*error': 'str' } }
diff --git a/qapi/introspect.json b/qapi/introspect.json
index fd4dc84196..f6adc439bb 100644
--- a/qapi/introspect.json
+++ b/qapi/introspect.json
@@ -78,14 +78,13 @@
# @SchemaInfo:
#
# @name: the entity's name, inherited from @base.
+# The SchemaInfo is always referenced by this name.
# Commands and events have the name defined in the QAPI schema.
# Unlike command and event names, type names are not part of
# the wire ABI. Consequently, type names are meaningless
# strings here, although they are still guaranteed unique
# regardless of @meta-type.
#
-# All references to other SchemaInfo are by name.
-#
# @meta-type: the entity's meta type, inherited from @base.
#
# Additional members depend on the value of @meta-type.
@@ -258,7 +257,7 @@
#
# @ret-type: the name of the command's result type.
#
-# TODO @success-response (currently irrelevant, because it's QGA, not QMP)
+# TODO: @success-response (currently irrelevant, because it's QGA, not QMP)
#
# Since: 2.5
##
diff --git a/qapi/rocker.json b/qapi/rocker.json
index ace27760f1..97e2b8376f 100644
--- a/qapi/rocker.json
+++ b/qapi/rocker.json
@@ -1,4 +1,8 @@
##
+# = Rocker switch device
+##
+
+##
# @RockerSwitch:
#
# Rocker switch information.
@@ -22,6 +26,12 @@
# Returns: @Rocker information
#
# Since: 2.4
+#
+# Example:
+#
+# -> { "execute": "query-rocker", "arguments": { "name": "sw1" } }
+# <- { "return": {"name": "sw1", "ports": 2, "id": 1327446905938}}
+#
##
{ 'command': 'query-rocker',
'data': { 'name': 'str' },
@@ -80,11 +90,21 @@
##
# @query-rocker-ports:
#
-# Return rocker switch information.
+# Return rocker switch port information.
#
-# Returns: @Rocker information
+# Returns: a list of @RockerPort information
#
# Since: 2.4
+#
+# Example:
+#
+# -> { "execute": "query-rocker-ports", "arguments": { "name": "sw1" } }
+# <- { "return": [ {"duplex": "full", "enabled": true, "name": "sw1.1",
+# "autoneg": "off", "link-up": true, "speed": 10000},
+# {"duplex": "full", "enabled": true, "name": "sw1.2",
+# "autoneg": "off", "link-up": true, "speed": 10000}
+# ]}
+#
##
{ 'command': 'query-rocker-ports',
'data': { 'name': 'str' },
@@ -215,9 +235,23 @@
# @tbl-id: #optional flow table ID. If tbl-id is not specified, returns
# flow information for all tables.
#
-# Returns: @Rocker OF-DPA flow information
+# Returns: rocker OF-DPA flow information
#
# Since: 2.4
+#
+# Example:
+#
+# -> { "execute": "query-rocker-of-dpa-flows",
+# "arguments": { "name": "sw1" } }
+# <- { "return": [ {"key": {"in-pport": 0, "priority": 1, "tbl-id": 0},
+# "hits": 138,
+# "cookie": 0,
+# "action": {"goto-tbl": 10},
+# "mask": {"in-pport": 4294901760}
+# },
+# {...more...},
+# ]}
+#
##
{ 'command': 'query-rocker-of-dpa-flows',
'data': { 'name': 'str', '*tbl-id': 'uint32' },
@@ -277,9 +311,28 @@
# @type: #optional group type. If type is not specified, returns
# group information for all group types.
#
-# Returns: @Rocker OF-DPA group information
+# Returns: rocker OF-DPA group information
#
# Since: 2.4
+#
+# Example:
+#
+# -> { "execute": "query-rocker-of-dpa-groups",
+# "arguments": { "name": "sw1" } }
+# <- { "return": [ {"type": 0, "out-pport": 2,
+# "pport": 2, "vlan-id": 3841,
+# "pop-vlan": 1, "id": 251723778},
+# {"type": 0, "out-pport": 0,
+# "pport": 0, "vlan-id": 3841,
+# "pop-vlan": 1, "id": 251723776},
+# {"type": 0, "out-pport": 1,
+# "pport": 1, "vlan-id": 3840,
+# "pop-vlan": 1, "id": 251658241},
+# {"type": 0, "out-pport": 0,
+# "pport": 0, "vlan-id": 3840,
+# "pop-vlan": 1, "id": 251658240}
+# ]}
+#
##
{ 'command': 'query-rocker-of-dpa-groups',
'data': { 'name': 'str', '*type': 'uint8' },
diff --git a/qapi/trace.json b/qapi/trace.json
index 4fd39b7792..2bfda7ac7c 100644
--- a/qapi/trace.json
+++ b/qapi/trace.json
@@ -5,6 +5,9 @@
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
+##
+# = Tracing commands
+##
##
# @TraceEventState:
@@ -59,6 +62,13 @@
# an error is returned.
#
# Since: 2.2
+#
+# Example:
+#
+# -> { "execute": "trace-event-get-state",
+# "arguments": { "name": "qemu_memalign" } }
+# <- { "return": [ { "name": "qemu_memalign", "state": "disabled" } ] }
+#
##
{ 'command': 'trace-event-get-state',
'data': {'name': 'str', '*vcpu': 'int'},
@@ -84,6 +94,13 @@
# error is returned.
#
# Since: 2.2
+#
+# Example:
+#
+# -> { "execute": "trace-event-set-state",
+# "arguments": { "name": "qemu_memalign", "enable": "true" } }
+# <- { "return": {} }
+#
##
{ 'command': 'trace-event-set-state',
'data': {'name': 'str', 'enable': 'bool', '*ignore-unavailable': 'bool',
diff --git a/qdev-monitor.c b/qdev-monitor.c
index c73410c02e..81d01df928 100644
--- a/qdev-monitor.c
+++ b/qdev-monitor.c
@@ -29,6 +29,7 @@
#include "qemu/error-report.h"
#include "qemu/help_option.h"
#include "sysemu/block-backend.h"
+#include "migration/migration.h"
/*
* Aliases were a bad idea from the start. Let's keep them
@@ -577,6 +578,14 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp)
return NULL;
}
+    if (only_migratable) {
+        /* Devices without a vmsd cannot declare themselves unmigratable. */
+        if (dc->vmsd && dc->vmsd->unmigratable) {
+            error_setg(errp, "Device %s is not migratable, but "
+                       "--only-migratable was specified", driver);
+            return NULL;
+        }
+    }
+
/* find bus */
path = qemu_opt_get(opts, "bus");
if (path != NULL) {
diff --git a/qemu-char.c b/qemu-char.c
index 2c9940cea4..d8da1677ff 100644
--- a/qemu-char.c
+++ b/qemu-char.c
@@ -499,7 +499,7 @@ void qemu_chr_fe_printf(CharBackend *be, const char *fmt, ...)
static void remove_fd_in_watch(CharDriverState *chr);
static void mux_chr_set_handlers(CharDriverState *chr, GMainContext *context);
-static void mux_set_focus(MuxDriver *d, int focus);
+static void mux_set_focus(CharDriverState *chr, int focus);
static int null_chr_write(CharDriverState *chr, const uint8_t *buf, int len)
{
@@ -666,7 +666,7 @@ static int mux_proc_byte(CharDriverState *chr, MuxDriver *d, int ch)
case 'c':
assert(d->mux_cnt > 0); /* handler registered with first fe */
/* Switch to the next registered device */
- mux_set_focus(d, (d->focus + 1) % d->mux_cnt);
+ mux_set_focus(chr, (d->focus + 1) % d->mux_cnt);
break;
case 't':
d->timestamps = !d->timestamps;
@@ -826,8 +826,10 @@ static void mux_chr_set_handlers(CharDriverState *chr, GMainContext *context)
context, true);
}
-static void mux_set_focus(MuxDriver *d, int focus)
+static void mux_set_focus(CharDriverState *chr, int focus)
{
+ MuxDriver *d = chr->opaque;
+
assert(focus >= 0);
assert(focus < d->mux_cnt);
@@ -836,6 +838,7 @@ static void mux_set_focus(MuxDriver *d, int focus)
}
d->focus = focus;
+ chr->be = d->backends[focus];
mux_chr_send_event(d, d->focus, CHR_EVENT_MUX_IN);
}
@@ -935,7 +938,9 @@ void qemu_chr_fe_deinit(CharBackend *b)
if (b->chr) {
qemu_chr_fe_set_handlers(b, NULL, NULL, NULL, NULL, NULL, true);
- b->chr->be = NULL;
+ if (b->chr->be == b) {
+ b->chr->be = NULL;
+ }
if (b->chr->is_mux) {
MuxDriver *d = b->chr->opaque;
d->backends[b->tag] = NULL;
@@ -999,7 +1004,7 @@ void qemu_chr_fe_take_focus(CharBackend *b)
}
if (b->chr->is_mux) {
- mux_set_focus(b->chr->opaque, b->tag);
+ mux_set_focus(b->chr, b->tag);
}
}
@@ -3272,14 +3277,13 @@ static void tcp_chr_telnet_init(CharDriverState *chr)
}
-static void tcp_chr_tls_handshake(Object *source,
- Error *err,
+static void tcp_chr_tls_handshake(QIOTask *task,
gpointer user_data)
{
CharDriverState *chr = user_data;
TCPCharDriver *s = chr->opaque;
- if (err) {
+ if (qio_task_propagate_error(task, NULL)) {
tcp_chr_disconnect(chr);
} else {
if (s->do_telnetopt) {
@@ -3487,20 +3491,23 @@ static void tcp_chr_free(CharDriverState *chr)
}
-static void qemu_chr_socket_connected(Object *src, Error *err, void *opaque)
+static void qemu_chr_socket_connected(QIOTask *task, void *opaque)
{
- QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(src);
+ QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task));
CharDriverState *chr = opaque;
TCPCharDriver *s = chr->opaque;
+ Error *err = NULL;
- if (err) {
+ if (qio_task_propagate_error(task, &err)) {
check_report_connect_error(chr, err);
- object_unref(src);
- return;
+ error_free(err);
+ goto cleanup;
}
s->connect_err_reported = false;
tcp_chr_new_client(chr, sioc);
+
+ cleanup:
object_unref(OBJECT(sioc));
}
diff --git a/qemu-doc.texi b/qemu-doc.texi
index 02cb39d430..794ab4a080 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -1037,7 +1037,7 @@ qemu-system-i386 -iscsi initiator-name=iqn.qemu.test:my-initiator \
@node disk_images_gluster
@subsection GlusterFS disk images
-GlusterFS is an user space distributed file system.
+GlusterFS is a user space distributed file system.
You can boot from the GlusterFS disk image with the command:
@example
@@ -2138,7 +2138,17 @@ Use the executable @file{qemu-system-sparc64} to simulate a Sun4u
(UltraSPARC PC-like machine), Sun4v (T1 PC-like machine), or generic
Niagara (T1) machine. The Sun4u emulator is mostly complete, being
able to run Linux, NetBSD and OpenBSD in headless (-nographic) mode. The
-Sun4v and Niagara emulators are still a work in progress.
+Sun4v emulator is still a work in progress.
+
+The Niagara T1 emulator makes use of firmware and OS binaries supplied in the S10image/ directory
+of the OpenSPARC T1 project @url{http://download.oracle.com/technetwork/systems/opensparc/OpenSPARCT1_Arch.1.5.tar.bz2}
+and is able to boot the disk.s10hw2 Solaris image.
+@example
+qemu-system-sparc64 -M niagara -L /path-to/S10image/ \
+ -nographic -m 256 \
+ -drive if=pflash,readonly=on,file=/S10image/disk.s10hw2
+@end example
+
QEMU emulates the following peripherals:
@@ -2173,7 +2183,7 @@ Set OpenBIOS variables in NVRAM, for example:
qemu-system-sparc64 -prom-env 'auto-boot?=false'
@end example
-@item -M [sun4u|sun4v|Niagara]
+@item -M [sun4u|sun4v|niagara]
Set the emulated machine type. The default is sun4u.
@@ -2891,6 +2901,9 @@ The binary format is detected automatically.
@command{qemu-mips} TODO.
@command{qemu-mipsel} TODO.
+@cindex user mode (NiosII)
+@command{qemu-nios2} TODO.
+
@cindex user mode (PowerPC)
@command{qemu-ppc64abi32} TODO.
@command{qemu-ppc64} TODO.
diff --git a/qemu-img.c b/qemu-img.c
index 6949b73ca5..74e3362653 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -3455,13 +3455,11 @@ static int img_amend(int argc, char **argv)
create_opts = qemu_opts_append(create_opts, bs->drv->create_opts);
opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
- if (options) {
- qemu_opts_do_parse(opts, options, NULL, &err);
- if (err) {
- error_report_err(err);
- ret = -1;
- goto out;
- }
+ qemu_opts_do_parse(opts, options, NULL, &err);
+ if (err) {
+ error_report_err(err);
+ ret = -1;
+ goto out;
}
/* In case the driver does not call amend_status_cb() */
@@ -3559,20 +3557,23 @@ static void bench_cb(void *opaque, int ret)
}
while (b->n > b->in_flight && b->in_flight < b->nrreq) {
+ int64_t offset = b->offset;
+ /* blk_aio_* might look for completed I/Os and kick bench_cb
+ * again, so make sure this operation is counted by in_flight
+ * and b->offset is ready for the next submission.
+ */
+ b->in_flight++;
+ b->offset += b->step;
+ b->offset %= b->image_size;
if (b->write) {
- acb = blk_aio_pwritev(b->blk, b->offset, b->qiov, 0,
- bench_cb, b);
+ acb = blk_aio_pwritev(b->blk, offset, b->qiov, 0, bench_cb, b);
} else {
- acb = blk_aio_preadv(b->blk, b->offset, b->qiov, 0,
- bench_cb, b);
+ acb = blk_aio_preadv(b->blk, offset, b->qiov, 0, bench_cb, b);
}
if (!acb) {
error_report("Failed to issue request");
exit(EXIT_FAILURE);
}
- b->in_flight++;
- b->offset += b->step;
- b->offset %= b->image_size;
}
}
diff --git a/qemu-options-wrapper.h b/qemu-options-wrapper.h
index 13bfea0294..4d7aeb1352 100644
--- a/qemu-options-wrapper.h
+++ b/qemu-options-wrapper.h
@@ -14,7 +14,7 @@
#define ARCHHEADING(text, arch_mask) \
if ((arch_mask) & arch_type) \
- puts(stringify(text));
+ puts(stringify(text) ":");
#define DEFHEADING(text) ARCHHEADING(text, QEMU_ARCH_ALL)
diff --git a/qemu-options.hx b/qemu-options.hx
index c534a2f7f9..588e5beab3 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -6,7 +6,7 @@ HXCOMM construct option structures, enums and help message for specified
HXCOMM architectures.
HXCOMM HXCOMM can be used for comments, discarded from both texi and C
-DEFHEADING(Standard options:)
+DEFHEADING(Standard options)
STEXI
@table @option
ETEXI
@@ -32,7 +32,6 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
" selects emulated machine ('-machine help' for list)\n"
" property accel=accel1[:accel2[:...]] selects accelerator\n"
" supported accelerators are kvm, xen, tcg (default: tcg)\n"
- " kernel_irqchip=on|off controls accelerated irqchip support\n"
" kernel_irqchip=on|off|split controls accelerated irqchip support (default=off)\n"
" vmport=on|off|auto controls emulation of vmport (default: auto)\n"
" kvm_shadow_mem=size of KVM shadow MMU in bytes\n"
@@ -119,11 +118,11 @@ specifies the maximum number of hotpluggable CPUs.
ETEXI
DEF("numa", HAS_ARG, QEMU_OPTION_numa,
- "-numa node[,mem=size][,cpus=cpu[-cpu]][,nodeid=node]\n"
- "-numa node[,memdev=id][,cpus=cpu[-cpu]][,nodeid=node]\n", QEMU_ARCH_ALL)
+ "-numa node[,mem=size][,cpus=firstcpu[-lastcpu]][,nodeid=node]\n"
+ "-numa node[,memdev=id][,cpus=firstcpu[-lastcpu]][,nodeid=node]\n", QEMU_ARCH_ALL)
STEXI
-@item -numa node[,mem=@var{size}][,cpus=@var{cpu[-cpu]}][,nodeid=@var{node}]
-@itemx -numa node[,memdev=@var{id}][,cpus=@var{cpu[-cpu]}][,nodeid=@var{node}]
+@item -numa node[,mem=@var{size}][,cpus=@var{firstcpu}[-@var{lastcpu}]][,nodeid=@var{node}]
+@itemx -numa node[,memdev=@var{id}][,cpus=@var{firstcpu}[-@var{lastcpu}]][,nodeid=@var{node}]
@findex -numa
Simulate a multi node NUMA system. If @samp{mem}, @samp{memdev}
and @samp{cpus} are omitted, resources are split equally. Also, note
@@ -250,7 +249,7 @@ use is discouraged as it may be removed from future versions.
ETEXI
DEF("m", HAS_ARG, QEMU_OPTION_m,
- "-m[emory] [size=]megs[,slots=n,maxmem=size]\n"
+ "-m [size=]megs[,slots=n,maxmem=size]\n"
" configure guest RAM\n"
" size: initial amount of guest memory\n"
" slots: number of hotplug slots (default: none)\n"
@@ -468,7 +467,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Block device options:)
+DEFHEADING(Block device options)
STEXI
@table @option
ETEXI
@@ -856,7 +855,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(USB options:)
+DEFHEADING(USB options)
STEXI
@table @option
ETEXI
@@ -920,14 +919,14 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Display options:)
+DEFHEADING(Display options)
STEXI
@table @option
ETEXI
DEF("display", HAS_ARG, QEMU_OPTION_display,
"-display sdl[,frame=on|off][,alt_grab=on|off][,ctrl_grab=on|off]\n"
- " [,window_close=on|off][,gl=on|off]|curses|none|\n"
+ " [,window_close=on|off][,gl=on|off]\n"
"-display gtk[,grab_on_hover=on|off][,gl=on|off]|\n"
"-display vnc=<display>[,<optargs>]\n"
"-display curses\n"
@@ -1449,7 +1448,7 @@ STEXI
ETEXI
ARCHHEADING(, QEMU_ARCH_I386)
-ARCHHEADING(i386 target only:, QEMU_ARCH_I386)
+ARCHHEADING(i386 target only, QEMU_ARCH_I386)
STEXI
@table @option
ETEXI
@@ -1565,7 +1564,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Network options:)
+DEFHEADING(Network options)
STEXI
@table @option
ETEXI
@@ -2146,7 +2145,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Character device options:)
+DEFHEADING(Character device options)
STEXI
The general form of a character device option is:
@@ -2481,7 +2480,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Device URL Syntax:)
+DEFHEADING(Device URL Syntax)
STEXI
In addition to using normal file images for the emulated storage devices,
@@ -2589,7 +2588,7 @@ qemu-system-i386 --drive file=sheepdog://192.0.2.1:30000/MyVirtualMachine
See also @url{http://http://www.osrg.net/sheepdog/}.
@item GlusterFS
-GlusterFS is an user space distributed file system.
+GlusterFS is a user space distributed file system.
QEMU supports the use of GlusterFS volumes for hosting VM disk images using
TCP, Unix Domain Sockets and RDMA transport protocols.
@@ -2711,7 +2710,7 @@ STEXI
@end table
ETEXI
-DEFHEADING(Bluetooth(R) options:)
+DEFHEADING(Bluetooth(R) options)
STEXI
@table @option
ETEXI
@@ -2787,7 +2786,7 @@ ETEXI
DEFHEADING()
#ifdef CONFIG_TPM
-DEFHEADING(TPM device options:)
+DEFHEADING(TPM device options)
DEF("tpmdev", HAS_ARG, QEMU_OPTION_tpmdev, \
"-tpmdev passthrough,id=id[,path=path][,cancel-path=path]\n"
@@ -2861,7 +2860,7 @@ DEFHEADING()
#endif
-DEFHEADING(Linux/Multiboot boot specific:)
+DEFHEADING(Linux/Multiboot boot specific)
STEXI
When using these options, you can use a given Linux or Multiboot
@@ -2917,7 +2916,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Debug/Expert options:)
+DEFHEADING(Debug/Expert options)
STEXI
@table @option
ETEXI
@@ -3280,6 +3279,17 @@ Enable KVM full virtualization support. This option is only available
if KVM support is enabled when compiling.
ETEXI
+DEF("enable-hax", 0, QEMU_OPTION_enable_hax, \
+ "-enable-hax enable HAX virtualization support\n", QEMU_ARCH_I386)
+STEXI
+@item -enable-hax
+@findex -enable-hax
+Enable HAX (Hardware-based Acceleration eXecution) support. This option
+is only available if HAX support is enabled when compiling. HAX is only
+applicable to macOS and Windows platforms, and thus does not conflict with
+KVM.
+ETEXI
+
DEF("xen-domid", HAS_ARG, QEMU_OPTION_xen_domid,
"-xen-domid id specify xen guest domain id\n", QEMU_ARCH_ALL)
DEF("xen-create", 0, QEMU_OPTION_xen_create,
@@ -3574,6 +3584,15 @@ be used to change settings (such as migration parameters) prior to issuing
the migrate_incoming to allow the migration to begin.
ETEXI
+DEF("only-migratable", 0, QEMU_OPTION_only_migratable, \
+ "-only-migratable allow only migratable devices\n", QEMU_ARCH_ALL)
+STEXI
+@item -only-migratable
+@findex -only-migratable
+Only allow migratable devices. Devices will not be allowed to enter an
+unmigratable state.
+ETEXI
+
DEF("nodefaults", 0, QEMU_OPTION_nodefaults, \
"-nodefaults don't create default devices\n", QEMU_ARCH_ALL)
STEXI
@@ -3775,7 +3794,14 @@ Dump json-encoded vmstate information for current machine type to file
in @var{file}
ETEXI
+STEXI
+@end table
+ETEXI
+DEFHEADING()
DEFHEADING(Generic object creation)
+STEXI
+@table @option
+ETEXI
DEF("object", HAS_ARG, QEMU_OPTION_object,
"-object TYPENAME[,PROP1=VALUE1,...]\n"
diff --git a/qga/main.c b/qga/main.c
index 6caf215575..538e4ee299 100644
--- a/qga/main.c
+++ b/qga/main.c
@@ -558,8 +558,8 @@ static void process_command(GAState *s, QDict *req)
rsp = qmp_dispatch(QOBJECT(req));
if (rsp) {
ret = send_response(s, rsp);
- if (ret) {
- g_warning("error sending response: %s", strerror(ret));
+ if (ret < 0) {
+ g_warning("error sending response: %s", strerror(-ret));
}
qobject_decref(rsp);
}
diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json
index 94c03128fd..d421609dcb 100644
--- a/qga/qapi-schema.json
+++ b/qga/qapi-schema.json
@@ -697,21 +697,18 @@
# Returns: The length of the initial sublist that has been successfully
# processed. The guest agent maximizes this value. Possible cases:
#
-# 0: if the @vcpus list was empty on input. Guest state
+# - 0: if the @vcpus list was empty on input. Guest state
# has not been changed. Otherwise,
-#
-# Error: processing the first node of @vcpus failed for the
+# - Error: processing the first node of @vcpus failed for the
# reason returned. Guest state has not been changed.
# Otherwise,
-#
-# < length(@vcpus): more than zero initial nodes have been processed,
+# - < length(@vcpus): more than zero initial nodes have been processed,
# but not the entire @vcpus list. Guest state has
# changed accordingly. To retrieve the error
# (assuming it persists), repeat the call with the
# successfully processed initial sublist removed.
# Otherwise,
-#
-# length(@vcpus): call successful.
+# - length(@vcpus): call successful.
#
# Since: 1.5
##
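
The return-value contract documented above (process the leading sublist, stop at the first failure) is easiest to see with a worked example: with three requested changes where the second one fails, the command returns 1 and the caller retries with the first entry dropped. A small Python sketch of that contract follows; it is an illustrative model, not the guest agent's implementation, and the apply_one callback is invented:

    def apply_vcpu_changes(vcpus, apply_one):
        """Model of the documented semantics: return how many leading
        entries were applied; raise only if the very first one fails."""
        done = 0
        for request in vcpus:
            try:
                apply_one(request)
            except OSError:
                if done == 0:
                    raise            # Error: guest state unchanged
                break                # 0 < result < len(vcpus): partial success
            done += 1
        return done                  # len(vcpus): complete success; 0 for empty input

    changes = [{'logical-id': 1, 'online': True},
               {'logical-id': 2, 'online': True},
               {'logical-id': 3, 'online': True}]

    def flaky(req):
        if req['logical-id'] == 2:
            raise OSError("cannot online vcpu 2")

    assert apply_vcpu_changes(changes, flaky) == 1
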
diff --git a/qom/cpu.c b/qom/cpu.c
index 03d9190f8c..7f575879f6 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -22,7 +22,7 @@
#include "qapi/error.h"
#include "qemu-common.h"
#include "qom/cpu.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
#include "qemu/notify.h"
#include "qemu/log.h"
#include "exec/log.h"
@@ -270,8 +270,14 @@ static void cpu_common_reset(CPUState *cpu)
cpu->exception_index = -1;
cpu->crash_occurred = false;
- for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
- atomic_set(&cpu->tb_jmp_cache[i], NULL);
+ if (tcg_enabled()) {
+ for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
+ atomic_set(&cpu->tb_jmp_cache[i], NULL);
+ }
+
+#ifdef CONFIG_SOFTMMU
+ tlb_flush(cpu, 0);
+#endif
}
}
@@ -348,6 +354,8 @@ static void cpu_common_realizefn(DeviceState *dev, Error **errp)
static void cpu_common_unrealizefn(DeviceState *dev, Error **errp)
{
CPUState *cpu = CPU(dev);
+    /* NOTE: last generic point before the CPU is fully unrealized */
+ trace_fini_vcpu(cpu);
cpu_exec_unrealizefn(cpu);
}
diff --git a/qom/object.c b/qom/object.c
index 7a05e35ed9..eb4bc924ff 100644
--- a/qom/object.c
+++ b/qom/object.c
@@ -272,6 +272,12 @@ static void type_initialize(TypeImpl *ti)
ti->class_size = type_class_get_size(ti);
ti->instance_size = type_object_get_size(ti);
+ /* Any type with zero instance_size is implicitly abstract.
+ * This means interface types are all abstract.
+ */
+ if (ti->instance_size == 0) {
+ ti->abstract = true;
+ }
ti->class = g_malloc0(ti->class_size);
@@ -351,7 +357,7 @@ static void object_post_init_with_type(Object *obj, TypeImpl *ti)
}
}
-void object_initialize_with_type(void *data, size_t size, TypeImpl *type)
+static void object_initialize_with_type(void *data, size_t size, TypeImpl *type)
{
Object *obj = data;
@@ -467,7 +473,7 @@ static void object_finalize(void *data)
}
}
-Object *object_new_with_type(Type type)
+static Object *object_new_with_type(Type type)
{
Object *obj;
diff --git a/qom/object_interfaces.c b/qom/object_interfaces.c
index ded4d84c85..03a95c3276 100644
--- a/qom/object_interfaces.c
+++ b/qom/object_interfaces.c
@@ -3,7 +3,6 @@
#include "qom/object_interfaces.h"
#include "qemu/module.h"
#include "qapi-visit.h"
-#include "qapi/qobject-output-visitor.h"
#include "qapi/opts-visitor.h"
void user_creatable_complete(Object *obj, Error **errp)
@@ -35,57 +34,6 @@ bool user_creatable_can_be_deleted(UserCreatable *uc, Error **errp)
}
}
-
-Object *user_creatable_add(const QDict *qdict,
- Visitor *v, Error **errp)
-{
- char *type = NULL;
- char *id = NULL;
- Object *obj = NULL;
- Error *local_err = NULL;
- QDict *pdict;
-
- pdict = qdict_clone_shallow(qdict);
-
- visit_start_struct(v, NULL, NULL, 0, &local_err);
- if (local_err) {
- goto out;
- }
-
- qdict_del(pdict, "qom-type");
- visit_type_str(v, "qom-type", &type, &local_err);
- if (local_err) {
- goto out_visit;
- }
-
- qdict_del(pdict, "id");
- visit_type_str(v, "id", &id, &local_err);
- if (local_err) {
- goto out_visit;
- }
- visit_check_struct(v, &local_err);
- if (local_err) {
- goto out_visit;
- }
-
- obj = user_creatable_add_type(type, id, pdict, v, &local_err);
-
-out_visit:
- visit_end_struct(v, NULL);
-
-out:
- QDECREF(pdict);
- g_free(id);
- g_free(type);
- if (local_err) {
- error_propagate(errp, local_err);
- object_unref(obj);
- return NULL;
- }
- return obj;
-}
-
-
Object *user_creatable_add_type(const char *type, const char *id,
const QDict *qdict,
Visitor *v, Error **errp)
@@ -114,6 +62,12 @@ Object *user_creatable_add_type(const char *type, const char *id,
assert(qdict);
obj = object_new(type);
+ if (object_property_find(obj, "id", NULL)) {
+ object_property_set_str(obj, id, "id", &local_err);
+ if (local_err) {
+ goto out;
+ }
+ }
visit_start_struct(v, NULL, NULL, 0, &local_err);
if (local_err) {
goto out;
@@ -158,13 +112,27 @@ Object *user_creatable_add_opts(QemuOpts *opts, Error **errp)
{
Visitor *v;
QDict *pdict;
- Object *obj = NULL;
+ Object *obj;
+ const char *id = qemu_opts_id(opts);
+ const char *type = qemu_opt_get(opts, "qom-type");
+
+ if (!type) {
+ error_setg(errp, QERR_MISSING_PARAMETER, "qom-type");
+ return NULL;
+ }
+ if (!id) {
+ error_setg(errp, QERR_MISSING_PARAMETER, "id");
+ return NULL;
+ }
- v = opts_visitor_new(opts);
pdict = qemu_opts_to_qdict(opts, NULL);
+ qdict_del(pdict, "qom-type");
+ qdict_del(pdict, "id");
- obj = user_creatable_add(pdict, v, errp);
+ v = opts_visitor_new(opts);
+ obj = user_creatable_add_type(type, id, pdict, v, errp);
visit_free(v);
+
QDECREF(pdict);
return obj;
}
diff --git a/qtest.c b/qtest.c
index 46b99aed52..bd9d417812 100644
--- a/qtest.c
+++ b/qtest.c
@@ -430,6 +430,8 @@ static void qtest_process_command(CharBackend *chr, gchar **words)
g_assert(words[1] && words[2]);
g_assert(qemu_strtoull(words[1], NULL, 0, &addr) == 0);
g_assert(qemu_strtoull(words[2], NULL, 0, &len) == 0);
+ /* We'd send garbage to libqtest if len is 0 */
+ g_assert(len);
data = g_malloc(len);
cpu_physical_memory_read(addr, data, len);
diff --git a/replay/Makefile.objs b/replay/Makefile.objs
index c8ad3ebb89..b2afd4030a 100644
--- a/replay/Makefile.objs
+++ b/replay/Makefile.objs
@@ -5,3 +5,4 @@ common-obj-y += replay-time.o
common-obj-y += replay-input.o
common-obj-y += replay-char.o
common-obj-y += replay-snapshot.o
+common-obj-y += replay-net.o
diff --git a/replay/replay-events.c b/replay/replay-events.c
index c513913671..94a6dcccfc 100644
--- a/replay/replay-events.c
+++ b/replay/replay-events.c
@@ -54,6 +54,9 @@ static void replay_run_event(Event *event)
case REPLAY_ASYNC_EVENT_BLOCK:
aio_bh_call(event->opaque);
break;
+ case REPLAY_ASYNC_EVENT_NET:
+ replay_event_net_run(event->opaque);
+ break;
default:
error_report("Replay: invalid async event ID (%d) in the queue",
event->event_kind);
@@ -189,6 +192,9 @@ static void replay_save_event(Event *event, int checkpoint)
case REPLAY_ASYNC_EVENT_BLOCK:
replay_put_qword(event->id);
break;
+ case REPLAY_ASYNC_EVENT_NET:
+ replay_event_net_save(event->opaque);
+ break;
default:
error_report("Unknown ID %" PRId64 " of replay event", event->id);
exit(1);
@@ -252,6 +258,11 @@ static Event *replay_read_event(int checkpoint)
read_id = replay_get_qword();
}
break;
+ case REPLAY_ASYNC_EVENT_NET:
+ event = g_malloc0(sizeof(Event));
+ event->event_kind = read_event_kind;
+ event->opaque = replay_event_net_load();
+ return event;
default:
error_report("Unknown ID %d of replay event", read_event_kind);
exit(1);
diff --git a/replay/replay-internal.h b/replay/replay-internal.h
index 9117e442d0..c26d0795f2 100644
--- a/replay/replay-internal.h
+++ b/replay/replay-internal.h
@@ -50,6 +50,7 @@ enum ReplayAsyncEventKind {
REPLAY_ASYNC_EVENT_INPUT_SYNC,
REPLAY_ASYNC_EVENT_CHAR_READ,
REPLAY_ASYNC_EVENT_BLOCK,
+ REPLAY_ASYNC_EVENT_NET,
REPLAY_ASYNC_COUNT
};
@@ -161,6 +162,15 @@ void replay_event_char_read_save(void *opaque);
/*! Reads char event read from the file. */
void *replay_event_char_read_load(void);
+/* Network devices */
+
+/*! Called to run network event. */
+void replay_event_net_run(void *opaque);
+/*! Writes network event to the file. */
+void replay_event_net_save(void *opaque);
+/*! Reads network event from the file. */
+void *replay_event_net_load(void);
+
/* VMState-related functions */
/* Registers replay VMState.
diff --git a/replay/replay-net.c b/replay/replay-net.c
new file mode 100644
index 0000000000..80b7054156
--- /dev/null
+++ b/replay/replay-net.c
@@ -0,0 +1,102 @@
+/*
+ * replay-net.c
+ *
+ * Copyright (c) 2010-2016 Institute for System Programming
+ * of the Russian Academy of Sciences.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "sysemu/replay.h"
+#include "replay-internal.h"
+#include "sysemu/sysemu.h"
+#include "net/net.h"
+#include "net/filter.h"
+#include "qemu/iov.h"
+
+struct ReplayNetState {
+ NetFilterState *nfs;
+ int id;
+};
+
+typedef struct NetEvent {
+ uint8_t id;
+ uint32_t flags;
+ uint8_t *data;
+ size_t size;
+} NetEvent;
+
+static NetFilterState **network_filters;
+static int network_filters_count;
+
+ReplayNetState *replay_register_net(NetFilterState *nfs)
+{
+ ReplayNetState *rns = g_new0(ReplayNetState, 1);
+ rns->nfs = nfs;
+ rns->id = network_filters_count++;
+ network_filters = g_realloc(network_filters,
+ network_filters_count
+ * sizeof(*network_filters));
+ network_filters[network_filters_count - 1] = nfs;
+ return rns;
+}
+
+void replay_unregister_net(ReplayNetState *rns)
+{
+ network_filters[rns->id] = NULL;
+ g_free(rns);
+}
+
+void replay_net_packet_event(ReplayNetState *rns, unsigned flags,
+ const struct iovec *iov, int iovcnt)
+{
+ NetEvent *event = g_new(NetEvent, 1);
+ event->flags = flags;
+ event->data = g_malloc(iov_size(iov, iovcnt));
+ event->size = iov_size(iov, iovcnt);
+ event->id = rns->id;
+ iov_to_buf(iov, iovcnt, 0, event->data, event->size);
+
+ replay_add_event(REPLAY_ASYNC_EVENT_NET, event, NULL, 0);
+}
+
+void replay_event_net_run(void *opaque)
+{
+ NetEvent *event = opaque;
+ struct iovec iov = {
+ .iov_base = (void *)event->data,
+ .iov_len = event->size
+ };
+
+ assert(event->id < network_filters_count);
+
+ qemu_netfilter_pass_to_next(network_filters[event->id]->netdev,
+ event->flags, &iov, 1, network_filters[event->id]);
+
+ g_free(event->data);
+ g_free(event);
+}
+
+void replay_event_net_save(void *opaque)
+{
+ NetEvent *event = opaque;
+
+ replay_put_byte(event->id);
+ replay_put_dword(event->flags);
+ replay_put_array(event->data, event->size);
+}
+
+void *replay_event_net_load(void)
+{
+ NetEvent *event = g_new(NetEvent, 1);
+
+ event->id = replay_get_byte();
+ event->flags = replay_get_dword();
+ replay_get_array_alloc(&event->data, &event->size);
+
+ return event;
+}
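
replay_event_net_save() and replay_event_net_load() above have to stay perfectly symmetric: the id byte, the flags dword and the length-prefixed data array are written and read back in the same order. A toy Python model of that symmetry; the struct formats here are a stand-in for QEMU's replay_put_*/replay_get_* helpers and are not the actual log encoding:

    import struct

    def save_net_event(log, event):
        # id (1 byte), flags (4 bytes), then a length-prefixed data array
        log += struct.pack('<BI', event['id'], event['flags'])
        log += struct.pack('<I', len(event['data'])) + event['data']
        return log

    def load_net_event(log, pos=0):
        event_id, flags = struct.unpack_from('<BI', log, pos)
        pos += struct.calcsize('<BI')
        (size,) = struct.unpack_from('<I', log, pos)
        pos += 4
        data = log[pos:pos + size]
        return {'id': event_id, 'flags': flags, 'data': data}, pos + size

    log = save_net_event(b'', {'id': 0, 'flags': 0, 'data': b'\x01\x02\x03'})
    event, _ = load_net_event(log)
    assert event == {'id': 0, 'flags': 0, 'data': b'\x01\x02\x03'}
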
diff --git a/replay/replay.c b/replay/replay.c
index c797aeae8a..7f27cf17b0 100644
--- a/replay/replay.c
+++ b/replay/replay.c
@@ -21,7 +21,7 @@
/* Current version of the replay mechanism.
Increase it when file format changes. */
-#define REPLAY_VERSION 0xe02004
+#define REPLAY_VERSION 0xe02005
/* Size of replay log header */
#define HEADER_SIZE (sizeof(uint32_t) + sizeof(uint64_t))
diff --git a/rules.mak b/rules.mak
index ce9e7e6ffe..d5c516caff 100644
--- a/rules.mak
+++ b/rules.mak
@@ -363,3 +363,15 @@ define unnest-vars
$(eval -include $(patsubst %.o,%.d,$(patsubst %.mo,%.d,$($v))))
$(eval $v := $(filter-out %/,$($v))))
endef
+
+TEXI2MAN = $(call quiet-command, \
+ perl -Ww -- $(SRC_PATH)/scripts/texi2pod.pl $< $@.pod && \
+ $(POD2MAN) --section=$(subst .,,$(suffix $@)) --center=" " --release=" " $@.pod > $@, \
+ "GEN","$@")
+
+%.1:
+ $(call TEXI2MAN)
+%.7:
+ $(call TEXI2MAN)
+%.8:
+ $(call TEXI2MAN)
diff --git a/scripts/hxtool b/scripts/hxtool
index 04f7d7b0ed..5468cd7782 100644
--- a/scripts/hxtool
+++ b/scripts/hxtool
@@ -16,6 +16,13 @@ hxtoh()
done
}
+print_texi_heading()
+{
+ if test "$*" != ""; then
+ printf "@subsection %s\n" "$*"
+ fi
+}
+
hxtotexi()
{
flag=0
@@ -45,10 +52,10 @@ hxtotexi()
fi
;;
DEFHEADING*)
- printf '%s\n' "$(expr "$str" : "DEFHEADING(\(.*\))")"
+ print_texi_heading "$(expr "$str" : "DEFHEADING(\(.*\))")"
;;
ARCHHEADING*)
- printf '%s\n' "$(expr "$str" : "ARCHHEADING(\(.*\),.*)")"
+ print_texi_heading "$(expr "$str" : "ARCHHEADING(\(.*\),.*)")"
;;
*)
test $flag -eq 1 && printf '%s\n' "$str"
diff --git a/scripts/qapi.py b/scripts/qapi.py
index 21bc32fda3..53a44779d0 100644
--- a/scripts/qapi.py
+++ b/scripts/qapi.py
@@ -91,35 +91,154 @@ def error_path(parent):
return res
-class QAPISchemaError(Exception):
- def __init__(self, schema, msg):
+class QAPIError(Exception):
+ def __init__(self, fname, line, col, incl_info, msg):
Exception.__init__(self)
- self.fname = schema.fname
+ self.fname = fname
+ self.line = line
+ self.col = col
+ self.info = incl_info
self.msg = msg
- self.col = 1
- self.line = schema.line
- for ch in schema.src[schema.line_pos:schema.pos]:
+
+ def __str__(self):
+ loc = "%s:%d" % (self.fname, self.line)
+ if self.col is not None:
+ loc += ":%s" % self.col
+ return error_path(self.info) + "%s: %s" % (loc, self.msg)
+
+
+class QAPIParseError(QAPIError):
+ def __init__(self, parser, msg):
+ col = 1
+ for ch in parser.src[parser.line_pos:parser.pos]:
if ch == '\t':
- self.col = (self.col + 7) % 8 + 1
+ col = (col + 7) % 8 + 1
else:
- self.col += 1
- self.info = schema.incl_info
+ col += 1
+ QAPIError.__init__(self, parser.fname, parser.line, col,
+ parser.incl_info, msg)
- def __str__(self):
- return error_path(self.info) + \
- "%s:%d:%d: %s" % (self.fname, self.line, self.col, self.msg)
+class QAPISemError(QAPIError):
+ def __init__(self, info, msg):
+ QAPIError.__init__(self, info['file'], info['line'], None,
+ info['parent'], msg)
-class QAPIExprError(Exception):
- def __init__(self, expr_info, msg):
- Exception.__init__(self)
- assert expr_info
- self.info = expr_info
- self.msg = msg
- def __str__(self):
- return error_path(self.info['parent']) + \
- "%s:%d: %s" % (self.info['file'], self.info['line'], self.msg)
+class QAPIDoc(object):
+ class Section(object):
+ def __init__(self, name=None):
+ # optional section name (argument/member or section name)
+ self.name = name
+ # the list of lines for this section
+ self.content = []
+
+ def append(self, line):
+ self.content.append(line)
+
+ def __repr__(self):
+ return "\n".join(self.content).strip()
+
+ class ArgSection(Section):
+ pass
+
+ def __init__(self, parser, info):
+ # self.parser is used to report errors with QAPIParseError. The
+ # resulting error position depends on the state of the parser.
+ # It happens to be the beginning of the comment. More or less
+        # serviceable, but action at a distance.
+ self.parser = parser
+ self.info = info
+ self.symbol = None
+ self.body = QAPIDoc.Section()
+ # dict mapping parameter name to ArgSection
+ self.args = OrderedDict()
+ # a list of Section
+ self.sections = []
+ # the current section
+ self.section = self.body
+ # associated expression (to be set by expression parser)
+ self.expr = None
+
+ def has_section(self, name):
+ """Return True if we have a section with this name."""
+ for i in self.sections:
+ if i.name == name:
+ return True
+ return False
+
+ def append(self, line):
+ """Parse a comment line and add it to the documentation."""
+ line = line[1:]
+ if not line:
+ self._append_freeform(line)
+ return
+
+ if line[0] != ' ':
+ raise QAPIParseError(self.parser, "Missing space after #")
+ line = line[1:]
+
+ # FIXME not nice: things like '# @foo:' and '# @foo: ' aren't
+ # recognized, and get silently treated as ordinary text
+ if self.symbol:
+ self._append_symbol_line(line)
+ elif not self.body.content and line.startswith("@"):
+ if not line.endswith(":"):
+ raise QAPIParseError(self.parser, "Line should end with :")
+ self.symbol = line[1:-1]
+ # FIXME invalid names other than the empty string aren't flagged
+ if not self.symbol:
+ raise QAPIParseError(self.parser, "Invalid name")
+ else:
+ self._append_freeform(line)
+
+ def _append_symbol_line(self, line):
+ name = line.split(' ', 1)[0]
+
+ if name.startswith("@") and name.endswith(":"):
+ line = line[len(name)+1:]
+ self._start_args_section(name[1:-1])
+ elif name in ("Returns:", "Since:",
+ # those are often singular or plural
+ "Note:", "Notes:",
+ "Example:", "Examples:",
+ "TODO:"):
+ line = line[len(name)+1:]
+ self._start_section(name[:-1])
+
+ self._append_freeform(line)
+
+ def _start_args_section(self, name):
+ # FIXME invalid names other than the empty string aren't flagged
+ if not name:
+ raise QAPIParseError(self.parser, "Invalid parameter name")
+ if name in self.args:
+ raise QAPIParseError(self.parser,
+ "'%s' parameter name duplicated" % name)
+ if self.sections:
+ raise QAPIParseError(self.parser,
+ "'@%s:' can't follow '%s' section"
+ % (name, self.sections[0].name))
+ self.section = QAPIDoc.ArgSection(name)
+ self.args[name] = self.section
+
+ def _start_section(self, name=""):
+ if name in ("Returns", "Since") and self.has_section(name):
+ raise QAPIParseError(self.parser,
+ "Duplicated '%s' section" % name)
+ self.section = QAPIDoc.Section(name)
+ self.sections.append(self.section)
+
+ def _append_freeform(self, line):
+ in_arg = isinstance(self.section, QAPIDoc.ArgSection)
+ if (in_arg and self.section.content
+ and not self.section.content[-1]
+ and line and not line[0].isspace()):
+ self._start_section()
+ if (in_arg or not self.section.name
+ or not self.section.name.startswith("Example")):
+ line = line.strip()
+ self.section.append(line)
class QAPISchemaParser(object):
@@ -137,46 +256,58 @@ class QAPISchemaParser(object):
self.line = 1
self.line_pos = 0
self.exprs = []
+ self.docs = []
self.accept()
while self.tok is not None:
- expr_info = {'file': fname, 'line': self.line,
- 'parent': self.incl_info}
+ info = {'file': fname, 'line': self.line,
+ 'parent': self.incl_info}
+ if self.tok == '#':
+ doc = self.get_doc(info)
+ self.docs.append(doc)
+ continue
+
expr = self.get_expr(False)
if isinstance(expr, dict) and "include" in expr:
if len(expr) != 1:
- raise QAPIExprError(expr_info,
- "Invalid 'include' directive")
+ raise QAPISemError(info, "Invalid 'include' directive")
include = expr["include"]
if not isinstance(include, str):
- raise QAPIExprError(expr_info,
- "Value of 'include' must be a string")
+ raise QAPISemError(info,
+ "Value of 'include' must be a string")
incl_abs_fname = os.path.join(os.path.dirname(abs_fname),
include)
# catch inclusion cycle
- inf = expr_info
+ inf = info
while inf:
if incl_abs_fname == os.path.abspath(inf['file']):
- raise QAPIExprError(expr_info, "Inclusion loop for %s"
- % include)
+ raise QAPISemError(info, "Inclusion loop for %s"
+ % include)
inf = inf['parent']
+
# skip multiple include of the same file
if incl_abs_fname in previously_included:
continue
try:
fobj = open(incl_abs_fname, 'r')
except IOError as e:
- raise QAPIExprError(expr_info,
- '%s: %s' % (e.strerror, include))
+ raise QAPISemError(info, '%s: %s' % (e.strerror, include))
exprs_include = QAPISchemaParser(fobj, previously_included,
- expr_info)
+ info)
self.exprs.extend(exprs_include.exprs)
+ self.docs.extend(exprs_include.docs)
else:
expr_elem = {'expr': expr,
- 'info': expr_info}
+ 'info': info}
+ if (self.docs
+ and self.docs[-1].info['file'] == fname
+ and not self.docs[-1].expr):
+ self.docs[-1].expr = expr
+ expr_elem['doc'] = self.docs[-1]
+
self.exprs.append(expr_elem)
- def accept(self):
+ def accept(self, skip_comment=True):
while True:
self.tok = self.src[self.cursor]
self.pos = self.cursor
@@ -184,7 +315,13 @@ class QAPISchemaParser(object):
self.val = None
if self.tok == '#':
+ if self.src[self.cursor] == '#':
+ # Start of doc comment
+ skip_comment = False
self.cursor = self.src.find('\n', self.cursor)
+ if not skip_comment:
+ self.val = self.src[self.pos:self.cursor]
+ return
elif self.tok in "{}:,[]":
return
elif self.tok == "'":
@@ -194,8 +331,7 @@ class QAPISchemaParser(object):
ch = self.src[self.cursor]
self.cursor += 1
if ch == '\n':
- raise QAPISchemaError(self,
- 'Missing terminating "\'"')
+ raise QAPIParseError(self, 'Missing terminating "\'"')
if esc:
if ch == 'b':
string += '\b'
@@ -213,25 +349,25 @@ class QAPISchemaParser(object):
ch = self.src[self.cursor]
self.cursor += 1
if ch not in "0123456789abcdefABCDEF":
- raise QAPISchemaError(self,
- '\\u escape needs 4 '
- 'hex digits')
+ raise QAPIParseError(self,
+ '\\u escape needs 4 '
+ 'hex digits')
value = (value << 4) + int(ch, 16)
# If Python 2 and 3 didn't disagree so much on
# how to handle Unicode, then we could allow
# Unicode string defaults. But most of QAPI is
# ASCII-only, so we aren't losing much for now.
if not value or value > 0x7f:
- raise QAPISchemaError(self,
- 'For now, \\u escape '
- 'only supports non-zero '
- 'values up to \\u007f')
+ raise QAPIParseError(self,
+ 'For now, \\u escape '
+ 'only supports non-zero '
+ 'values up to \\u007f')
string += chr(value)
elif ch in "\\/'\"":
string += ch
else:
- raise QAPISchemaError(self,
- "Unknown escape \\%s" % ch)
+ raise QAPIParseError(self,
+ "Unknown escape \\%s" % ch)
esc = False
elif ch == "\\":
esc = True
@@ -259,7 +395,7 @@ class QAPISchemaParser(object):
self.line += 1
self.line_pos = self.cursor
elif not self.tok.isspace():
- raise QAPISchemaError(self, 'Stray "%s"' % self.tok)
+ raise QAPIParseError(self, 'Stray "%s"' % self.tok)
def get_members(self):
expr = OrderedDict()
@@ -267,24 +403,24 @@ class QAPISchemaParser(object):
self.accept()
return expr
if self.tok != "'":
- raise QAPISchemaError(self, 'Expected string or "}"')
+ raise QAPIParseError(self, 'Expected string or "}"')
while True:
key = self.val
self.accept()
if self.tok != ':':
- raise QAPISchemaError(self, 'Expected ":"')
+ raise QAPIParseError(self, 'Expected ":"')
self.accept()
if key in expr:
- raise QAPISchemaError(self, 'Duplicate key "%s"' % key)
+ raise QAPIParseError(self, 'Duplicate key "%s"' % key)
expr[key] = self.get_expr(True)
if self.tok == '}':
self.accept()
return expr
if self.tok != ',':
- raise QAPISchemaError(self, 'Expected "," or "}"')
+ raise QAPIParseError(self, 'Expected "," or "}"')
self.accept()
if self.tok != "'":
- raise QAPISchemaError(self, 'Expected string')
+ raise QAPIParseError(self, 'Expected string')
def get_values(self):
expr = []
@@ -292,20 +428,20 @@ class QAPISchemaParser(object):
self.accept()
return expr
if self.tok not in "{['tfn":
- raise QAPISchemaError(self, 'Expected "{", "[", "]", string, '
- 'boolean or "null"')
+ raise QAPIParseError(self, 'Expected "{", "[", "]", string, '
+ 'boolean or "null"')
while True:
expr.append(self.get_expr(True))
if self.tok == ']':
self.accept()
return expr
if self.tok != ',':
- raise QAPISchemaError(self, 'Expected "," or "]"')
+ raise QAPIParseError(self, 'Expected "," or "]"')
self.accept()
def get_expr(self, nested):
if self.tok != '{' and not nested:
- raise QAPISchemaError(self, 'Expected "{"')
+ raise QAPIParseError(self, 'Expected "{"')
if self.tok == '{':
self.accept()
expr = self.get_members()
@@ -316,9 +452,31 @@ class QAPISchemaParser(object):
expr = self.val
self.accept()
else:
- raise QAPISchemaError(self, 'Expected "{", "[" or string')
+ raise QAPIParseError(self, 'Expected "{", "[" or string')
return expr
+ def get_doc(self, info):
+ if self.val != '##':
+ raise QAPIParseError(self, "Junk after '##' at start of "
+ "documentation comment")
+
+ doc = QAPIDoc(self, info)
+ self.accept(False)
+ while self.tok == '#':
+ if self.val.startswith('##'):
+ # End of doc comment
+ if self.val != '##':
+ raise QAPIParseError(self, "Junk after '##' at end of "
+ "documentation comment")
+ self.accept()
+ return doc
+ else:
+ doc.append(self.val)
+ self.accept(False)
+
+ raise QAPIParseError(self, "Documentation comment must end with '##'")
+
+
#
# Semantic analysis of schema expressions
# TODO fold into QAPISchema
@@ -375,20 +533,18 @@ valid_name = re.compile('^(__[a-zA-Z0-9.-]+_)?'
'[a-zA-Z][a-zA-Z0-9_-]*$')
-def check_name(expr_info, source, name, allow_optional=False,
+def check_name(info, source, name, allow_optional=False,
enum_member=False):
global valid_name
membername = name
if not isinstance(name, str):
- raise QAPIExprError(expr_info,
- "%s requires a string name" % source)
+ raise QAPISemError(info, "%s requires a string name" % source)
if name.startswith('*'):
membername = name[1:]
if not allow_optional:
- raise QAPIExprError(expr_info,
- "%s does not allow optional name '%s'"
- % (source, name))
+ raise QAPISemError(info, "%s does not allow optional name '%s'"
+ % (source, name))
# Enum members can start with a digit, because the generated C
# code always prefixes it with the enum name
if enum_member and membername[0].isdigit():
@@ -397,8 +553,7 @@ def check_name(expr_info, source, name, allow_optional=False,
# and 'q_obj_*' implicit type names.
if not valid_name.match(membername) or \
c_name(membername, False).startswith('q_'):
- raise QAPIExprError(expr_info,
- "%s uses invalid name '%s'" % (source, name))
+ raise QAPISemError(info, "%s uses invalid name '%s'" % (source, name))
def add_name(name, info, meta, implicit=False):
@@ -407,13 +562,11 @@ def add_name(name, info, meta, implicit=False):
# FIXME should reject names that differ only in '_' vs. '.'
# vs. '-', because they're liable to clash in generated C.
if name in all_names:
- raise QAPIExprError(info,
- "%s '%s' is already defined"
- % (all_names[name], name))
+ raise QAPISemError(info, "%s '%s' is already defined"
+ % (all_names[name], name))
if not implicit and (name.endswith('Kind') or name.endswith('List')):
- raise QAPIExprError(info,
- "%s '%s' should not end in '%s'"
- % (meta, name, name[-4:]))
+ raise QAPISemError(info, "%s '%s' should not end in '%s'"
+ % (meta, name, name[-4:]))
all_names[name] = meta
@@ -465,7 +618,7 @@ def is_enum(name):
return find_enum(name) is not None
-def check_type(expr_info, source, value, allow_array=False,
+def check_type(info, source, value, allow_array=False,
allow_dict=False, allow_optional=False,
allow_metas=[]):
global all_names
@@ -476,69 +629,64 @@ def check_type(expr_info, source, value, allow_array=False,
# Check if array type for value is okay
if isinstance(value, list):
if not allow_array:
- raise QAPIExprError(expr_info,
- "%s cannot be an array" % source)
+ raise QAPISemError(info, "%s cannot be an array" % source)
if len(value) != 1 or not isinstance(value[0], str):
- raise QAPIExprError(expr_info,
- "%s: array type must contain single type name"
- % source)
+ raise QAPISemError(info,
+ "%s: array type must contain single type name" %
+ source)
value = value[0]
# Check if type name for value is okay
if isinstance(value, str):
if value not in all_names:
- raise QAPIExprError(expr_info,
- "%s uses unknown type '%s'"
- % (source, value))
+ raise QAPISemError(info, "%s uses unknown type '%s'"
+ % (source, value))
if not all_names[value] in allow_metas:
- raise QAPIExprError(expr_info,
- "%s cannot use %s type '%s'"
- % (source, all_names[value], value))
+ raise QAPISemError(info, "%s cannot use %s type '%s'" %
+ (source, all_names[value], value))
return
if not allow_dict:
- raise QAPIExprError(expr_info,
- "%s should be a type name" % source)
+ raise QAPISemError(info, "%s should be a type name" % source)
if not isinstance(value, OrderedDict):
- raise QAPIExprError(expr_info,
- "%s should be a dictionary or type name" % source)
+ raise QAPISemError(info,
+ "%s should be a dictionary or type name" % source)
# value is a dictionary, check that each member is okay
for (key, arg) in value.items():
- check_name(expr_info, "Member of %s" % source, key,
+ check_name(info, "Member of %s" % source, key,
allow_optional=allow_optional)
if c_name(key, False) == 'u' or c_name(key, False).startswith('has_'):
- raise QAPIExprError(expr_info,
- "Member of %s uses reserved name '%s'"
- % (source, key))
+ raise QAPISemError(info, "Member of %s uses reserved name '%s'"
+ % (source, key))
# Todo: allow dictionaries to represent default values of
# an optional argument.
- check_type(expr_info, "Member '%s' of %s" % (key, source), arg,
+ check_type(info, "Member '%s' of %s" % (key, source), arg,
allow_array=True,
allow_metas=['built-in', 'union', 'alternate', 'struct',
'enum'])
-def check_command(expr, expr_info):
+def check_command(expr, info):
name = expr['command']
boxed = expr.get('boxed', False)
args_meta = ['struct']
if boxed:
args_meta += ['union', 'alternate']
- check_type(expr_info, "'data' for command '%s'" % name,
+ check_type(info, "'data' for command '%s'" % name,
expr.get('data'), allow_dict=not boxed, allow_optional=True,
allow_metas=args_meta)
returns_meta = ['union', 'struct']
if name in returns_whitelist:
returns_meta += ['built-in', 'alternate', 'enum']
- check_type(expr_info, "'returns' for command '%s'" % name,
+ check_type(info, "'returns' for command '%s'" % name,
expr.get('returns'), allow_array=True,
allow_optional=True, allow_metas=returns_meta)
-def check_event(expr, expr_info):
+def check_event(expr, info):
global events
name = expr['event']
boxed = expr.get('boxed', False)
@@ -547,12 +695,12 @@ def check_event(expr, expr_info):
if boxed:
meta += ['union', 'alternate']
events.append(name)
- check_type(expr_info, "'data' for event '%s'" % name,
+ check_type(info, "'data' for event '%s'" % name,
expr.get('data'), allow_dict=not boxed, allow_optional=True,
allow_metas=meta)
-def check_union(expr, expr_info):
+def check_union(expr, info):
name = expr['union']
base = expr.get('base')
discriminator = expr.get('discriminator')
@@ -565,123 +713,117 @@ def check_union(expr, expr_info):
enum_define = None
allow_metas = ['built-in', 'union', 'alternate', 'struct', 'enum']
if base is not None:
- raise QAPIExprError(expr_info,
- "Simple union '%s' must not have a base"
- % name)
+ raise QAPISemError(info, "Simple union '%s' must not have a base" %
+ name)
# Else, it's a flat union.
else:
# The object must have a string or dictionary 'base'.
- check_type(expr_info, "'base' for union '%s'" % name,
+ check_type(info, "'base' for union '%s'" % name,
base, allow_dict=True, allow_optional=True,
allow_metas=['struct'])
if not base:
- raise QAPIExprError(expr_info,
- "Flat union '%s' must have a base"
- % name)
+ raise QAPISemError(info, "Flat union '%s' must have a base"
+ % name)
base_members = find_base_members(base)
assert base_members
# The value of member 'discriminator' must name a non-optional
# member of the base struct.
- check_name(expr_info, "Discriminator of flat union '%s'" % name,
+ check_name(info, "Discriminator of flat union '%s'" % name,
discriminator)
discriminator_type = base_members.get(discriminator)
if not discriminator_type:
- raise QAPIExprError(expr_info,
- "Discriminator '%s' is not a member of base "
- "struct '%s'"
- % (discriminator, base))
+ raise QAPISemError(info,
+ "Discriminator '%s' is not a member of base "
+ "struct '%s'"
+ % (discriminator, base))
enum_define = find_enum(discriminator_type)
allow_metas = ['struct']
# Do not allow string discriminator
if not enum_define:
- raise QAPIExprError(expr_info,
- "Discriminator '%s' must be of enumeration "
- "type" % discriminator)
+ raise QAPISemError(info,
+ "Discriminator '%s' must be of enumeration "
+ "type" % discriminator)
# Check every branch; don't allow an empty union
if len(members) == 0:
- raise QAPIExprError(expr_info,
- "Union '%s' cannot have empty 'data'" % name)
+ raise QAPISemError(info, "Union '%s' cannot have empty 'data'" % name)
for (key, value) in members.items():
- check_name(expr_info, "Member of union '%s'" % name, key)
+ check_name(info, "Member of union '%s'" % name, key)
# Each value must name a known type
- check_type(expr_info, "Member '%s' of union '%s'" % (key, name),
+ check_type(info, "Member '%s' of union '%s'" % (key, name),
value, allow_array=not base, allow_metas=allow_metas)
# If the discriminator names an enum type, then all members
# of 'data' must also be members of the enum type.
if enum_define:
if key not in enum_define['enum_values']:
- raise QAPIExprError(expr_info,
- "Discriminator value '%s' is not found in "
- "enum '%s'" %
- (key, enum_define["enum_name"]))
+ raise QAPISemError(info,
+ "Discriminator value '%s' is not found in "
+ "enum '%s'"
+ % (key, enum_define["enum_name"]))
# If discriminator is user-defined, ensure all values are covered
if enum_define:
for value in enum_define['enum_values']:
if value not in members.keys():
- raise QAPIExprError(expr_info,
- "Union '%s' data missing '%s' branch"
- % (name, value))
+ raise QAPISemError(info, "Union '%s' data missing '%s' branch"
+ % (name, value))
-def check_alternate(expr, expr_info):
+def check_alternate(expr, info):
name = expr['alternate']
members = expr['data']
types_seen = {}
# Check every branch; require at least two branches
if len(members) < 2:
- raise QAPIExprError(expr_info,
- "Alternate '%s' should have at least two branches "
- "in 'data'" % name)
+ raise QAPISemError(info,
+ "Alternate '%s' should have at least two branches "
+ "in 'data'" % name)
for (key, value) in members.items():
- check_name(expr_info, "Member of alternate '%s'" % name, key)
+ check_name(info, "Member of alternate '%s'" % name, key)
# Ensure alternates have no type conflicts.
- check_type(expr_info, "Member '%s' of alternate '%s'" % (key, name),
+ check_type(info, "Member '%s' of alternate '%s'" % (key, name),
value,
allow_metas=['built-in', 'union', 'struct', 'enum'])
qtype = find_alternate_member_qtype(value)
if not qtype:
- raise QAPIExprError(expr_info,
- "Alternate '%s' member '%s' cannot use "
- "type '%s'" % (name, key, value))
+ raise QAPISemError(info, "Alternate '%s' member '%s' cannot use "
+ "type '%s'" % (name, key, value))
if qtype in types_seen:
- raise QAPIExprError(expr_info,
- "Alternate '%s' member '%s' can't "
- "be distinguished from member '%s'"
- % (name, key, types_seen[qtype]))
+ raise QAPISemError(info, "Alternate '%s' member '%s' can't "
+ "be distinguished from member '%s'"
+ % (name, key, types_seen[qtype]))
types_seen[qtype] = key
-def check_enum(expr, expr_info):
+def check_enum(expr, info):
name = expr['enum']
members = expr.get('data')
prefix = expr.get('prefix')
if not isinstance(members, list):
- raise QAPIExprError(expr_info,
- "Enum '%s' requires an array for 'data'" % name)
+ raise QAPISemError(info,
+ "Enum '%s' requires an array for 'data'" % name)
if prefix is not None and not isinstance(prefix, str):
- raise QAPIExprError(expr_info,
- "Enum '%s' requires a string for 'prefix'" % name)
+ raise QAPISemError(info,
+ "Enum '%s' requires a string for 'prefix'" % name)
for member in members:
- check_name(expr_info, "Member of enum '%s'" % name, member,
+ check_name(info, "Member of enum '%s'" % name, member,
enum_member=True)
-def check_struct(expr, expr_info):
+def check_struct(expr, info):
name = expr['struct']
members = expr['data']
- check_type(expr_info, "'data' for struct '%s'" % name, members,
+ check_type(info, "'data' for struct '%s'" % name, members,
allow_dict=True, allow_optional=True)
- check_type(expr_info, "'base' for struct '%s'" % name, expr.get('base'),
+ check_type(info, "'base' for struct '%s'" % name, expr.get('base'),
allow_metas=['struct'])
@@ -690,27 +832,24 @@ def check_keys(expr_elem, meta, required, optional=[]):
info = expr_elem['info']
name = expr[meta]
if not isinstance(name, str):
- raise QAPIExprError(info,
- "'%s' key must have a string value" % meta)
+ raise QAPISemError(info, "'%s' key must have a string value" % meta)
required = required + [meta]
for (key, value) in expr.items():
if key not in required and key not in optional:
- raise QAPIExprError(info,
- "Unknown key '%s' in %s '%s'"
- % (key, meta, name))
+ raise QAPISemError(info, "Unknown key '%s' in %s '%s'"
+ % (key, meta, name))
if (key == 'gen' or key == 'success-response') and value is not False:
- raise QAPIExprError(info,
- "'%s' of %s '%s' should only use false value"
- % (key, meta, name))
+ raise QAPISemError(info,
+ "'%s' of %s '%s' should only use false value"
+ % (key, meta, name))
if key == 'boxed' and value is not True:
- raise QAPIExprError(info,
- "'%s' of %s '%s' should only use true value"
- % (key, meta, name))
+ raise QAPISemError(info,
+ "'%s' of %s '%s' should only use true value"
+ % (key, meta, name))
for key in required:
if key not in expr:
- raise QAPIExprError(info,
- "Key '%s' is missing from %s '%s'"
- % (key, meta, name))
+ raise QAPISemError(info, "Key '%s' is missing from %s '%s'"
+ % (key, meta, name))
def check_exprs(exprs):
@@ -722,6 +861,11 @@ def check_exprs(exprs):
for expr_elem in exprs:
expr = expr_elem['expr']
info = expr_elem['info']
+
+ if 'doc' not in expr_elem:
+ raise QAPISemError(info,
+ "Expression missing documentation comment")
+
if 'enum' in expr:
check_keys(expr_elem, 'enum', ['data'], ['prefix'])
add_enum(expr['enum'], info, expr['data'])
@@ -743,8 +887,8 @@ def check_exprs(exprs):
check_keys(expr_elem, 'event', [], ['data', 'boxed'])
add_name(expr['event'], info, 'event')
else:
- raise QAPIExprError(expr_elem['info'],
- "Expression is missing metatype")
+ raise QAPISemError(expr_elem['info'],
+ "Expression is missing metatype")
# Try again for hidden UnionKind enum
for expr_elem in exprs:
@@ -780,6 +924,88 @@ def check_exprs(exprs):
return exprs
+def check_freeform_doc(doc):
+ if doc.symbol:
+ raise QAPISemError(doc.info,
+                           "Documentation for '%s' is not followed"
+ " by the definition" % doc.symbol)
+
+ body = str(doc.body)
+ if re.search(r'@\S+:', body, re.MULTILINE):
+ raise QAPISemError(doc.info,
+ "Free-form documentation block must not contain"
+ " @NAME: sections")
+
+
+def check_definition_doc(doc, expr, info):
+ for i in ('enum', 'union', 'alternate', 'struct', 'command', 'event'):
+ if i in expr:
+ meta = i
+ break
+
+ name = expr[meta]
+ if doc.symbol != name:
+ raise QAPISemError(info, "Definition of '%s' follows documentation"
+ " for '%s'" % (name, doc.symbol))
+ if doc.has_section('Returns') and 'command' not in expr:
+ raise QAPISemError(info, "'Returns:' is only valid for commands")
+
+ if meta == 'union':
+ args = expr.get('base', [])
+ else:
+ args = expr.get('data', [])
+ if isinstance(args, str):
+ return
+ if isinstance(args, dict):
+ args = args.keys()
+ assert isinstance(args, list)
+
+ if (meta == 'alternate'
+ or (meta == 'union' and not expr.get('discriminator'))):
+ args.append('type')
+
+ for arg in args:
+ if arg[0] == '*':
+ opt = True
+ desc = doc.args.get(arg[1:])
+ else:
+ opt = False
+ desc = doc.args.get(arg)
+ if not desc:
+ continue
+ desc_opt = "#optional" in str(desc)
+ if desc_opt and not opt:
+ raise QAPISemError(info, "Description has #optional, "
+ "but the declaration doesn't")
+ if not desc_opt and opt:
+ # silently fix the doc
+ # TODO either fix the schema and make this an error,
+ # or drop #optional entirely
+ desc.append("#optional")
+
+ doc_args = set(doc.args.keys())
+ args = set([name.strip('*') for name in args])
+ if not doc_args.issubset(args):
+ raise QAPISemError(info, "The following documented members are not in "
+ "the declaration: %s" % ", ".join(doc_args - args))
+
+
+def check_docs(docs):
+ for doc in docs:
+ for section in doc.args.values() + doc.sections:
+ content = str(section)
+ if not content or content.isspace():
+ raise QAPISemError(doc.info,
+ "Empty doc section '%s'" % section.name)
+
+ if not doc.expr:
+ check_freeform_doc(doc)
+ else:
+ check_definition_doc(doc, doc.expr, doc.info)
+
+ return docs
+
+
#
# Schema compiler frontend
#
@@ -978,8 +1204,8 @@ class QAPISchemaObjectType(QAPISchemaType):
def check(self, schema):
if self.members is False: # check for cycles
- raise QAPIExprError(self.info,
- "Object %s contains itself" % self.name)
+ raise QAPISemError(self.info,
+ "Object %s contains itself" % self.name)
if self.members:
return
self.members = False # mark as being checked
@@ -1051,12 +1277,11 @@ class QAPISchemaMember(object):
def check_clash(self, info, seen):
cname = c_name(self.name)
if cname.lower() != cname and self.owner not in case_whitelist:
- raise QAPIExprError(info,
- "%s should not use uppercase" % self.describe())
+ raise QAPISemError(info,
+ "%s should not use uppercase" % self.describe())
if cname in seen:
- raise QAPIExprError(info,
- "%s collides with %s"
- % (self.describe(), seen[cname].describe()))
+ raise QAPISemError(info, "%s collides with %s" %
+ (self.describe(), seen[cname].describe()))
seen[cname] = self
def _pretty_owner(self):
@@ -1201,14 +1426,13 @@ class QAPISchemaCommand(QAPISchemaEntity):
self.arg_type.check(schema)
if self.boxed:
if self.arg_type.is_empty():
- raise QAPIExprError(self.info,
- "Cannot use 'boxed' with empty type")
+ raise QAPISemError(self.info,
+ "Cannot use 'boxed' with empty type")
else:
assert not isinstance(self.arg_type, QAPISchemaAlternateType)
assert not self.arg_type.variants
elif self.boxed:
- raise QAPIExprError(self.info,
- "Use of 'boxed' requires 'data'")
+ raise QAPISemError(self.info, "Use of 'boxed' requires 'data'")
if self._ret_type_name:
self.ret_type = schema.lookup_type(self._ret_type_name)
assert isinstance(self.ret_type, QAPISchemaType)
@@ -1235,14 +1459,13 @@ class QAPISchemaEvent(QAPISchemaEntity):
self.arg_type.check(schema)
if self.boxed:
if self.arg_type.is_empty():
- raise QAPIExprError(self.info,
- "Cannot use 'boxed' with empty type")
+ raise QAPISemError(self.info,
+ "Cannot use 'boxed' with empty type")
else:
assert not isinstance(self.arg_type, QAPISchemaAlternateType)
assert not self.arg_type.variants
elif self.boxed:
- raise QAPIExprError(self.info,
- "Use of 'boxed' requires 'data'")
+ raise QAPISemError(self.info, "Use of 'boxed' requires 'data'")
def visit(self, visitor):
visitor.visit_event(self.name, self.info, self.arg_type, self.boxed)
@@ -1251,14 +1474,16 @@ class QAPISchemaEvent(QAPISchemaEntity):
class QAPISchema(object):
def __init__(self, fname):
try:
- self.exprs = check_exprs(QAPISchemaParser(open(fname, "r")).exprs)
+ parser = QAPISchemaParser(open(fname, "r"))
+ self.exprs = check_exprs(parser.exprs)
+ self.docs = check_docs(parser.docs)
self._entity_dict = {}
self._predefining = True
self._def_predefineds()
self._predefining = False
self._def_exprs()
self.check()
- except (QAPISchemaError, QAPIExprError) as err:
+ except QAPIError as err:
print >>sys.stderr, err
exit(1)
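
The parser above now collects documentation comments next to the expressions: a block opens and closes with a line containing only '##', an optional '@name:' line binds it to the following definition, '@arg:' lines start argument sections, and 'Returns:', 'Since:', 'Note(s):', 'Example(s):' and 'TODO:' start named sections. A quick, illustrative way to poke at the new API from a Python prompt — assuming scripts/ is on sys.path and a schema file is at hand, which is not something this patch sets up:

    # The recognized comment shape, roughly:
    #
    #   ##
    #   # @guest-sync:
    #   #
    #   # Body text describing the command.
    #   #
    #   # @id: argument description
    #   #
    #   # Returns: what the command returns
    #   # Since: 1.1
    #   ##
    from qapi import QAPISchemaParser

    parser = QAPISchemaParser(open('qga/qapi-schema.json', 'r'))
    for doc in parser.docs:
        # doc.symbol is None for free-form blocks, the definition name otherwise
        print('%s: args=%s sections=%s'
              % (doc.symbol, list(doc.args), [s.name for s in doc.sections]))
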
diff --git a/scripts/qapi2texi.py b/scripts/qapi2texi.py
new file mode 100755
index 0000000000..83ded95c2d
--- /dev/null
+++ b/scripts/qapi2texi.py
@@ -0,0 +1,271 @@
+#!/usr/bin/env python
+# QAPI texi generator
+#
+# This work is licensed under the terms of the GNU LGPL, version 2+.
+# See the COPYING file in the top-level directory.
+"""This script produces the documentation of a qapi schema in texinfo format"""
+import re
+import sys
+
+import qapi
+
+COMMAND_FMT = """
+@deftypefn {type} {{}} {name}
+
+{body}
+
+@end deftypefn
+
+""".format
+
+ENUM_FMT = """
+@deftp Enum {name}
+
+{body}
+
+@end deftp
+
+""".format
+
+STRUCT_FMT = """
+@deftp {{{type}}} {name}
+
+{body}
+
+@end deftp
+
+""".format
+
+EXAMPLE_FMT = """@example
+{code}
+@end example
+""".format
+
+
+def subst_strong(doc):
+ """Replaces *foo* by @strong{foo}"""
+ return re.sub(r'\*([^*\n]+)\*', r'@emph{\1}', doc)
+
+
+def subst_emph(doc):
+ """Replaces _foo_ by @emph{foo}"""
+ return re.sub(r'\b_([^_\n]+)_\b', r' @emph{\1} ', doc)
+
+
+def subst_vars(doc):
+ """Replaces @var by @code{var}"""
+ return re.sub(r'@([\w-]+)', r'@code{\1}', doc)
+
+
+def subst_braces(doc):
+ """Replaces {} with @{ @}"""
+ return doc.replace("{", "@{").replace("}", "@}")
+
+
+def texi_example(doc):
+ """Format @example"""
+ # TODO: Neglects to escape @ characters.
+ # We should probably escape them in subst_braces(), and rename the
+    # function to subst_special() or subst_texi_special(). If we do that, we
+ # need to delay it until after subst_vars() in texi_format().
+ doc = subst_braces(doc).strip('\n')
+ return EXAMPLE_FMT(code=doc)
+
+
+def texi_format(doc):
+ """
+ Format documentation
+
+ Lines starting with:
+ - |: generates an @example
+ - =: generates @section
+ - ==: generates @subsection
+ - 1. or 1): generates an @enumerate @item
+ - */-: generates an @itemize list
+ """
+ lines = []
+ doc = subst_braces(doc)
+ doc = subst_vars(doc)
+ doc = subst_emph(doc)
+ doc = subst_strong(doc)
+ inlist = ""
+ lastempty = False
+ for line in doc.split('\n'):
+ empty = line == ""
+
+ # FIXME: Doing this in a single if / elif chain is
+ # problematic. For instance, a line without markup terminates
+ # a list if it follows a blank line (reaches the final elif),
+ # but a line with some *other* markup, such as a = title
+ # doesn't.
+ #
+ # Make sure to update section "Documentation markup" in
+ # docs/qapi-code-gen.txt when fixing this.
+ if line.startswith("| "):
+ line = EXAMPLE_FMT(code=line[2:])
+ elif line.startswith("= "):
+ line = "@section " + line[2:]
+ elif line.startswith("== "):
+ line = "@subsection " + line[3:]
+ elif re.match(r'^([0-9]*\.) ', line):
+ if not inlist:
+ lines.append("@enumerate")
+ inlist = "enumerate"
+ line = line[line.find(" ")+1:]
+ lines.append("@item")
+ elif re.match(r'^[*-] ', line):
+ if not inlist:
+ lines.append("@itemize %s" % {'*': "@bullet",
+ '-': "@minus"}[line[0]])
+ inlist = "itemize"
+ lines.append("@item")
+ line = line[2:]
+ elif lastempty and inlist:
+ lines.append("@end %s\n" % inlist)
+ inlist = ""
+
+ lastempty = empty
+ lines.append(line)
+
+ if inlist:
+ lines.append("@end %s\n" % inlist)
+ return "\n".join(lines)
+
+
+def texi_body(doc):
+ """
+ Format the body of a symbol documentation:
+ - main body
+ - table of arguments
+ - followed by "Returns/Notes/Since/Example" sections
+ """
+ body = texi_format(str(doc.body)) + "\n"
+ if doc.args:
+ body += "@table @asis\n"
+ for arg, section in doc.args.iteritems():
+ desc = str(section)
+ opt = ''
+ if "#optional" in desc:
+ desc = desc.replace("#optional", "")
+ opt = ' (optional)'
+ body += "@item @code{'%s'}%s\n%s\n" % (arg, opt,
+ texi_format(desc))
+ body += "@end table\n"
+
+ for section in doc.sections:
+ name, doc = (section.name, str(section))
+ func = texi_format
+ if name.startswith("Example"):
+ func = texi_example
+
+ if name:
+ # FIXME the indentation produced by @quotation in .txt and
+ # .html output is confusing
+ body += "\n@quotation %s\n%s\n@end quotation" % \
+ (name, func(doc))
+ else:
+ body += func(doc)
+
+ return body
+
+
+def texi_alternate(expr, doc):
+ """Format an alternate to texi"""
+ body = texi_body(doc)
+ return STRUCT_FMT(type="Alternate",
+ name=doc.symbol,
+ body=body)
+
+
+def texi_union(expr, doc):
+ """Format a union to texi"""
+ discriminator = expr.get("discriminator")
+ if discriminator:
+ union = "Flat Union"
+ else:
+ union = "Simple Union"
+
+ body = texi_body(doc)
+ return STRUCT_FMT(type=union,
+ name=doc.symbol,
+ body=body)
+
+
+def texi_enum(expr, doc):
+ """Format an enum to texi"""
+ for i in expr['data']:
+ if i not in doc.args:
+ doc.args[i] = ''
+ body = texi_body(doc)
+ return ENUM_FMT(name=doc.symbol,
+ body=body)
+
+
+def texi_struct(expr, doc):
+ """Format a struct to texi"""
+ body = texi_body(doc)
+ return STRUCT_FMT(type="Struct",
+ name=doc.symbol,
+ body=body)
+
+
+def texi_command(expr, doc):
+ """Format a command to texi"""
+ body = texi_body(doc)
+ return COMMAND_FMT(type="Command",
+ name=doc.symbol,
+ body=body)
+
+
+def texi_event(expr, doc):
+ """Format an event to texi"""
+ body = texi_body(doc)
+ return COMMAND_FMT(type="Event",
+ name=doc.symbol,
+ body=body)
+
+
+def texi_expr(expr, doc):
+ """Format an expr to texi"""
+ (kind, _) = expr.items()[0]
+
+ fmt = {"command": texi_command,
+ "struct": texi_struct,
+ "enum": texi_enum,
+ "union": texi_union,
+ "alternate": texi_alternate,
+ "event": texi_event}[kind]
+
+ return fmt(expr, doc)
+
+
+def texi(docs):
+ """Convert QAPI schema expressions to texi documentation"""
+ res = []
+ for doc in docs:
+ expr = doc.expr
+ if not expr:
+ res.append(texi_body(doc))
+ continue
+ try:
+ doc = texi_expr(expr, doc)
+ res.append(doc)
+ except:
+ print >>sys.stderr, "error at @%s" % doc.info
+ raise
+
+ return '\n'.join(res)
+
+
+def main(argv):
+ """Takes schema argument, prints result to stdout"""
+ if len(argv) != 2:
+ print >>sys.stderr, "%s: need exactly 1 argument: SCHEMA" % argv[0]
+ sys.exit(1)
+
+ schema = qapi.QAPISchema(argv[1])
+ print texi(schema.docs)
+
+
+if __name__ == "__main__":
+ main(sys.argv)
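
texi_format() drives most of the output: '=' and '==' become @section/@subsection, '|' lines become @example blocks, and '*', '-' or numbered prefixes open @itemize/@enumerate lists that are closed when a blank line is followed by ordinary text or at the end of the block. A small smoke test, again assuming scripts/ is importable; the expected output shown in the comment is an approximation of what the rules above produce, not something generated by this patch:

    from qapi2texi import texi_format

    print(texi_format("== Accelerators\n\n- kvm\n- tcg\n"))
    # Expected, roughly:
    #   @subsection Accelerators
    #
    #   @itemize @minus
    #   @item
    #   kvm
    #   @item
    #   tcg
    #
    #   @end itemize
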
diff --git a/scripts/texi2pod.pl b/scripts/texi2pod.pl
index 8767662d30..6e8fec41a1 100755
--- a/scripts/texi2pod.pl
+++ b/scripts/texi2pod.pl
@@ -37,6 +37,7 @@ $inf = "";
$ibase = "";
@ipath = ();
$encoding = undef;
+@args = ();
while ($_ = shift) {
if (/^-D(.*)$/) {
@@ -162,7 +163,8 @@ while(<$inf>) {
if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex)$/) {
$skipping = pop @skstack;
next;
- } elsif ($ended =~ /^(?:example|smallexample|display)$/) {
+ } elsif ($ended =~ /^(?:example|smallexample|display
+ |quotation|deftp|deftypefn)$/x) {
$shift = "";
$_ = ""; # need a paragraph break
} elsif ($ended =~ /^(?:itemize|enumerate|[fv]?table)$/) {
@@ -303,6 +305,7 @@ while(<$inf>) {
$ic =~ s/\@(?:code|kbd)/C/;
$ic =~ s/\@(?:dfn|var|emph|cite|i)/I/;
$ic =~ s/\@(?:file)/F/;
+ $ic =~ s/\@(?:asis)//;
$_ = "\n=over 4\n";
};
@@ -323,10 +326,54 @@ while(<$inf>) {
$_ = "\n=item ".join (" : ", @columns)."\n";
};
+ /^\@(quotation)\s*(.+)?$/ and do {
+ push @endwstack, $endw;
+ $endw = $1;
+ $_ = "\n$2:"
+ };
+
+ /^{(.*)}$|^(.*)$/ and $#args > 0 and do {
+ $kind = $args[0];
+ $arguments = $1 // "";
+ if ($endw eq "deftypefn") {
+ $ret = $args[1];
+ $fname = "B<$args[2]>";
+ $_ = $ret ? "$ret " : "";
+ $_ .= "$fname $arguments ($kind)";
+ } else {
+ $_ = "B<$args[1]> ($kind)\n\n$arguments";
+ }
+ @args = ();
+ };
+
+ /^\@(deftp)\s*(.+)?$/ and do {
+ push @endwstack, $endw;
+ $endw = $1;
+ $arg = $2;
+ $arg =~ s/{([^}]*)}/$1/g;
+ $arg =~ s/\@$//;
+ @args = split (/ /, $arg);
+ $_ = "";
+ };
+
+ /^\@(deftypefn)\s*(.+)?$/ and do {
+ push @endwstack, $endw;
+ $endw = $1;
+ $arg = $2;
+ $arg =~ s/{([^}]*)}/$1/g;
+ $arg =~ s/\@$//;
+ @args = split (/ /, $arg);
+ $_ = "";
+ };
+
/^\@itemx?\s*(.+)?$/ and do {
if (defined $1) {
- # Entity escapes prevent munging by the <> processing below.
- $_ = "\n=item $ic\&LT;$1\&GT;\n";
+ if ($ic eq "") {
+ $_ = "\n=item $1\n";
+ } else {
+ # Entity escapes prevent munging by the <> processing below.
+ $_ = "\n=item $ic\&LT;$1\&GT;\n";
+ }
} else {
$_ = "\n=item $ic\n";
$ic =~ y/A-Ya-y/B-Zb-z/;
@@ -388,6 +435,7 @@ sub postprocess
s/\@sc\{([^\}]*)\}/\U$1/g;
s/\@file\{([^\}]*)\}/F<$1>/g;
s/\@w\{([^\}]*)\}/S<$1>/g;
+ s/\@t\{([^\}]*)\}/$1/g;
s/\@(?:dmn|math)\{([^\}]*)\}/$1/g;
# keep references of the form @ref{...}, print them bold
diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh
index 08c4c4ae54..72cf1fbf0a 100755
--- a/scripts/update-linux-headers.sh
+++ b/scripts/update-linux-headers.sh
@@ -51,7 +51,7 @@ cp_portable() {
-e 's/__be\([0-9][0-9]*\)/uint\1_t/g' \
-e 's/"\(input-event-codes\.h\)"/"standard-headers\/linux\/\1"/' \
-e 's/<linux\/\([^>]*\)>/"standard-headers\/linux\/\1"/' \
- -e 's/__bitwise__//' \
+ -e 's/__bitwise//' \
-e 's/__attribute__((packed))/QEMU_PACKED/' \
-e 's/__inline__/inline/' \
-e '/sys\/ioctl.h/d' \
diff --git a/stubs/Makefile.objs b/stubs/Makefile.objs
index 2b5bb74fce..a187295161 100644
--- a/stubs/Makefile.objs
+++ b/stubs/Makefile.objs
@@ -10,26 +10,18 @@ stub-obj-y += cpu-get-clock.o
stub-obj-y += cpu-get-icount.o
stub-obj-y += dump.o
stub-obj-y += error-printf.o
-stub-obj-y += fdset-add-fd.o
-stub-obj-y += fdset-find-fd.o
-stub-obj-y += fdset-get-fd.o
-stub-obj-y += fdset-remove-fd.o
+stub-obj-y += fdset.o
stub-obj-y += gdbstub.o
-stub-obj-y += get-fd.o
-stub-obj-y += get-next-serial.o
stub-obj-y += get-vm-name.o
stub-obj-y += iothread.o
stub-obj-y += iothread-lock.o
stub-obj-y += is-daemonized.o
stub-obj-y += machine-init-done.o
stub-obj-y += migr-blocker.o
-stub-obj-y += mon-is-qmp.o
-stub-obj-y += monitor-init.o
+stub-obj-y += monitor.o
stub-obj-y += notify-event.o
stub-obj-y += qtest.o
stub-obj-y += replay.o
-stub-obj-y += replay-user.o
-stub-obj-y += reset.o
stub-obj-y += runstate-check.o
stub-obj-y += set-fd-handler.o
stub-obj-y += slirp.o
@@ -39,14 +31,7 @@ stub-obj-y += uuid.o
stub-obj-y += vm-stop.o
stub-obj-y += vmstate.o
stub-obj-$(CONFIG_WIN32) += fd-register.o
-stub-obj-y += cpus.o
-stub-obj-y += kvm.o
stub-obj-y += qmp_pc_dimm_device_list.o
stub-obj-y += target-monitor-defs.o
stub-obj-y += target-get-monitor-def.o
-stub-obj-y += vhost.o
-stub-obj-y += iohandler.o
-stub-obj-y += smbios_type_38.o
-stub-obj-y += ipmi.o
stub-obj-y += pc_madt_cpu_entry.o
-stub-obj-y += migration-colo.o
diff --git a/stubs/cpus.c b/stubs/cpus.c
deleted file mode 100644
index e19272297a..0000000000
--- a/stubs/cpus.c
+++ /dev/null
@@ -1,11 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "qom/cpu.h"
-
-void cpu_resume(CPUState *cpu)
-{
-}
-
-void qemu_init_vcpu(CPUState *cpu)
-{
-}
diff --git a/stubs/fdset-add-fd.c b/stubs/fdset-add-fd.c
deleted file mode 100644
index bf9e60aed5..0000000000
--- a/stubs/fdset-add-fd.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "monitor/monitor.h"
-
-int monitor_fdset_dup_fd_add(int64_t fdset_id, int dup_fd)
-{
- return -1;
-}
diff --git a/stubs/fdset-find-fd.c b/stubs/fdset-find-fd.c
deleted file mode 100644
index 1d9caf37ec..0000000000
--- a/stubs/fdset-find-fd.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "monitor/monitor.h"
-
-int monitor_fdset_dup_fd_find(int dup_fd)
-{
- return -1;
-}
diff --git a/stubs/fdset-get-fd.c b/stubs/fdset-get-fd.c
deleted file mode 100644
index 5325044b5a..0000000000
--- a/stubs/fdset-get-fd.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "monitor/monitor.h"
-
-int monitor_fdset_get_fd(int64_t fdset_id, int flags)
-{
- return -1;
-}
diff --git a/stubs/fdset-remove-fd.c b/stubs/fdset-remove-fd.c
deleted file mode 100644
index 47ea297210..0000000000
--- a/stubs/fdset-remove-fd.c
+++ /dev/null
@@ -1,7 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "monitor/monitor.h"
-
-void monitor_fdset_dup_fd_remove(int dupfd)
-{
-}
diff --git a/stubs/fdset.c b/stubs/fdset.c
new file mode 100644
index 0000000000..6020cf28c8
--- /dev/null
+++ b/stubs/fdset.c
@@ -0,0 +1,22 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "monitor/monitor.h"
+
+int monitor_fdset_dup_fd_add(int64_t fdset_id, int dup_fd)
+{
+ return -1;
+}
+
+int monitor_fdset_dup_fd_find(int dup_fd)
+{
+ return -1;
+}
+
+int monitor_fdset_get_fd(int64_t fdset_id, int flags)
+{
+ return -1;
+}
+
+void monitor_fdset_dup_fd_remove(int dupfd)
+{
+}
diff --git a/stubs/get-next-serial.c b/stubs/get-next-serial.c
deleted file mode 100644
index 6ff6a6d3b2..0000000000
--- a/stubs/get-next-serial.c
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-
-CharDriverState *serial_hds[0];
diff --git a/stubs/iohandler.c b/stubs/iohandler.c
deleted file mode 100644
index 22b0ee5b0a..0000000000
--- a/stubs/iohandler.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "qemu/main-loop.h"
-
-AioContext *iohandler_get_aio_context(void)
-{
- abort();
-}
diff --git a/stubs/kvm.c b/stubs/kvm.c
deleted file mode 100644
index ddd620499d..0000000000
--- a/stubs/kvm.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "sysemu/kvm.h"
-
-int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
-{
- return 0;
-}
diff --git a/stubs/migr-blocker.c b/stubs/migr-blocker.c
index 8ab3604dfa..a5ba18f53d 100644
--- a/stubs/migr-blocker.c
+++ b/stubs/migr-blocker.c
@@ -2,8 +2,9 @@
#include "qemu-common.h"
#include "migration/migration.h"
-void migrate_add_blocker(Error *reason)
+int migrate_add_blocker(Error *reason, Error **errp)
{
+ return 0;
}
void migrate_del_blocker(Error *reason)
diff --git a/stubs/migration-colo.c b/stubs/migration-colo.c
deleted file mode 100644
index 7811764c4b..0000000000
--- a/stubs/migration-colo.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * COarse-grain LOck-stepping Virtual Machines for Non-stop Service (COLO)
- * (a.k.a. Fault Tolerance or Continuous Replication)
- *
- * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
- * Copyright (c) 2016 FUJITSU LIMITED
- * Copyright (c) 2016 Intel Corporation
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or
- * later. See the COPYING file in the top-level directory.
- */
-
-#include "qemu/osdep.h"
-#include "migration/colo.h"
-#include "qmp-commands.h"
-
-bool colo_supported(void)
-{
- return false;
-}
-
-bool migration_in_colo_state(void)
-{
- return false;
-}
-
-bool migration_incoming_in_colo_state(void)
-{
- return false;
-}
-
-void migrate_start_colo_process(MigrationState *s)
-{
-}
-
-void *colo_process_incoming_thread(void *opaque)
-{
- return NULL;
-}
-
-void qmp_x_colo_lost_heartbeat(Error **errp)
-{
- error_setg(errp, "COLO is not supported, please rerun configure"
- " with --enable-colo option in order to support"
- " COLO feature");
-}
diff --git a/stubs/mon-is-qmp.c b/stubs/mon-is-qmp.c
deleted file mode 100644
index a8344ced80..0000000000
--- a/stubs/mon-is-qmp.c
+++ /dev/null
@@ -1,10 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "monitor/monitor.h"
-
-Monitor *cur_mon;
-
-bool monitor_cur_is_qmp(void)
-{
- return false;
-}
diff --git a/stubs/monitor-init.c b/stubs/monitor-init.c
deleted file mode 100644
index de1bc7cd54..0000000000
--- a/stubs/monitor-init.c
+++ /dev/null
@@ -1,7 +0,0 @@
-#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "monitor/monitor.h"
-
-void monitor_init(CharDriverState *chr, int flags)
-{
-}
diff --git a/stubs/get-fd.c b/stubs/monitor.c
index 7dfdfb55f7..1d574b1c6f 100644
--- a/stubs/get-fd.c
+++ b/stubs/monitor.c
@@ -3,8 +3,14 @@
#include "qemu-common.h"
#include "monitor/monitor.h"
+Monitor *cur_mon = NULL;
+
int monitor_get_fd(Monitor *mon, const char *name, Error **errp)
{
error_setg(errp, "only QEMU supports file descriptor passing");
return -1;
}
+
+void monitor_init(CharDriverState *chr, int flags)
+{
+}
diff --git a/stubs/pc_madt_cpu_entry.c b/stubs/pc_madt_cpu_entry.c
index 427e772868..f88d6a090b 100644
--- a/stubs/pc_madt_cpu_entry.c
+++ b/stubs/pc_madt_cpu_entry.c
@@ -2,6 +2,6 @@
#include "hw/i386/pc.h"
void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
- CPUArchIdList *apic_ids, GArray *entry)
+ const CPUArchIdList *apic_ids, GArray *entry)
{
}
diff --git a/stubs/replay-user.c b/stubs/replay-user.c
deleted file mode 100644
index b29e7ebba1..0000000000
--- a/stubs/replay-user.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * replay.c
- *
- * Copyright (c) 2010-2015 Institute for System Programming
- * of the Russian Academy of Sciences.
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#include "qemu/osdep.h"
-#include "sysemu/replay.h"
-
-bool replay_exception(void)
-{
- return true;
-}
-
-bool replay_has_exception(void)
-{
- return false;
-}
-
-bool replay_interrupt(void)
-{
- return true;
-}
-
-bool replay_has_interrupt(void)
-{
- return false;
-}
diff --git a/stubs/reset.c b/stubs/reset.c
deleted file mode 100644
index 5d47711f9a..0000000000
--- a/stubs/reset.c
+++ /dev/null
@@ -1,14 +0,0 @@
-#include "qemu/osdep.h"
-#include "hw/hw.h"
-
-/* Stub functions for binaries that never call qemu_devices_reset(),
- * and don't need to keep track of the reset handler list.
- */
-
-void qemu_register_reset(QEMUResetHandler *func, void *opaque)
-{
-}
-
-void qemu_unregister_reset(QEMUResetHandler *func, void *opaque)
-{
-}
diff --git a/stubs/set-fd-handler.c b/stubs/set-fd-handler.c
index 06a5da48f1..acbe65c1da 100644
--- a/stubs/set-fd-handler.c
+++ b/stubs/set-fd-handler.c
@@ -15,6 +15,7 @@ void aio_set_fd_handler(AioContext *ctx,
bool is_external,
IOHandler *io_read,
IOHandler *io_write,
+ AioPollFn *io_poll,
void *opaque)
{
abort();
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index 30d77ce71c..b4f97983e5 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -273,7 +273,7 @@ static void alpha_cpu_initfn(Object *obj)
CPUAlphaState *env = &cpu->env;
cs->env_ptr = env;
- tlb_flush(cs, 1);
+ tlb_flush(cs);
alpha_translate_init();
diff --git a/target/alpha/helper.h b/target/alpha/helper.h
index 004221df8c..d60f208703 100644
--- a/target/alpha/helper.h
+++ b/target/alpha/helper.h
@@ -3,10 +3,6 @@ DEF_HELPER_FLAGS_1(load_pcc, TCG_CALL_NO_RWG_SE, i64, env)
DEF_HELPER_FLAGS_3(check_overflow, TCG_CALL_NO_WG, void, env, i64, i64)
-DEF_HELPER_FLAGS_1(ctpop, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(ctlz, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(cttz, TCG_CALL_NO_RWG_SE, i64, i64)
-
DEF_HELPER_FLAGS_2(zap, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_FLAGS_2(zapnot, TCG_CALL_NO_RWG_SE, i64, i64, i64)
diff --git a/target/alpha/int_helper.c b/target/alpha/int_helper.c
index 19bebfe742..e43b50a743 100644
--- a/target/alpha/int_helper.c
+++ b/target/alpha/int_helper.c
@@ -24,21 +24,6 @@
#include "qemu/host-utils.h"
-uint64_t helper_ctpop(uint64_t arg)
-{
- return ctpop64(arg);
-}
-
-uint64_t helper_ctlz(uint64_t arg)
-{
- return clz64(arg);
-}
-
-uint64_t helper_cttz(uint64_t arg)
-{
- return ctz64(arg);
-}
-
uint64_t helper_zapnot(uint64_t val, uint64_t mskb)
{
uint64_t mask;
diff --git a/target/alpha/machine.c b/target/alpha/machine.c
index b99a123a39..a102645315 100644
--- a/target/alpha/machine.c
+++ b/target/alpha/machine.c
@@ -5,17 +5,19 @@
#include "hw/boards.h"
#include "migration/cpu.h"
-static int get_fpcr(QEMUFile *f, void *opaque, size_t size)
+static int get_fpcr(QEMUFile *f, void *opaque, size_t size, VMStateField *field)
{
CPUAlphaState *env = opaque;
cpu_alpha_store_fpcr(env, qemu_get_be64(f));
return 0;
}
-static void put_fpcr(QEMUFile *f, void *opaque, size_t size)
+static int put_fpcr(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
CPUAlphaState *env = opaque;
qemu_put_be64(f, cpu_alpha_load_fpcr(env));
+ return 0;
}
static const VMStateInfo vmstate_fpcr = {
diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c
index bec1e178be..652195de6f 100644
--- a/target/alpha/sys_helper.c
+++ b/target/alpha/sys_helper.c
@@ -44,7 +44,7 @@ uint64_t helper_load_pcc(CPUAlphaState *env)
#ifndef CONFIG_USER_ONLY
void helper_tbia(CPUAlphaState *env)
{
- tlb_flush(CPU(alpha_env_get_cpu(env)), 1);
+ tlb_flush(CPU(alpha_env_get_cpu(env)));
}
void helper_tbis(CPUAlphaState *env, uint64_t p)
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index 114927b751..055286a7b8 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -949,7 +949,13 @@ static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
uint8_t lit, uint8_t byte_mask)
{
if (islit) {
- tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
+ int pos = (64 - lit * 8) & 0x3f;
+ int len = cto32(byte_mask) * 8;
+ if (pos < len) {
+ tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
+ } else {
+ tcg_gen_movi_i64(vc, 0);
+ }
} else {
TCGv tmp = tcg_temp_new();
tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
@@ -966,38 +972,44 @@ static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
uint8_t lit, uint8_t byte_mask)
{
if (islit) {
- tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
+ int pos = (lit & 7) * 8;
+ int len = cto32(byte_mask) * 8;
+ if (pos + len >= 64) {
+ len = 64 - pos;
+ }
+ tcg_gen_extract_i64(vc, va, pos, len);
} else {
TCGv tmp = tcg_temp_new();
tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
tcg_gen_shli_i64(tmp, tmp, 3);
tcg_gen_shr_i64(vc, va, tmp);
tcg_temp_free(tmp);
+ gen_zapnoti(vc, vc, byte_mask);
}
- gen_zapnoti(vc, vc, byte_mask);
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
uint8_t lit, uint8_t byte_mask)
{
- TCGv tmp = tcg_temp_new();
-
- /* The instruction description has us left-shift the byte mask and extract
- bits <15:8> and apply that zap at the end. This is equivalent to simply
- performing the zap first and shifting afterward. */
- gen_zapnoti(tmp, va, byte_mask);
-
if (islit) {
- lit &= 7;
- if (unlikely(lit == 0)) {
- tcg_gen_movi_i64(vc, 0);
+ int pos = 64 - (lit & 7) * 8;
+ int len = cto32(byte_mask) * 8;
+ if (pos < len) {
+ tcg_gen_extract_i64(vc, va, pos, len - pos);
} else {
- tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
+ tcg_gen_movi_i64(vc, 0);
}
} else {
+ TCGv tmp = tcg_temp_new();
TCGv shift = tcg_temp_new();
+ /* The instruction description has us left-shift the byte mask
+ and extract bits <15:8> and apply that zap at the end. This
+ is equivalent to simply performing the zap first and shifting
+ afterward. */
+ gen_zapnoti(tmp, va, byte_mask);
+
/* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
portably by splitting the shift into two parts: shift_count-1 and 1.
Arrange for the -1 by using ones-complement instead of
@@ -1010,32 +1022,37 @@ static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
tcg_gen_shr_i64(vc, tmp, shift);
tcg_gen_shri_i64(vc, vc, 1);
tcg_temp_free(shift);
+ tcg_temp_free(tmp);
}
- tcg_temp_free(tmp);
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
uint8_t lit, uint8_t byte_mask)
{
- TCGv tmp = tcg_temp_new();
-
- /* The instruction description has us left-shift the byte mask
- the same number of byte slots as the data and apply the zap
- at the end. This is equivalent to simply performing the zap
- first and shifting afterward. */
- gen_zapnoti(tmp, va, byte_mask);
-
if (islit) {
- tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
+ int pos = (lit & 7) * 8;
+ int len = cto32(byte_mask) * 8;
+ if (pos + len > 64) {
+ len = 64 - pos;
+ }
+ tcg_gen_deposit_z_i64(vc, va, pos, len);
} else {
+ TCGv tmp = tcg_temp_new();
TCGv shift = tcg_temp_new();
+
+ /* The instruction description has us left-shift the byte mask
+ and extract bits <15:8> and apply that zap at the end. This
+ is equivalent to simply performing the zap first and shifting
+ afterward. */
+ gen_zapnoti(tmp, va, byte_mask);
+
tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
tcg_gen_shli_i64(shift, shift, 3);
tcg_gen_shl_i64(vc, tmp, shift);
tcg_temp_free(shift);
+ tcg_temp_free(tmp);
}
- tcg_temp_free(tmp);
}
/* MSKWH, MSKLH, MSKQH */
@@ -2524,7 +2541,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
REQUIRE_REG_31(ra);
REQUIRE_NO_LIT;
- gen_helper_ctpop(vc, vb);
+ tcg_gen_ctpop_i64(vc, vb);
break;
case 0x31:
/* PERR */
@@ -2538,14 +2555,14 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
REQUIRE_REG_31(ra);
REQUIRE_NO_LIT;
- gen_helper_ctlz(vc, vb);
+ tcg_gen_clzi_i64(vc, vb, 64);
break;
case 0x33:
/* CTTZ */
REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
REQUIRE_REG_31(ra);
REQUIRE_NO_LIT;
- gen_helper_cttz(vc, vb);
+ tcg_gen_ctzi_i64(vc, vb, 64);
break;
case 0x34:
/* UNPKBW */
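
An aside on the CTPOP/CTLZ/CTTZ conversion above: as I understand the TCG count ops, the final constant operand of tcg_gen_clzi_i64/tcg_gen_ctzi_i64 is the value produced for a zero input, so passing 64 preserves the behaviour of the deleted clz64()/ctz64() helpers. A minimal standalone C sketch of that zero-input convention (the *_model names are illustrative, not QEMU functions):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Count leading/trailing zeros with a defined result for a zero input,
     * mirroring the "return 64 when the source is 0" convention that the
     * tcg_gen_clzi_i64(vc, vb, 64) / tcg_gen_ctzi_i64(vc, vb, 64) calls
     * above encode in their final argument.  The GCC/Clang builtins are
     * undefined for 0, hence the explicit guard. */
    static uint64_t clz64_model(uint64_t x)
    {
        return x ? (uint64_t)__builtin_clzll(x) : 64;
    }

    static uint64_t ctz64_model(uint64_t x)
    {
        return x ? (uint64_t)__builtin_ctzll(x) : 64;
    }

    int main(void)
    {
        assert(clz64_model(0) == 64 && ctz64_model(0) == 64);
        assert(clz64_model(1) == 63 && ctz64_model(1) == 0);
        assert(clz64_model(1ull << 63) == 0 && ctz64_model(1ull << 63) == 63);
        printf("clz/ctz zero-input convention OK\n");
        return 0;
    }

The same convention appears to explain the tcg_gen_clzi_i32(..., 32) and tcg_gen_clzi_tl(..., TARGET_LONG_BITS) calls in the ARM and CRIS hunks further down.
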
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 99f0dbebb9..a941f6611b 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -31,7 +31,7 @@
#endif
#include "hw/arm/arm.h"
#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
#include "kvm_arm.h"
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
@@ -122,7 +122,8 @@ static void arm_cpu_reset(CPUState *s)
acc->parent_reset(s);
- memset(env, 0, offsetof(CPUARMState, features));
+ memset(env, 0, offsetof(CPUARMState, end_reset_fields));
+
g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
@@ -226,8 +227,6 @@ static void arm_cpu_reset(CPUState *s)
&env->vfp.fp_status);
set_float_detect_tininess(float_tininess_before_rounding,
&env->vfp.standard_fp_status);
- tlb_flush(s, 1);
-
#ifndef CONFIG_USER_ONLY
if (kvm_enabled()) {
kvm_arm_reset_vcpu(cpu);
@@ -466,6 +465,9 @@ static void arm_cpu_initfn(Object *obj)
arm_gt_stimer_cb, cpu);
qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
ARRAY_SIZE(cpu->gt_timer_outputs));
+
+ qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
+ "gicv3-maintenance-interrupt", 1);
#endif
/* DTB consumers generally don't in fact care what the 'compatible'
@@ -494,6 +496,9 @@ static Property arm_cpu_reset_hivecs_property =
static Property arm_cpu_rvbar_property =
DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);
+static Property arm_cpu_has_el2_property =
+ DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
+
static Property arm_cpu_has_el3_property =
DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
@@ -544,6 +549,11 @@ static void arm_cpu_post_init(Object *obj)
#endif
}
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property,
+ &error_abort);
+ }
+
if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
&error_abort);
@@ -597,6 +607,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
} else {
set_feature(env, ARM_FEATURE_V6);
}
+
+ /* Always define VBAR for V7 CPUs even if it doesn't exist in
+ * non-EL3 configs. This is needed by some legacy boards.
+ */
+ set_feature(env, ARM_FEATURE_VBAR);
}
if (arm_feature(env, ARM_FEATURE_V6K)) {
set_feature(env, ARM_FEATURE_V6);
@@ -687,6 +702,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu->id_aa64pfr0 &= ~0xf000;
}
+ if (!cpu->has_el2) {
+ unset_feature(env, ARM_FEATURE_EL2);
+ }
+
if (!cpu->has_pmu || !kvm_enabled()) {
cpu->has_pmu = false;
unset_feature(env, ARM_FEATURE_PMU);
@@ -721,6 +740,10 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
}
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ set_feature(env, ARM_FEATURE_VBAR);
+ }
+
register_cp_regs_for_features(cpu);
arm_cpu_register_gdb_regs_for_features(cpu);
@@ -1055,7 +1078,7 @@ static void cortex_a8_initfn(Object *obj)
cpu->midr = 0x410fc080;
cpu->reset_fpsid = 0x410330c0;
cpu->mvfr0 = 0x11110222;
- cpu->mvfr1 = 0x00011100;
+ cpu->mvfr1 = 0x00011111;
cpu->ctr = 0x82048004;
cpu->reset_sctlr = 0x00c50078;
cpu->id_pfr0 = 0x1031;
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index ca5c849ed6..151a5d754e 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -491,9 +491,12 @@ typedef struct CPUARMState {
struct CPUBreakpoint *cpu_breakpoint[16];
struct CPUWatchpoint *cpu_watchpoint[16];
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
- /* These fields after the common ones so they are preserved on reset. */
+ /* Fields after CPU_COMMON are preserved across CPU reset. */
/* Internal CPU feature flags. */
uint64_t features;
@@ -555,6 +558,8 @@ struct ARMCPU {
QEMUTimer *gt_timer[NUM_GTIMERS];
/* GPIO outputs for generic timer */
qemu_irq gt_timer_outputs[NUM_GTIMERS];
+ /* GPIO output for GICv3 maintenance interrupt signal */
+ qemu_irq gicv3_maintenance_interrupt;
/* MemoryRegion to use for secure physical accesses */
MemoryRegion *secure_memory;
@@ -572,6 +577,8 @@ struct ARMCPU {
bool start_powered_off;
/* CPU currently in PSCI powered-off state */
bool powered_off;
+ /* CPU has virtualization extension */
+ bool has_el2;
/* CPU has security extension */
bool has_el3;
/* CPU has PMU (Performance Monitor Unit) */
@@ -657,6 +664,11 @@ struct ARMCPU {
uint32_t dcz_blocksize;
uint64_t rvbar;
+ /* Configurable aspects of GIC cpu interface (which is part of the CPU) */
+ int gic_num_lrs; /* number of list registers */
+ int gic_vpribits; /* number of virtual priority bits */
+ int gic_vprebits; /* number of virtual preemption bits */
+
ARMELChangeHook *el_change_hook;
void *el_change_hook_opaque;
};
@@ -1125,6 +1137,7 @@ enum arm_features {
ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
ARM_FEATURE_PMU, /* has PMU support */
+ ARM_FEATURE_VBAR, /* has cp15 VBAR */
};
static inline int arm_feature(CPUARMState *env, int feature)
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 549cb1ee93..670c07ab6e 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -110,6 +110,7 @@ static void aarch64_a57_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
set_feature(&cpu->env, ARM_FEATURE_EL3);
set_feature(&cpu->env, ARM_FEATURE_PMU);
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
@@ -147,6 +148,9 @@ static void aarch64_a57_initfn(Object *obj)
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
cpu->dcz_blocksize = 4; /* 64 bytes */
+ cpu->gic_num_lrs = 4;
+ cpu->gic_vpribits = 5;
+ cpu->gic_vprebits = 5;
define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
}
@@ -166,6 +170,7 @@ static void aarch64_a53_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
+ set_feature(&cpu->env, ARM_FEATURE_EL2);
set_feature(&cpu->env, ARM_FEATURE_EL3);
set_feature(&cpu->env, ARM_FEATURE_PMU);
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
@@ -201,6 +206,9 @@ static void aarch64_a53_initfn(Object *obj)
cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
cpu->dcz_blocksize = 4; /* 64 bytes */
+ cpu->gic_num_lrs = 4;
+ cpu->gic_vpribits = 5;
+ cpu->gic_vprebits = 5;
define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
}
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
index 98b97df461..d9df82cff5 100644
--- a/target/arm/helper-a64.c
+++ b/target/arm/helper-a64.c
@@ -54,26 +54,6 @@ int64_t HELPER(sdiv64)(int64_t num, int64_t den)
return num / den;
}
-uint64_t HELPER(clz64)(uint64_t x)
-{
- return clz64(x);
-}
-
-uint64_t HELPER(cls64)(uint64_t x)
-{
- return clrsb64(x);
-}
-
-uint32_t HELPER(cls32)(uint32_t x)
-{
- return clrsb32(x);
-}
-
-uint32_t HELPER(clz32)(uint32_t x)
-{
- return clz32(x);
-}
-
uint64_t HELPER(rbit64)(uint64_t x)
{
return revbit64(x);
diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
index dd32000e63..6f9eaba533 100644
--- a/target/arm/helper-a64.h
+++ b/target/arm/helper-a64.h
@@ -18,10 +18,6 @@
*/
DEF_HELPER_FLAGS_2(udiv64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_FLAGS_2(sdiv64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
-DEF_HELPER_FLAGS_1(clz64, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(cls64, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(cls32, TCG_CALL_NO_RWG_SE, i32, i32)
-DEF_HELPER_FLAGS_1(clz32, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr)
DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index b5b65caadf..7111c8cf18 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -464,7 +464,7 @@ static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
ARMCPU *cpu = arm_env_get_cpu(env);
raw_write(env, ri, value);
- tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
+ tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
@@ -475,7 +475,7 @@ static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* Unlike real hardware the qemu TLB uses virtual addresses,
* not modified virtual addresses, so this causes a TLB flush.
*/
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
raw_write(env, ri, value);
}
}
@@ -491,7 +491,7 @@ static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
* format) this register includes the ASID, so do a TLB flush.
* For PMSA it is purely a process ID and no action is needed.
*/
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
raw_write(env, ri, value);
}
@@ -502,7 +502,7 @@ static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* Invalidate all (TLBIALL) */
ARMCPU *cpu = arm_env_get_cpu(env);
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -520,7 +520,7 @@ static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* Invalidate by ASID (TLBIASID) */
ARMCPU *cpu = arm_env_get_cpu(env);
- tlb_flush(CPU(cpu), value == 0);
+ tlb_flush(CPU(cpu));
}
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -539,7 +539,7 @@ static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *other_cs;
CPU_FOREACH(other_cs) {
- tlb_flush(other_cs, 1);
+ tlb_flush(other_cs);
}
}
@@ -549,7 +549,7 @@ static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *other_cs;
CPU_FOREACH(other_cs) {
- tlb_flush(other_cs, value == 0);
+ tlb_flush(other_cs);
}
}
@@ -1252,12 +1252,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
.writefn = pmintenclr_write },
- { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW, .writefn = vbar_write,
- .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
- offsetof(CPUARMState, cp15.vbar_ns) },
- .resetvalue = 0 },
{ .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
.access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
@@ -2310,7 +2304,7 @@ static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
u32p += env->cp15.c6_rgnr;
- tlb_flush(CPU(cpu), 1); /* Mappings may have changed - purge! */
+ tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
*u32p = value;
}
@@ -2455,7 +2449,7 @@ static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* With LPAE the TTBCR could result in a change of ASID
* via the TTBCR.A1 bit, so do a TLB flush.
*/
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
vmsa_ttbcr_raw_write(env, ri, value);
}
@@ -2479,7 +2473,7 @@ static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
TCR *tcr = raw_ptr(env, ri);
/* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
tcr->raw_tcr = value;
}
@@ -2492,7 +2486,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
if (cpreg_field_is_64bit(ri)) {
ARMCPU *cpu = arm_env_get_cpu(env);
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
raw_write(env, ri, value);
}
@@ -3160,7 +3154,7 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
raw_write(env, ri, value);
/* ??? Lots of these bits are not implemented. */
/* This may enable/disable the MMU, so do a TLB flush. */
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3628,7 +3622,7 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
* HCR_DC Disables stage1 and enables stage2 translation
*/
if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
raw_write(env, ri, value);
}
@@ -4072,6 +4066,13 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
.cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tda,
.type = ARM_CP_NOP },
+ /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
+ * to save and restore a 32-bit guest's DBGVCR)
+ */
+ { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_tda,
+ .type = ARM_CP_NOP },
/* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
* Channel but Linux may try to access this register. The 32-bit
* alias is DBGDCCINT.
@@ -5094,6 +5095,19 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
}
+ if (arm_feature(env, ARM_FEATURE_VBAR)) {
+ ARMCPRegInfo vbar_cp_reginfo[] = {
+ { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .writefn = vbar_write,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
+ offsetof(CPUARMState, cp15.vbar_ns) },
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, vbar_cp_reginfo);
+ }
+
/* Generic registers whose values depend on the implementation */
{
ARMCPRegInfo sctlr = {
@@ -5207,6 +5221,7 @@ static void arm_cpu_add_definition(gpointer data, gpointer user_data)
info = g_malloc0(sizeof(*info));
info->name = g_strndup(typename,
strlen(typename) - strlen("-" TYPE_ARM_CPU));
+ info->q_typename = g_strdup(typename);
entry = g_malloc0(sizeof(*entry));
entry->value = info;
@@ -5718,11 +5733,6 @@ uint32_t HELPER(uxtb16)(uint32_t x)
return res;
}
-uint32_t HELPER(clz)(uint32_t x)
-{
- return clz32(x);
-}
-
int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
if (den == 0)
@@ -6396,6 +6406,20 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
}
offset = 4;
break;
+ case EXCP_VIRQ:
+ new_mode = ARM_CPU_MODE_IRQ;
+ addr = 0x18;
+ /* Disable IRQ and imprecise data aborts. */
+ mask = CPSR_A | CPSR_I;
+ offset = 4;
+ break;
+ case EXCP_VFIQ:
+ new_mode = ARM_CPU_MODE_FIQ;
+ addr = 0x1c;
+ /* Disable FIQ, IRQ and imprecise data aborts. */
+ mask = CPSR_A | CPSR_I | CPSR_F;
+ offset = 4;
+ break;
case EXCP_SMC:
new_mode = ARM_CPU_MODE_MON;
addr = 0x08;
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 84aa637629..df86bf7141 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -1,4 +1,3 @@
-DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
diff --git a/target/arm/machine.c b/target/arm/machine.c
index d90943b6db..487320db1d 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -17,7 +17,8 @@ static bool vfp_needed(void *opaque)
return arm_feature(env, ARM_FEATURE_VFP);
}
-static int get_fpscr(QEMUFile *f, void *opaque, size_t size)
+static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field)
{
ARMCPU *cpu = opaque;
CPUARMState *env = &cpu->env;
@@ -27,12 +28,14 @@ static int get_fpscr(QEMUFile *f, void *opaque, size_t size)
return 0;
}
-static void put_fpscr(QEMUFile *f, void *opaque, size_t size)
+static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
ARMCPU *cpu = opaque;
CPUARMState *env = &cpu->env;
qemu_put_be32(f, vfp_get_fpscr(env));
+ return 0;
}
static const VMStateInfo vmstate_fpscr = {
@@ -163,7 +166,8 @@ static const VMStateDescription vmstate_pmsav7 = {
}
};
-static int get_cpsr(QEMUFile *f, void *opaque, size_t size)
+static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field)
{
ARMCPU *cpu = opaque;
CPUARMState *env = &cpu->env;
@@ -180,7 +184,8 @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size)
return 0;
}
-static void put_cpsr(QEMUFile *f, void *opaque, size_t size)
+static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
ARMCPU *cpu = opaque;
CPUARMState *env = &cpu->env;
@@ -193,6 +198,7 @@ static void put_cpsr(QEMUFile *f, void *opaque, size_t size)
}
qemu_put_be32(f, val);
+ return 0;
}
static const VMStateInfo vmstate_cpsr = {
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index cd94216591..ba796d898e 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
@@ -972,6 +973,9 @@ void HELPER(exception_return)(CPUARMState *env)
} else {
env->regs[15] = env->elr_el[cur_el] & ~0x3;
}
+ qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
+ "AArch32 EL%d PC 0x%" PRIx32 "\n",
+ cur_el, new_el, env->regs[15]);
} else {
env->aarch64 = 1;
pstate_write(env, spsr);
@@ -980,6 +984,9 @@ void HELPER(exception_return)(CPUARMState *env)
}
aarch64_restore_sp(env, new_el);
env->pc = env->elr_el[cur_el];
+ qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
+ "AArch64 EL%d PC 0x%" PRIx64 "\n",
+ cur_el, new_el, env->pc);
}
arm_call_el_change_hook(arm_env_get_cpu(env));
@@ -1002,6 +1009,8 @@ illegal_return:
if (!arm_singlestep_active(env)) {
env->pstate &= ~PSTATE_SS;
}
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
+ "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
/* Return true if the linked breakpoint entry lbn passes its checks */
diff --git a/target/arm/psci.c b/target/arm/psci.c
index 14316eb0ae..64bf82eea1 100644
--- a/target/arm/psci.c
+++ b/target/arm/psci.c
@@ -148,17 +148,28 @@ void arm_handle_psci_call(ARMCPU *cpu)
case QEMU_PSCI_0_1_FN_CPU_ON:
case QEMU_PSCI_0_2_FN_CPU_ON:
case QEMU_PSCI_0_2_FN64_CPU_ON:
+ {
+ /* The PSCI spec mandates that newly brought up CPUs start
+ * in the highest exception level which exists and is enabled
+ * on the calling CPU. Since the QEMU PSCI implementation is
+ * acting as a "fake EL3" or "fake EL2" firmware, this for us
+ * means that we want to start at the highest NS exception level
+ * that we are providing to the guest.
+ * The execution mode should be that which is currently in use
+ * by the same exception level on the calling CPU.
+ * The CPU should be started with the context_id value
+ * in x0 (if AArch64) or r0 (if AArch32).
+ */
+ int target_el = arm_feature(env, ARM_FEATURE_EL2) ? 2 : 1;
+ bool target_aarch64 = arm_el_is_aa64(env, target_el);
+
mpidr = param[1];
entry = param[2];
context_id = param[3];
- /*
- * The PSCI spec mandates that newly brought up CPUs enter the
- * exception level of the caller in the same execution mode as
- * the caller, with context_id in x0/r0, respectively.
- */
- ret = arm_set_cpu_on(mpidr, entry, context_id, arm_current_el(env),
- is_a64(env));
+ ret = arm_set_cpu_on(mpidr, entry, context_id,
+ target_el, target_aarch64);
break;
+ }
case QEMU_PSCI_0_1_FN_CPU_OFF:
case QEMU_PSCI_0_2_FN_CPU_OFF:
goto cpu_off;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 6dc27a6115..d0352e2045 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -527,7 +527,7 @@ static inline void assert_fp_access_checked(DisasContext *s)
static inline int vec_reg_offset(DisasContext *s, int regno,
int element, TCGMemOp size)
{
- int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
+ int offs = 0;
#ifdef HOST_WORDS_BIGENDIAN
/* This is complicated slightly because vfp.regs[2n] is
* still the low half and vfp.regs[2n+1] the high half
@@ -540,6 +540,7 @@ static inline int vec_reg_offset(DisasContext *s, int regno,
#else
offs += element * (1 << size);
#endif
+ offs += offsetof(CPUARMState, vfp.regs[regno * 2]);
assert_fp_access_checked(s);
return offs;
}
@@ -2829,9 +2830,9 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
} else {
/* Load/store one element per register */
if (is_load) {
- do_vec_ld(s, rt, index, tcg_addr, s->be_data + scale);
+ do_vec_ld(s, rt, index, tcg_addr, scale);
} else {
- do_vec_st(s, rt, index, tcg_addr, s->be_data + scale);
+ do_vec_st(s, rt, index, tcg_addr, scale);
}
}
tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
@@ -3215,67 +3216,44 @@ static void disas_bitfield(DisasContext *s, uint32_t insn)
low 32-bits anyway. */
tcg_tmp = read_cpu_reg(s, rn, 1);
- /* Recognize the common aliases. */
- if (opc == 0) { /* SBFM */
- if (ri == 0) {
- if (si == 7) { /* SXTB */
- tcg_gen_ext8s_i64(tcg_rd, tcg_tmp);
- goto done;
- } else if (si == 15) { /* SXTH */
- tcg_gen_ext16s_i64(tcg_rd, tcg_tmp);
- goto done;
- } else if (si == 31) { /* SXTW */
- tcg_gen_ext32s_i64(tcg_rd, tcg_tmp);
- goto done;
- }
- }
- if (si == 63 || (si == 31 && ri <= si)) { /* ASR */
- if (si == 31) {
- tcg_gen_ext32s_i64(tcg_tmp, tcg_tmp);
- }
- tcg_gen_sari_i64(tcg_rd, tcg_tmp, ri);
+ /* Recognize simple(r) extractions. */
+ if (si >= ri) {
+ /* Wd<s-r:0> = Wn<s:r> */
+ len = (si - ri) + 1;
+ if (opc == 0) { /* SBFM: ASR, SBFX, SXTB, SXTH, SXTW */
+ tcg_gen_sextract_i64(tcg_rd, tcg_tmp, ri, len);
goto done;
- }
- } else if (opc == 2) { /* UBFM */
- if (ri == 0) { /* UXTB, UXTH, plus non-canonical AND */
- tcg_gen_andi_i64(tcg_rd, tcg_tmp, bitmask64(si + 1));
+ } else if (opc == 2) { /* UBFM: UBFX, LSR, UXTB, UXTH */
+ tcg_gen_extract_i64(tcg_rd, tcg_tmp, ri, len);
return;
}
- if (si == 63 || (si == 31 && ri <= si)) { /* LSR */
- if (si == 31) {
- tcg_gen_ext32u_i64(tcg_tmp, tcg_tmp);
- }
- tcg_gen_shri_i64(tcg_rd, tcg_tmp, ri);
- return;
- }
- if (si + 1 == ri && si != bitsize - 1) { /* LSL */
- int shift = bitsize - 1 - si;
- tcg_gen_shli_i64(tcg_rd, tcg_tmp, shift);
- goto done;
- }
- }
-
- if (opc != 1) { /* SBFM or UBFM */
- tcg_gen_movi_i64(tcg_rd, 0);
- }
-
- /* do the bit move operation */
- if (si >= ri) {
- /* Wd<s-r:0> = Wn<s:r> */
- tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
+ /* opc == 1, BFXIL fall through to deposit */
+ tcg_gen_extract_i64(tcg_tmp, tcg_tmp, ri, len);
pos = 0;
- len = (si - ri) + 1;
} else {
- /* Wd<32+s-r,32-r> = Wn<s:0> */
- pos = bitsize - ri;
+ /* Handle the ri > si case with a deposit
+ * Wd<32+s-r,32-r> = Wn<s:0>
+ */
len = si + 1;
+ pos = (bitsize - ri) & (bitsize - 1);
}
- tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
+ if (opc == 0 && len < ri) {
+ /* SBFM: sign extend the destination field from len to fill
+ the balance of the word. Let the deposit below insert all
+ of those sign bits. */
+ tcg_gen_sextract_i64(tcg_tmp, tcg_tmp, 0, len);
+ len = ri;
+ }
- if (opc == 0) { /* SBFM - sign extend the destination field */
- tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
- tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
+ if (opc == 1) { /* BFM, BFXIL */
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
+ } else {
+ /* SBFM or UBFM: We start with zero, and we haven't modified
+ any bits outside bitsize, therefore the zero-extension
+ below is unneeded. */
+ tcg_gen_deposit_z_i64(tcg_rd, tcg_tmp, pos, len);
+ return;
}
done:
@@ -3976,11 +3954,11 @@ static void handle_clz(DisasContext *s, unsigned int sf,
tcg_rn = cpu_reg(s, rn);
if (sf) {
- gen_helper_clz64(tcg_rd, tcg_rn);
+ tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
- gen_helper_clz(tcg_tmp32, tcg_tmp32);
+ tcg_gen_clzi_i32(tcg_tmp32, tcg_tmp32, 32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
}
@@ -3994,11 +3972,11 @@ static void handle_cls(DisasContext *s, unsigned int sf,
tcg_rn = cpu_reg(s, rn);
if (sf) {
- gen_helper_cls64(tcg_rd, tcg_rn);
+ tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
- gen_helper_cls32(tcg_tmp32, tcg_tmp32);
+ tcg_gen_clrsb_i32(tcg_tmp32, tcg_tmp32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
}
@@ -7613,9 +7591,9 @@ static void handle_2misc_64(DisasContext *s, int opcode, bool u,
switch (opcode) {
case 0x4: /* CLS, CLZ */
if (u) {
- gen_helper_clz64(tcg_rd, tcg_rn);
+ tcg_gen_clzi_i64(tcg_rd, tcg_rn, 64);
} else {
- gen_helper_cls64(tcg_rd, tcg_rn);
+ tcg_gen_clrsb_i64(tcg_rd, tcg_rn);
}
break;
case 0x5: /* NOT */
@@ -10283,9 +10261,9 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
goto do_cmop;
case 0x4: /* CLS */
if (u) {
- gen_helper_clz32(tcg_res, tcg_op);
+ tcg_gen_clzi_i32(tcg_res, tcg_op, 32);
} else {
- gen_helper_cls32(tcg_res, tcg_op);
+ tcg_gen_clrsb_i32(tcg_res, tcg_op);
}
break;
case 0x7: /* SQABS, SQNEG */
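
For the disas_bitfield() rewrite above, the key observation is that every SBFM/UBFM alias with si >= ri is a single field extraction: Wd<s-r:0> = Wn<s:r>, zero- or sign-extended. A small self-contained C model of the extract/sextract semantics the new code leans on (extract64_model/sextract64_model are illustrative stand-ins, written to match how I understand QEMU's extract64/sextract64 to behave):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Take "len" bits of x starting at bit "pos", zero- or sign-extending
     * the result; pos + len must not exceed 64. */
    static uint64_t extract64_model(uint64_t x, unsigned pos, unsigned len)
    {
        return (x << (64 - pos - len)) >> (64 - len);
    }

    static int64_t sextract64_model(uint64_t x, unsigned pos, unsigned len)
    {
        return (int64_t)(x << (64 - pos - len)) >> (64 - len);
    }

    int main(void)
    {
        uint64_t x = 0xffffffff87654321ull;

        /* UBFX x, x, #8, #16 and LSR x, x, #32 as single extracts. */
        assert(extract64_model(x, 8, 16) == 0x6543);
        assert(extract64_model(x, 32, 32) == 0xffffffff);

        /* SXTB and ASR #32 as single sign-extracts (SBFM aliases). */
        assert(sextract64_model(x, 0, 8) == 0x21);
        assert(sextract64_model(x, 32, 32) == -1);
        printf("extract/sextract aliases OK\n");
        return 0;
    }

The signed variant relies on arithmetic right shift of negative values, which GCC and Clang provide; QEMU's own helpers are implemented differently but should agree on these results.
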
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 0ad9070b45..c9186b6195 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -288,29 +288,6 @@ static void gen_revsh(TCGv_i32 var)
tcg_gen_ext16s_i32(var, var);
}
-/* Unsigned bitfield extract. */
-static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
-{
- if (shift)
- tcg_gen_shri_i32(var, var, shift);
- tcg_gen_andi_i32(var, var, mask);
-}
-
-/* Signed bitfield extract. */
-static void gen_sbfx(TCGv_i32 var, int shift, int width)
-{
- uint32_t signbit;
-
- if (shift)
- tcg_gen_sari_i32(var, var, shift);
- if (shift + width < 32) {
- signbit = 1u << (width - 1);
- tcg_gen_andi_i32(var, var, (1u << width) - 1);
- tcg_gen_xori_i32(var, var, signbit);
- tcg_gen_subi_i32(var, var, signbit);
- }
-}
-
/* Return (b << 32) + a. Mark inputs as dead */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
{
@@ -7060,7 +7037,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
switch (size) {
case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
- case 2: gen_helper_clz(tmp, tmp); break;
+ case 2: tcg_gen_clzi_i32(tmp, tmp, 32); break;
default: abort();
}
break;
@@ -8242,7 +8219,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
ARCH(5);
rd = (insn >> 12) & 0xf;
tmp = load_reg(s, rm);
- gen_helper_clz(tmp, tmp);
+ tcg_gen_clzi_i32(tmp, tmp, 32);
store_reg(s, rd, tmp);
} else {
goto illegal_op;
@@ -9178,9 +9155,9 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
goto illegal_op;
if (i < 32) {
if (op1 & 0x20) {
- gen_ubfx(tmp, shift, (1u << i) - 1);
+ tcg_gen_extract_i32(tmp, tmp, shift, i);
} else {
- gen_sbfx(tmp, shift, i);
+ tcg_gen_sextract_i32(tmp, tmp, shift, i);
}
}
store_reg(s, rd, tmp);
@@ -10015,7 +9992,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tcg_temp_free_i32(tmp2);
break;
case 0x18: /* clz */
- gen_helper_clz(tmp, tmp);
+ tcg_gen_clzi_i32(tmp, tmp, 32);
break;
case 0x20:
case 0x21:
@@ -10497,15 +10474,17 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
imm++;
if (shift + imm > 32)
goto illegal_op;
- if (imm < 32)
- gen_sbfx(tmp, shift, imm);
+ if (imm < 32) {
+ tcg_gen_sextract_i32(tmp, tmp, shift, imm);
+ }
break;
case 6: /* Unsigned bitfield extract. */
imm++;
if (shift + imm > 32)
goto illegal_op;
- if (imm < 32)
- gen_ubfx(tmp, shift, (1u << imm) - 1);
+ if (imm < 32) {
+ tcg_gen_extract_i32(tmp, tmp, shift, imm);
+ }
break;
case 3: /* Bitfield insert/clear. */
if (imm < shift)
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index 2e9ab9700e..5f766f09d6 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -52,9 +52,8 @@ static void cris_cpu_reset(CPUState *s)
ccc->parent_reset(s);
vr = env->pregs[PR_VR];
- memset(env, 0, offsetof(CPUCRISState, load_info));
+ memset(env, 0, offsetof(CPUCRISState, end_reset_fields));
env->pregs[PR_VR] = vr;
- tlb_flush(s, 1);
#if defined(CONFIG_USER_ONLY)
/* start in user mode with interrupts enabled. */
diff --git a/target/cris/cpu.h b/target/cris/cpu.h
index 43d5f9d1da..920e1c33ba 100644
--- a/target/cris/cpu.h
+++ b/target/cris/cpu.h
@@ -167,10 +167,13 @@ typedef struct CPUCRISState {
*/
TLBSet tlbsets[2][4][16];
- CPU_COMMON
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
- /* Members from load_info on are preserved across resets. */
- void *load_info;
+ CPU_COMMON
+
+ /* Members from load_info on are preserved across resets. */
+ void *load_info;
} CPUCRISState;
/**
diff --git a/target/cris/helper.h b/target/cris/helper.h
index ff3595641a..20d21c4358 100644
--- a/target/cris/helper.h
+++ b/target/cris/helper.h
@@ -7,7 +7,6 @@ DEF_HELPER_1(rfn, void, env)
DEF_HELPER_3(movl_sreg_reg, void, env, i32, i32)
DEF_HELPER_3(movl_reg_sreg, void, env, i32, i32)
-DEF_HELPER_FLAGS_1(lz, TCG_CALL_NO_SE, i32, i32)
DEF_HELPER_FLAGS_4(btst, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
DEF_HELPER_FLAGS_4(evaluate_flags_muls, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
diff --git a/target/cris/op_helper.c b/target/cris/op_helper.c
index 504303913c..e92505c907 100644
--- a/target/cris/op_helper.c
+++ b/target/cris/op_helper.c
@@ -230,11 +230,6 @@ void helper_rfn(CPUCRISState *env)
env->pregs[PR_CCS] |= M_FLAG_V32;
}
-uint32_t helper_lz(uint32_t t0)
-{
- return clz32(t0);
-}
-
uint32_t helper_btst(CPUCRISState *env, uint32_t t0, uint32_t t1, uint32_t ccs)
{
/* FIXME: clean this up. */
diff --git a/target/cris/translate.c b/target/cris/translate.c
index b91042743f..0ee05ca02d 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -767,7 +767,7 @@ static void cris_alu_op_exec(DisasContext *dc, int op,
t_gen_subx_carry(dc, dst);
break;
case CC_OP_LZ:
- gen_helper_lz(dst, b);
+ tcg_gen_clzi_tl(dst, b, TARGET_LONG_BITS);
break;
case CC_OP_MULS:
tcg_gen_muls2_tl(dst, cpu_PR[PR_MOF], a, b);
diff --git a/target/hppa/Makefile.objs b/target/hppa/Makefile.objs
new file mode 100644
index 0000000000..263446fa0b
--- /dev/null
+++ b/target/hppa/Makefile.objs
@@ -0,0 +1 @@
+obj-y += translate.o helper.o cpu.o op_helper.o gdbstub.o
diff --git a/target/hppa/cpu-qom.h b/target/hppa/cpu-qom.h
new file mode 100644
index 0000000000..9084e4701d
--- /dev/null
+++ b/target/hppa/cpu-qom.h
@@ -0,0 +1,52 @@
+/*
+ * QEMU HPPA CPU
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_HPPA_CPU_QOM_H
+#define QEMU_HPPA_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_HPPA_CPU "hppa-cpu"
+
+#define HPPA_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(HPPACPUClass, (klass), TYPE_HPPA_CPU)
+#define HPPA_CPU(obj) \
+ OBJECT_CHECK(HPPACPU, (obj), TYPE_HPPA_CPU)
+#define HPPA_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(HPPACPUClass, (obj), TYPE_HPPA_CPU)
+
+/**
+ * HPPACPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An HPPA CPU model.
+ */
+typedef struct HPPACPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} HPPACPUClass;
+
+typedef struct HPPACPU HPPACPU;
+
+#endif
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
new file mode 100644
index 0000000000..1d791d0f80
--- /dev/null
+++ b/target/hppa/cpu.c
@@ -0,0 +1,164 @@
+/*
+ * QEMU HPPA CPU
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+
+
+static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+
+ cpu->env.iaoq_f = value;
+ cpu->env.iaoq_b = value + 4;
+}
+
+static void hppa_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+
+ cpu->env.iaoq_f = tb->pc;
+ cpu->env.iaoq_b = tb->cs_base;
+ cpu->env.psw_n = tb->flags & 1;
+}
+
+static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
+{
+ info->mach = bfd_mach_hppa20;
+ info->print_insn = print_insn_hppa;
+}
+
+static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ HPPACPUClass *acc = HPPA_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+ acc->parent_realize(dev, errp);
+}
+
+/* Sort hppabetically by type name. */
+static gint hppa_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ return strcmp(name_a, name_b);
+}
+
+static void hppa_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUListState *s = user_data;
+
+ (*s->cpu_fprintf)(s->file, " %s\n", object_class_get_name(oc));
+}
+
+void hppa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_HPPA_CPU, false);
+ list = g_slist_sort(list, hppa_cpu_list_compare);
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ g_slist_foreach(list, hppa_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+static void hppa_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ HPPACPU *cpu = HPPA_CPU(obj);
+ CPUHPPAState *env = &cpu->env;
+
+ cs->env_ptr = env;
+ cpu_hppa_loaded_fr0(env);
+ set_snan_bit_is_one(true, &env->fp_status);
+
+ hppa_translate_init();
+}
+
+HPPACPU *cpu_hppa_init(const char *cpu_model)
+{
+ HPPACPU *cpu;
+
+ cpu = HPPA_CPU(object_new(TYPE_HPPA_CPU));
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+ return cpu;
+}
+
+static void hppa_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ HPPACPUClass *acc = HPPA_CPU_CLASS(oc);
+
+ acc->parent_realize = dc->realize;
+ dc->realize = hppa_cpu_realizefn;
+
+ cc->do_interrupt = hppa_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = hppa_cpu_exec_interrupt;
+ cc->dump_state = hppa_cpu_dump_state;
+ cc->set_pc = hppa_cpu_set_pc;
+ cc->synchronize_from_tb = hppa_cpu_synchronize_from_tb;
+ cc->gdb_read_register = hppa_cpu_gdb_read_register;
+ cc->gdb_write_register = hppa_cpu_gdb_write_register;
+ cc->handle_mmu_fault = hppa_cpu_handle_mmu_fault;
+ cc->disas_set_info = hppa_cpu_disas_set_info;
+
+ cc->gdb_num_core_regs = 128;
+}
+
+static const TypeInfo hppa_cpu_type_info = {
+ .name = TYPE_HPPA_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(HPPACPU),
+ .instance_init = hppa_cpu_initfn,
+ .abstract = false,
+ .class_size = sizeof(HPPACPUClass),
+ .class_init = hppa_cpu_class_init,
+};
+
+static void hppa_cpu_register_types(void)
+{
+ type_register_static(&hppa_cpu_type_info);
+}
+
+type_init(hppa_cpu_register_types)
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
new file mode 100644
index 0000000000..4cf4ac65e3
--- /dev/null
+++ b/target/hppa/cpu.h
@@ -0,0 +1,144 @@
+/*
+ * PA-RISC emulation cpu definitions for qemu.
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HPPA_CPU_H
+#define HPPA_CPU_H
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+
+/* We only support hppa-linux-user at present, so 32-bit only. */
+#define TARGET_LONG_BITS 32
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define CPUArchState struct CPUHPPAState
+
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+
+#define TARGET_PAGE_BITS 12
+
+#define ALIGNED_ONLY
+#define NB_MMU_MODES 1
+#define MMU_USER_IDX 0
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+#define EXCP_SYSCALL 1
+#define EXCP_SYSCALL_LWS 2
+#define EXCP_SIGSEGV 3
+#define EXCP_SIGILL 4
+#define EXCP_SIGFPE 5
+
+typedef struct CPUHPPAState CPUHPPAState;
+
+struct CPUHPPAState {
+ target_ulong gr[32];
+ uint64_t fr[32];
+
+ target_ulong sar;
+ target_ulong cr26;
+ target_ulong cr27;
+
+ target_ulong psw_n; /* boolean */
+ target_long psw_v; /* in most significant bit */
+
+ /* Splitting the carry-borrow field into the MSB and "the rest" allows
+ * for "the rest" to be deleted when it is unused, but the MSB is in use.
+ * In addition, it's easier to compute carry-in for bit B+1 than it is to
+ * compute carry-out for bit B (3 vs 4 insns for addition, assuming the
+ * host has the appropriate add-with-carry insn to compute the msb).
+ * Therefore the carry bits are stored as: cb_msb : cb & 0x11111110.
+ */
+ target_ulong psw_cb; /* in least significant bit of next nibble */
+ target_ulong psw_cb_msb; /* boolean */
+
+ target_ulong iaoq_f; /* front */
+ target_ulong iaoq_b; /* back, aka next instruction */
+
+ target_ulong ior; /* interrupt offset register */
+
+ uint32_t fr0_shadow; /* flags, c, ca/cq, rm, d, enables */
+ float_status fp_status;
+
+ /* Those resources are used only in QEMU core */
+ CPU_COMMON
+};
+
+/**
+ * HPPACPU:
+ * @env: #CPUHPPAState
+ *
+ * An HPPA CPU.
+ */
+struct HPPACPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUHPPAState env;
+};
+
+static inline HPPACPU *hppa_env_get_cpu(CPUHPPAState *env)
+{
+ return container_of(env, HPPACPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(hppa_env_get_cpu(e))
+#define ENV_OFFSET offsetof(HPPACPU, env)
+
+#include "exec/cpu-all.h"
+
+static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
+{
+ return 0;
+}
+
+void hppa_translate_init(void);
+
+HPPACPU *cpu_hppa_init(const char *cpu_model);
+
+#define cpu_init(cpu_model) CPU(cpu_hppa_init(cpu_model))
+
+void hppa_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+
+static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
+ target_ulong *cs_base,
+ uint32_t *pflags)
+{
+ *pc = env->iaoq_f;
+ *cs_base = env->iaoq_b;
+ *pflags = env->psw_n;
+}
+
+target_ulong cpu_hppa_get_psw(CPUHPPAState *env);
+void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong);
+void cpu_hppa_loaded_fr0(CPUHPPAState *env);
+
+#define cpu_signal_handler cpu_hppa_signal_handler
+
+int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc);
+int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw, int midx);
+int hppa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void hppa_cpu_do_interrupt(CPUState *cpu);
+bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void hppa_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function, int);
+
+#endif /* HPPA_CPU_H */
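
A short standalone illustration of the psw_cb representation documented in cpu.h above: for a plain 32-bit addition, the carry into bit k of r = a + b is bit k of (a ^ b ^ r), so keeping the carry out of each nibble boundary in the low bit of the next nibble (mask 0x11111110), with the bit 31 carry held separately in psw_cb_msb, falls out almost for free. Whether the translator computes it exactly this way is not shown in this patch, so treat model_add_carries as an illustrative sketch only:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Standalone model of the psw_cb layout described in cpu.h: the carry
     * out of bit 4k-1 of an addition is kept in bit 4k of psw_cb, and the
     * carry out of bit 31 in psw_cb_msb.  The carry into bit k of
     * r = a + b is bit k of (a ^ b ^ r). */
    static void model_add_carries(uint32_t a, uint32_t b,
                                  uint32_t *cb, uint32_t *cb_msb)
    {
        uint64_t r = (uint64_t)a + b;
        uint32_t carry_in = a ^ b ^ (uint32_t)r;   /* carry-in to each bit */

        *cb = carry_in & 0x11111110;               /* nibble boundaries, bits 4..28 */
        *cb_msb = (uint32_t)(r >> 32);             /* carry out of bit 31 */
    }

    int main(void)
    {
        uint32_t cb, cb_msb;

        model_add_carries(0x0000000f, 0x00000001, &cb, &cb_msb);
        assert(cb == 0x00000010 && cb_msb == 0);   /* carry out of bit 3 only */
        model_add_carries(0xffffffff, 0x00000001, &cb, &cb_msb);
        assert(cb == 0x11111110 && cb_msb == 1);   /* carry ripples everywhere */
        printf("psw_cb model OK\n");
        return 0;
    }
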
diff --git a/target/hppa/gdbstub.c b/target/hppa/gdbstub.c
new file mode 100644
index 0000000000..413a5e12ad
--- /dev/null
+++ b/target/hppa/gdbstub.c
@@ -0,0 +1,111 @@
+/*
+ * HPPA gdb server stub
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int hppa_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+ target_ulong val;
+
+ switch (n) {
+ case 0:
+ val = cpu_hppa_get_psw(env);
+ break;
+ case 1 ... 31:
+ val = env->gr[n];
+ break;
+ case 32:
+ val = env->sar;
+ break;
+ case 33:
+ val = env->iaoq_f;
+ break;
+ case 35:
+ val = env->iaoq_b;
+ break;
+ case 59:
+ val = env->cr26;
+ break;
+ case 60:
+ val = env->cr27;
+ break;
+ case 64 ... 127:
+ val = extract64(env->fr[(n - 64) / 2], (n & 1 ? 0 : 32), 32);
+ break;
+ default:
+ if (n < 128) {
+ val = 0;
+ } else {
+ return 0;
+ }
+ break;
+ }
+ return gdb_get_regl(mem_buf, val);
+}
+
+int hppa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+ target_ulong val = ldtul_p(mem_buf);
+
+ switch (n) {
+ case 0:
+ cpu_hppa_put_psw(env, val);
+ break;
+ case 1 ... 31:
+ env->gr[n] = val;
+ break;
+ case 32:
+ env->sar = val;
+ break;
+ case 33:
+ env->iaoq_f = val;
+ break;
+ case 35:
+ env->iaoq_b = val;
+ break;
+ case 59:
+ env->cr26 = val;
+ break;
+ case 60:
+ env->cr27 = val;
+ break;
+ case 64:
+ env->fr[0] = deposit64(env->fr[0], 32, 32, val);
+ cpu_hppa_loaded_fr0(env);
+ break;
+ case 65 ... 127:
+ {
+ uint64_t *fr = &env->fr[(n - 64) / 2];
+ *fr = deposit64(*fr, val, (n & 1 ? 0 : 32), 32);
+ }
+ break;
+ default:
+ if (n >= 128) {
+ return 0;
+ }
+ break;
+ }
+ return sizeof(target_ulong);
+}
diff --git a/target/hppa/helper.c b/target/hppa/helper.c
new file mode 100644
index 0000000000..ba04a9a52b
--- /dev/null
+++ b/target/hppa/helper.c
@@ -0,0 +1,137 @@
+/*
+ * HPPA emulation cpu helpers for qemu.
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat.h"
+#include "exec/helper-proto.h"
+
+/* PSW[C/B] is kept unpacked while translating: psw_cb holds the raw
+   carry/borrow vector produced by the arithmetic helpers, and psw_cb_msb
+   the carry out of the top bit.  The architectural PSW wants eight
+   contiguous bits (one per nibble of the result), so fold them together
+   here; cpu_hppa_put_psw below performs the inverse spread. */
+target_ulong cpu_hppa_get_psw(CPUHPPAState *env)
+{
+ target_ulong psw;
+
+ /* Fold carry bits down to 8 consecutive bits. */
+ /* ??? Needs tweaking for hppa64. */
+ /* .......b...c...d...e...f...g...h */
+ psw = (env->psw_cb >> 4) & 0x01111111;
+ /* .......b..bc..cd..de..ef..fg..gh */
+ psw |= psw >> 3;
+ /* .............bcd............efgh */
+ psw |= (psw >> 6) & 0x000f000f;
+ /* .........................bcdefgh */
+ psw |= (psw >> 12) & 0xf;
+ psw |= env->psw_cb_msb << 7;
+ psw <<= 8;
+
+ psw |= env->psw_n << 21;
+ psw |= (env->psw_v < 0) << 17;
+
+ return psw;
+}
+
+void cpu_hppa_put_psw(CPUHPPAState *env, target_ulong psw)
+{
+ target_ulong cb = 0;
+
+ env->psw_n = (psw >> 21) & 1;
+ env->psw_v = -((psw >> 17) & 1);
+ env->psw_cb_msb = (psw >> 15) & 1;
+
+ cb |= ((psw >> 14) & 1) << 28;
+ cb |= ((psw >> 13) & 1) << 24;
+ cb |= ((psw >> 12) & 1) << 20;
+ cb |= ((psw >> 11) & 1) << 16;
+ cb |= ((psw >> 10) & 1) << 12;
+ cb |= ((psw >> 9) & 1) << 8;
+ cb |= ((psw >> 8) & 1) << 4;
+ env->psw_cb = cb;
+}
+
+int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int rw, int mmu_idx)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+
+ cs->exception_index = EXCP_SIGSEGV;
+ cpu->env.ior = address;
+ return 1;
+}
+
+void hppa_cpu_do_interrupt(CPUState *cs)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+ int i = cs->exception_index;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ const char *name = "<unknown>";
+
+ switch (i) {
+ case EXCP_SYSCALL:
+ name = "syscall";
+ break;
+ case EXCP_SIGSEGV:
+ name = "sigsegv";
+ break;
+ case EXCP_SIGILL:
+ name = "sigill";
+ break;
+ case EXCP_SIGFPE:
+ name = "sigfpe";
+ break;
+ }
+ qemu_log("INT %6d: %s ia_f=" TARGET_FMT_lx "\n",
+ ++count, name, env->iaoq_f);
+ }
+ cs->exception_index = -1;
+}
+
+bool hppa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ abort();
+ return false;
+}
+
+void hppa_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ HPPACPU *cpu = HPPA_CPU(cs);
+ CPUHPPAState *env = &cpu->env;
+ int i;
+
+ cpu_fprintf(f, "IA_F " TARGET_FMT_lx
+ " IA_B " TARGET_FMT_lx
+ " PSW " TARGET_FMT_lx
+ " [N:" TARGET_FMT_ld " V:%d"
+ " CB:" TARGET_FMT_lx "]\n ",
+ env->iaoq_f, env->iaoq_b, cpu_hppa_get_psw(env),
+ env->psw_n, env->psw_v < 0,
+ ((env->psw_cb >> 4) & 0x01111111) | (env->psw_cb_msb << 28));
+ for (i = 1; i < 32; i++) {
+ cpu_fprintf(f, "GR%02d " TARGET_FMT_lx " ", i, env->gr[i]);
+ if ((i % 4) == 3) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+
+ /* ??? FR */
+}
diff --git a/target/hppa/helper.h b/target/hppa/helper.h
new file mode 100644
index 0000000000..789f07fc0a
--- /dev/null
+++ b/target/hppa/helper.h
@@ -0,0 +1,66 @@
+DEF_HELPER_2(excp, noreturn, env, int)
+DEF_HELPER_FLAGS_2(tsv, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(tcond, TCG_CALL_NO_WG, void, env, tl)
+
+DEF_HELPER_FLAGS_3(stby_b, TCG_CALL_NO_WG, void, env, tl, tl)
+DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tl)
+
+DEF_HELPER_FLAGS_1(probe_r, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(probe_w, TCG_CALL_NO_RWG_SE, tl, tl)
+
+DEF_HELPER_FLAGS_1(loaded_fr0, TCG_CALL_NO_RWG, void, env)
+
+DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_RWG, f32, env, f32)
+DEF_HELPER_FLAGS_2(frnd_s, TCG_CALL_NO_RWG, f32, env, f32)
+DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fmpy_s, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_RWG, f32, env, f32, f32)
+
+DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_RWG, f64, env, f64)
+DEF_HELPER_FLAGS_2(frnd_d, TCG_CALL_NO_RWG, f64, env, f64)
+DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fmpy_d, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_RWG, f64, env, f64, f64)
+
+DEF_HELPER_FLAGS_2(fcnv_s_d, TCG_CALL_NO_RWG, f64, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_d_s, TCG_CALL_NO_RWG, f32, env, f64)
+
+DEF_HELPER_FLAGS_2(fcnv_w_s, TCG_CALL_NO_RWG, f32, env, s32)
+DEF_HELPER_FLAGS_2(fcnv_dw_s, TCG_CALL_NO_RWG, f32, env, s64)
+DEF_HELPER_FLAGS_2(fcnv_w_d, TCG_CALL_NO_RWG, f64, env, s32)
+DEF_HELPER_FLAGS_2(fcnv_dw_d, TCG_CALL_NO_RWG, f64, env, s64)
+
+DEF_HELPER_FLAGS_2(fcnv_s_w, TCG_CALL_NO_RWG, s32, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_d_w, TCG_CALL_NO_RWG, s32, env, f64)
+DEF_HELPER_FLAGS_2(fcnv_s_dw, TCG_CALL_NO_RWG, s64, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_d_dw, TCG_CALL_NO_RWG, s64, env, f64)
+
+DEF_HELPER_FLAGS_2(fcnv_t_s_w, TCG_CALL_NO_RWG, s32, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_t_d_w, TCG_CALL_NO_RWG, s32, env, f64)
+DEF_HELPER_FLAGS_2(fcnv_t_s_dw, TCG_CALL_NO_RWG, s64, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_t_d_dw, TCG_CALL_NO_RWG, s64, env, f64)
+
+DEF_HELPER_FLAGS_2(fcnv_uw_s, TCG_CALL_NO_RWG, f32, env, i32)
+DEF_HELPER_FLAGS_2(fcnv_udw_s, TCG_CALL_NO_RWG, f32, env, i64)
+DEF_HELPER_FLAGS_2(fcnv_uw_d, TCG_CALL_NO_RWG, f64, env, i32)
+DEF_HELPER_FLAGS_2(fcnv_udw_d, TCG_CALL_NO_RWG, f64, env, i64)
+
+DEF_HELPER_FLAGS_2(fcnv_s_uw, TCG_CALL_NO_RWG, i32, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_d_uw, TCG_CALL_NO_RWG, i32, env, f64)
+DEF_HELPER_FLAGS_2(fcnv_s_udw, TCG_CALL_NO_RWG, i64, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_d_udw, TCG_CALL_NO_RWG, i64, env, f64)
+
+DEF_HELPER_FLAGS_2(fcnv_t_s_uw, TCG_CALL_NO_RWG, i32, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_t_d_uw, TCG_CALL_NO_RWG, i32, env, f64)
+DEF_HELPER_FLAGS_2(fcnv_t_s_udw, TCG_CALL_NO_RWG, i64, env, f32)
+DEF_HELPER_FLAGS_2(fcnv_t_d_udw, TCG_CALL_NO_RWG, i64, env, f64)
+
+DEF_HELPER_FLAGS_5(fcmp_s, TCG_CALL_NO_RWG, void, env, f32, f32, i32, i32)
+DEF_HELPER_FLAGS_5(fcmp_d, TCG_CALL_NO_RWG, void, env, f64, f64, i32, i32)
+
+DEF_HELPER_FLAGS_4(fmpyfadd_s, TCG_CALL_NO_RWG, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_4(fmpynfadd_s, TCG_CALL_NO_RWG, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_4(fmpyfadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(fmpynfadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
new file mode 100644
index 0000000000..c05c0d5572
--- /dev/null
+++ b/target/hppa/op_helper.c
@@ -0,0 +1,570 @@
+/*
+ * Helpers for HPPA instructions.
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+
+void QEMU_NORETURN HELPER(excp)(CPUHPPAState *env, int excp)
+{
+ HPPACPU *cpu = hppa_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
+}
+
+static void QEMU_NORETURN dynexcp(CPUHPPAState *env, int excp, uintptr_t ra)
+{
+ HPPACPU *cpu = hppa_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = excp;
+ cpu_loop_exit_restore(cs, ra);
+}
+
+void HELPER(tsv)(CPUHPPAState *env, target_ulong cond)
+{
+ if (unlikely((target_long)cond < 0)) {
+ dynexcp(env, EXCP_SIGFPE, GETPC());
+ }
+}
+
+void HELPER(tcond)(CPUHPPAState *env, target_ulong cond)
+{
+ if (unlikely(cond)) {
+ dynexcp(env, EXCP_SIGFPE, GETPC());
+ }
+}
+
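+/* Merge VAL under MASK into the guest word via its host mapping with a
+   compare-and-swap loop, retrying until no other thread has modified the
+   word in between, so the partial store appears atomic.  Only the
+   user-only path is implemented. */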
+static void atomic_store_3(CPUHPPAState *env, target_ulong addr, uint32_t val,
+ uint32_t mask, uintptr_t ra)
+{
+ uint32_t old, new, cmp;
+
+#ifdef CONFIG_USER_ONLY
+ uint32_t *haddr = g2h(addr - 1);
+ old = *haddr;
+ while (1) {
+ new = (old & ~mask) | (val & mask);
+ cmp = atomic_cmpxchg(haddr, old, new);
+ if (cmp == old) {
+ return;
+ }
+ old = cmp;
+ }
+#else
+#error "Not implemented."
+#endif
+}
+
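+/* STBY with the "begin" completer: store the low-order 4 - (ADDR & 3)
+   bytes of VAL at ADDR through the end of the word. */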
+void HELPER(stby_b)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+{
+ uintptr_t ra = GETPC();
+
+ switch (addr & 3) {
+ case 3:
+ cpu_stb_data_ra(env, addr, val, ra);
+ break;
+ case 2:
+ cpu_stw_data_ra(env, addr, val, ra);
+ break;
+ case 1:
+ /* The 3 byte store must appear atomic. */
+ if (parallel_cpus) {
+ atomic_store_3(env, addr, val, 0x00ffffffu, ra);
+ } else {
+ cpu_stb_data_ra(env, addr, val >> 16, ra);
+ cpu_stw_data_ra(env, addr + 1, val, ra);
+ }
+ break;
+ default:
+ cpu_stl_data_ra(env, addr, val, ra);
+ break;
+ }
+}
+
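+/* STBY with the "end" completer: store the high-order ADDR & 3 bytes of
+   VAL into the bytes of the word that precede ADDR; an aligned ADDR
+   stores nothing (see the default case below). */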
+void HELPER(stby_e)(CPUHPPAState *env, target_ulong addr, target_ulong val)
+{
+ uintptr_t ra = GETPC();
+
+ switch (addr & 3) {
+ case 3:
+ /* The 3 byte store must appear atomic. */
+ if (parallel_cpus) {
+ atomic_store_3(env, addr - 3, val, 0xffffff00u, ra);
+ } else {
+ cpu_stw_data_ra(env, addr - 3, val >> 16, ra);
+ cpu_stb_data_ra(env, addr - 1, val >> 8, ra);
+ }
+ break;
+ case 2:
+ cpu_stw_data_ra(env, addr - 2, val >> 16, ra);
+ break;
+ case 1:
+ cpu_stb_data_ra(env, addr - 1, val >> 24, ra);
+ break;
+ default:
+ /* Nothing is stored, but protection is checked and the
+ cacheline is marked dirty. */
+#ifndef CONFIG_USER_ONLY
+ probe_write(env, addr, cpu_mmu_index(env, 0), ra);
+#endif
+ break;
+ }
+}
+
+target_ulong HELPER(probe_r)(target_ulong addr)
+{
+ return page_check_range(addr, 1, PAGE_READ);
+}
+
+target_ulong HELPER(probe_w)(target_ulong addr)
+{
+ return page_check_range(addr, 1, PAGE_WRITE);
+}
+
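+/* The upper half of fr0 shadows the FP status register.  Whenever it is
+   reloaded, re-derive the softfloat rounding mode (the rm field) and the
+   flush-to-zero behaviour (the d bit) from the new value. */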
+void HELPER(loaded_fr0)(CPUHPPAState *env)
+{
+ uint32_t shadow = env->fr[0] >> 32;
+ int rm, d;
+
+ env->fr0_shadow = shadow;
+
+ switch (extract32(shadow, 9, 2)) {
+ default:
+ rm = float_round_nearest_even;
+ break;
+ case 1:
+ rm = float_round_to_zero;
+ break;
+ case 2:
+ rm = float_round_up;
+ break;
+ case 3:
+ rm = float_round_down;
+ break;
+ }
+ set_float_rounding_mode(rm, &env->fp_status);
+
+ d = extract32(shadow, 5, 1);
+ set_flush_to_zero(d, &env->fp_status);
+ set_flush_inputs_to_zero(d, &env->fp_status);
+}
+
+void cpu_hppa_loaded_fr0(CPUHPPAState *env)
+{
+ helper_loaded_fr0(env);
+}
+
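+/* Move a single flag from bit mask SRC to bit mask DST, shifting left or
+   right as needed; both arguments are one-bit masks, not bit numbers. */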
+#define CONVERT_BIT(X, SRC, DST) \
+ ((SRC) > (DST) \
+ ? (X) / ((SRC) / (DST)) & (DST) \
+ : ((X) & (SRC)) * ((DST) / (SRC)))
+
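+/* Fold any accumulated softfloat exceptions into the fr0 status shadow.
+   Newly raised flags are recorded in the top five bits of the status
+   word; if any of them is also set in the enable bits (the low five bits,
+   which is what hard_exp & shadow tests), deliver SIGFPE at the faulting
+   instruction. */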
+static void update_fr0_op(CPUHPPAState *env, uintptr_t ra)
+{
+ uint32_t soft_exp = get_float_exception_flags(&env->fp_status);
+ uint32_t hard_exp = 0;
+ uint32_t shadow = env->fr0_shadow;
+
+ if (likely(soft_exp == 0)) {
+ env->fr[0] = (uint64_t)shadow << 32;
+ return;
+ }
+ set_float_exception_flags(0, &env->fp_status);
+
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_inexact, 1u << 0);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_underflow, 1u << 1);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_overflow, 1u << 2);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_divbyzero, 1u << 3);
+ hard_exp |= CONVERT_BIT(soft_exp, float_flag_invalid, 1u << 4);
+ shadow |= hard_exp << (32 - 5);
+ env->fr0_shadow = shadow;
+ env->fr[0] = (uint64_t)shadow << 32;
+
+ if (hard_exp & shadow) {
+ dynexcp(env, EXCP_SIGFPE, ra);
+ }
+}
+
+float32 HELPER(fsqrt_s)(CPUHPPAState *env, float32 arg)
+{
+ float32 ret = float32_sqrt(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(frnd_s)(CPUHPPAState *env, float32 arg)
+{
+ float32 ret = float32_round_to_int(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fadd_s)(CPUHPPAState *env, float32 a, float32 b)
+{
+ float32 ret = float32_add(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fsub_s)(CPUHPPAState *env, float32 a, float32 b)
+{
+ float32 ret = float32_sub(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fmpy_s)(CPUHPPAState *env, float32 a, float32 b)
+{
+ float32 ret = float32_mul(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fdiv_s)(CPUHPPAState *env, float32 a, float32 b)
+{
+ float32 ret = float32_div(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fsqrt_d)(CPUHPPAState *env, float64 arg)
+{
+ float64 ret = float64_sqrt(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(frnd_d)(CPUHPPAState *env, float64 arg)
+{
+ float64 ret = float64_round_to_int(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fadd_d)(CPUHPPAState *env, float64 a, float64 b)
+{
+ float64 ret = float64_add(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fsub_d)(CPUHPPAState *env, float64 a, float64 b)
+{
+ float64 ret = float64_sub(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fmpy_d)(CPUHPPAState *env, float64 a, float64 b)
+{
+ float64 ret = float64_mul(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fdiv_d)(CPUHPPAState *env, float64 a, float64 b)
+{
+ float64 ret = float64_div(a, b, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fcnv_s_d)(CPUHPPAState *env, float32 arg)
+{
+ float64 ret = float32_to_float64(arg, &env->fp_status);
+ ret = float64_maybe_silence_nan(ret, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fcnv_d_s)(CPUHPPAState *env, float64 arg)
+{
+ float32 ret = float64_to_float32(arg, &env->fp_status);
+ ret = float32_maybe_silence_nan(ret, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fcnv_w_s)(CPUHPPAState *env, int32_t arg)
+{
+ float32 ret = int32_to_float32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fcnv_dw_s)(CPUHPPAState *env, int64_t arg)
+{
+ float32 ret = int64_to_float32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fcnv_w_d)(CPUHPPAState *env, int32_t arg)
+{
+ float64 ret = int32_to_float64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fcnv_dw_d)(CPUHPPAState *env, int64_t arg)
+{
+ float64 ret = int64_to_float64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int32_t HELPER(fcnv_s_w)(CPUHPPAState *env, float32 arg)
+{
+ int32_t ret = float32_to_int32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int32_t HELPER(fcnv_d_w)(CPUHPPAState *env, float64 arg)
+{
+ int32_t ret = float64_to_int32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int64_t HELPER(fcnv_s_dw)(CPUHPPAState *env, float32 arg)
+{
+ int64_t ret = float32_to_int64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int64_t HELPER(fcnv_d_dw)(CPUHPPAState *env, float64 arg)
+{
+ int64_t ret = float64_to_int64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int32_t HELPER(fcnv_t_s_w)(CPUHPPAState *env, float32 arg)
+{
+ int32_t ret = float32_to_int32_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int32_t HELPER(fcnv_t_d_w)(CPUHPPAState *env, float64 arg)
+{
+ int32_t ret = float64_to_int32_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int64_t HELPER(fcnv_t_s_dw)(CPUHPPAState *env, float32 arg)
+{
+ int64_t ret = float32_to_int64_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+int64_t HELPER(fcnv_t_d_dw)(CPUHPPAState *env, float64 arg)
+{
+ int64_t ret = float64_to_int64_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fcnv_uw_s)(CPUHPPAState *env, uint32_t arg)
+{
+ float32 ret = uint32_to_float32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fcnv_udw_s)(CPUHPPAState *env, uint64_t arg)
+{
+ float32 ret = uint64_to_float32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fcnv_uw_d)(CPUHPPAState *env, uint32_t arg)
+{
+ float64 ret = uint32_to_float64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fcnv_udw_d)(CPUHPPAState *env, uint64_t arg)
+{
+ float64 ret = uint64_to_float64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint32_t HELPER(fcnv_s_uw)(CPUHPPAState *env, float32 arg)
+{
+ uint32_t ret = float32_to_uint32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint32_t HELPER(fcnv_d_uw)(CPUHPPAState *env, float64 arg)
+{
+ uint32_t ret = float64_to_uint32(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint64_t HELPER(fcnv_s_udw)(CPUHPPAState *env, float32 arg)
+{
+ uint64_t ret = float32_to_uint64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint64_t HELPER(fcnv_d_udw)(CPUHPPAState *env, float64 arg)
+{
+ uint64_t ret = float64_to_uint64(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint32_t HELPER(fcnv_t_s_uw)(CPUHPPAState *env, float32 arg)
+{
+ uint32_t ret = float32_to_uint32_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint32_t HELPER(fcnv_t_d_uw)(CPUHPPAState *env, float64 arg)
+{
+ uint32_t ret = float64_to_uint32_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint64_t HELPER(fcnv_t_s_udw)(CPUHPPAState *env, float32 arg)
+{
+ uint64_t ret = float32_to_uint64_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+uint64_t HELPER(fcnv_t_d_udw)(CPUHPPAState *env, float64 arg)
+{
+ uint64_t ret = float64_to_uint64_round_to_zero(arg, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+static void update_fr0_cmp(CPUHPPAState *env, uint32_t y, uint32_t c, int r)
+{
+ uint32_t shadow = env->fr0_shadow;
+
+ switch (r) {
+ case float_relation_greater:
+ c = extract32(c, 4, 1);
+ break;
+ case float_relation_less:
+ c = extract32(c, 3, 1);
+ break;
+ case float_relation_equal:
+ c = extract32(c, 2, 1);
+ break;
+ case float_relation_unordered:
+ c = extract32(c, 1, 1);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (y) {
+ /* targeted comparison */
+ /* set fpsr[ca[y - 1]] to current compare */
+ shadow = deposit32(shadow, 21 - (y - 1), 1, c);
+ } else {
+ /* queued comparison */
+ /* shift cq right by one place */
+ shadow = deposit32(shadow, 11, 10, extract32(shadow, 12, 10));
+ /* move fpsr[c] to fpsr[cq[0]] */
+ shadow = deposit32(shadow, 21, 1, extract32(shadow, 26, 1));
+ /* set fpsr[c] to current compare */
+ shadow = deposit32(shadow, 26, 1, c);
+ }
+
+ env->fr0_shadow = shadow;
+ env->fr[0] = (uint64_t)shadow << 32;
+}
+
+void HELPER(fcmp_s)(CPUHPPAState *env, float32 a, float32 b,
+ uint32_t y, uint32_t c)
+{
+ int r;
+ if (c & 1) {
+ r = float32_compare(a, b, &env->fp_status);
+ } else {
+ r = float32_compare_quiet(a, b, &env->fp_status);
+ }
+ update_fr0_op(env, GETPC());
+ update_fr0_cmp(env, y, c, r);
+}
+
+void HELPER(fcmp_d)(CPUHPPAState *env, float64 a, float64 b,
+ uint32_t y, uint32_t c)
+{
+ int r;
+ if (c & 1) {
+ r = float64_compare(a, b, &env->fp_status);
+ } else {
+ r = float64_compare_quiet(a, b, &env->fp_status);
+ }
+ update_fr0_op(env, GETPC());
+ update_fr0_cmp(env, y, c, r);
+}
+
+float32 HELPER(fmpyfadd_s)(CPUHPPAState *env, float32 a, float32 b, float32 c)
+{
+ float32 ret = float32_muladd(a, b, c, 0, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float32 HELPER(fmpynfadd_s)(CPUHPPAState *env, float32 a, float32 b, float32 c)
+{
+ float32 ret = float32_muladd(a, b, c, float_muladd_negate_product,
+ &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fmpyfadd_d)(CPUHPPAState *env, float64 a, float64 b, float64 c)
+{
+ float64 ret = float64_muladd(a, b, c, 0, &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
+
+float64 HELPER(fmpynfadd_d)(CPUHPPAState *env, float64 a, float64 b, float64 c)
+{
+ float64 ret = float64_muladd(a, b, c, float_muladd_negate_product,
+ &env->fp_status);
+ update_fr0_op(env, GETPC());
+ return ret;
+}
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
new file mode 100644
index 0000000000..4d243f7d3d
--- /dev/null
+++ b/target/hppa/translate.c
@@ -0,0 +1,3946 @@
+/*
+ * HPPA emulation cpu translation for qemu.
+ *
+ * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
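+/* A DisasCond describes a comparison, A0 <c> A1, used both for branch
+   conditions and for the nullification state of the next insn.  The
+   a0_is_n / a1_is_0 flags record that an operand is the PSW[N] global or
+   the constant zero, so cond_free knows not to release it and cond_prep
+   can materialise the zero lazily. */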
+typedef struct DisasCond {
+ TCGCond c;
+ TCGv a0, a1;
+ bool a0_is_n;
+ bool a1_is_0;
+} DisasCond;
+
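+/* In DisasContext below, iaoq_f and iaoq_b track the architectural front
+   and back entries of the instruction address offset queue for the insn
+   being translated; iaoq_n / iaoq_n_var describe the entry after that,
+   either as a known constant or, when it is only known at run time, as a
+   TCG value, with -1 used as the "not a constant" marker. */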
+typedef struct DisasContext {
+ struct TranslationBlock *tb;
+ CPUState *cs;
+
+ target_ulong iaoq_f;
+ target_ulong iaoq_b;
+ target_ulong iaoq_n;
+ TCGv iaoq_n_var;
+
+ int ntemps;
+ TCGv temps[8];
+
+ DisasCond null_cond;
+ TCGLabel *null_lab;
+
+ bool singlestep_enabled;
+ bool psw_n_nonzero;
+} DisasContext;
+
+/* Return values from translate_one, indicating the state of the TB.
+ Note that zero indicates that we are not exiting the TB. */
+
+typedef enum {
+ NO_EXIT,
+
+ /* We have emitted one or more goto_tb. No fixup required. */
+ EXIT_GOTO_TB,
+
+ /* We are not using a goto_tb (for whatever reason), but have updated
+ the iaq (for whatever reason), so don't do it again on exit. */
+ EXIT_IAQ_N_UPDATED,
+
+ /* We are exiting the TB, but have neither emitted a goto_tb, nor
+ updated the iaq for the next instruction to be executed. */
+ EXIT_IAQ_N_STALE,
+
+ /* We are ending the TB with a noreturn function call, e.g. longjmp.
+ No following code will be executed. */
+ EXIT_NORETURN,
+} ExitStatus;
+
+typedef struct DisasInsn {
+ uint32_t insn, mask;
+ ExitStatus (*trans)(DisasContext *ctx, uint32_t insn,
+ const struct DisasInsn *f);
+ union {
+ void (*f_ttt)(TCGv, TCGv, TCGv);
+ void (*f_weww)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
+ void (*f_dedd)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64);
+ void (*f_wew)(TCGv_i32, TCGv_env, TCGv_i32);
+ void (*f_ded)(TCGv_i64, TCGv_env, TCGv_i64);
+ void (*f_wed)(TCGv_i32, TCGv_env, TCGv_i64);
+ void (*f_dew)(TCGv_i64, TCGv_env, TCGv_i32);
+ };
+} DisasInsn;
+
+/* global register indexes */
+static TCGv_env cpu_env;
+static TCGv cpu_gr[32];
+static TCGv cpu_iaoq_f;
+static TCGv cpu_iaoq_b;
+static TCGv cpu_sar;
+static TCGv cpu_psw_n;
+static TCGv cpu_psw_v;
+static TCGv cpu_psw_cb;
+static TCGv cpu_psw_cb_msb;
+static TCGv cpu_cr26;
+static TCGv cpu_cr27;
+
+#include "exec/gen-icount.h"
+
+void hppa_translate_init(void)
+{
+#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
+
+ typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
+ static const GlobalVar vars[] = {
+ DEF_VAR(sar),
+ DEF_VAR(cr26),
+ DEF_VAR(cr27),
+ DEF_VAR(psw_n),
+ DEF_VAR(psw_v),
+ DEF_VAR(psw_cb),
+ DEF_VAR(psw_cb_msb),
+ DEF_VAR(iaoq_f),
+ DEF_VAR(iaoq_b),
+ };
+
+#undef DEF_VAR
+
+ /* Use the symbolic register names that match the disassembler. */
+ static const char gr_names[32][4] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
+ };
+
+ static bool done_init = false;
+ int i;
+
+ if (done_init) {
+ return;
+ }
+ done_init = true;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
+ TCGV_UNUSED(cpu_gr[0]);
+ for (i = 1; i < 32; i++) {
+ cpu_gr[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUHPPAState, gr[i]),
+ gr_names[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vars); ++i) {
+ const GlobalVar *v = &vars[i];
+ *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
+ }
+}
+
+static DisasCond cond_make_f(void)
+{
+ DisasCond r = { .c = TCG_COND_NEVER };
+ TCGV_UNUSED(r.a0);
+ TCGV_UNUSED(r.a1);
+ return r;
+}
+
+static DisasCond cond_make_n(void)
+{
+ DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
+ r.a0 = cpu_psw_n;
+ TCGV_UNUSED(r.a1);
+ return r;
+}
+
+static DisasCond cond_make_0(TCGCond c, TCGv a0)
+{
+ DisasCond r = { .c = c, .a1_is_0 = true };
+
+ assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
+ r.a0 = tcg_temp_new();
+ tcg_gen_mov_tl(r.a0, a0);
+ TCGV_UNUSED(r.a1);
+
+ return r;
+}
+
+static DisasCond cond_make(TCGCond c, TCGv a0, TCGv a1)
+{
+ DisasCond r = { .c = c };
+
+ assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
+ r.a0 = tcg_temp_new();
+ tcg_gen_mov_tl(r.a0, a0);
+ r.a1 = tcg_temp_new();
+ tcg_gen_mov_tl(r.a1, a1);
+
+ return r;
+}
+
+static void cond_prep(DisasCond *cond)
+{
+ if (cond->a1_is_0) {
+ cond->a1_is_0 = false;
+ cond->a1 = tcg_const_tl(0);
+ }
+}
+
+static void cond_free(DisasCond *cond)
+{
+ switch (cond->c) {
+ default:
+ if (!cond->a0_is_n) {
+ tcg_temp_free(cond->a0);
+ }
+ if (!cond->a1_is_0) {
+ tcg_temp_free(cond->a1);
+ }
+ cond->a0_is_n = false;
+ cond->a1_is_0 = false;
+ TCGV_UNUSED(cond->a0);
+ TCGV_UNUSED(cond->a1);
+ /* fallthru */
+ case TCG_COND_ALWAYS:
+ cond->c = TCG_COND_NEVER;
+ break;
+ case TCG_COND_NEVER:
+ break;
+ }
+}
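+/* Temporaries handed out here are recorded in ctx->temps; the main
+   translation loop (outside this hunk) is expected to free them after
+   each instruction. */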
+
+static TCGv get_temp(DisasContext *ctx)
+{
+ unsigned i = ctx->ntemps++;
+ g_assert(i < ARRAY_SIZE(ctx->temps));
+ return ctx->temps[i] = tcg_temp_new();
+}
+
+static TCGv load_const(DisasContext *ctx, target_long v)
+{
+ TCGv t = get_temp(ctx);
+ tcg_gen_movi_tl(t, v);
+ return t;
+}
+
+static TCGv load_gpr(DisasContext *ctx, unsigned reg)
+{
+ if (reg == 0) {
+ TCGv t = get_temp(ctx);
+ tcg_gen_movi_tl(t, 0);
+ return t;
+ } else {
+ return cpu_gr[reg];
+ }
+}
+
+static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
+{
+ if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
+ return get_temp(ctx);
+ } else {
+ return cpu_gr[reg];
+ }
+}
+
+static void save_or_nullify(DisasContext *ctx, TCGv dest, TCGv t)
+{
+ if (ctx->null_cond.c != TCG_COND_NEVER) {
+ cond_prep(&ctx->null_cond);
+ tcg_gen_movcond_tl(ctx->null_cond.c, dest, ctx->null_cond.a0,
+ ctx->null_cond.a1, dest, t);
+ } else {
+ tcg_gen_mov_tl(dest, t);
+ }
+}
+
+static void save_gpr(DisasContext *ctx, unsigned reg, TCGv t)
+{
+ if (reg != 0) {
+ save_or_nullify(ctx, cpu_gr[reg], t);
+ }
+}
+
+#ifdef HOST_WORDS_BIGENDIAN
+# define HI_OFS 0
+# define LO_OFS 4
+#else
+# define HI_OFS 4
+# define LO_OFS 0
+#endif
+
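+/* The 64 single-precision FP register names map onto the halves of the
+   32 doubles in env->fr[]: bit 5 of the register number selects the low
+   word, with HI_OFS/LO_OFS above accounting for host endianness. */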
+static TCGv_i32 load_frw_i32(unsigned rt)
+{
+ TCGv_i32 ret = tcg_temp_new_i32();
+ tcg_gen_ld_i32(ret, cpu_env,
+ offsetof(CPUHPPAState, fr[rt & 31])
+ + (rt & 32 ? LO_OFS : HI_OFS));
+ return ret;
+}
+
+static TCGv_i32 load_frw0_i32(unsigned rt)
+{
+ if (rt == 0) {
+ return tcg_const_i32(0);
+ } else {
+ return load_frw_i32(rt);
+ }
+}
+
+static TCGv_i64 load_frw0_i64(unsigned rt)
+{
+ if (rt == 0) {
+ return tcg_const_i64(0);
+ } else {
+ TCGv_i64 ret = tcg_temp_new_i64();
+ tcg_gen_ld32u_i64(ret, cpu_env,
+ offsetof(CPUHPPAState, fr[rt & 31])
+ + (rt & 32 ? LO_OFS : HI_OFS));
+ return ret;
+ }
+}
+
+static void save_frw_i32(unsigned rt, TCGv_i32 val)
+{
+ tcg_gen_st_i32(val, cpu_env,
+ offsetof(CPUHPPAState, fr[rt & 31])
+ + (rt & 32 ? LO_OFS : HI_OFS));
+}
+
+#undef HI_OFS
+#undef LO_OFS
+
+static TCGv_i64 load_frd(unsigned rt)
+{
+ TCGv_i64 ret = tcg_temp_new_i64();
+ tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
+ return ret;
+}
+
+static TCGv_i64 load_frd0(unsigned rt)
+{
+ if (rt == 0) {
+ return tcg_const_i64(0);
+ } else {
+ return load_frd(rt);
+ }
+}
+
+static void save_frd(unsigned rt, TCGv_i64 val)
+{
+ tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
+}
+
+/* Skip over the implementation of an insn that has been nullified.
+ Use this when the insn is too complex for a conditional move. */
+static void nullify_over(DisasContext *ctx)
+{
+ if (ctx->null_cond.c != TCG_COND_NEVER) {
+ /* The always condition should have been handled in the main loop. */
+ assert(ctx->null_cond.c != TCG_COND_ALWAYS);
+
+ ctx->null_lab = gen_new_label();
+ cond_prep(&ctx->null_cond);
+
+ /* If we're using PSW[N], copy it to a temp because... */
+ if (ctx->null_cond.a0_is_n) {
+ ctx->null_cond.a0_is_n = false;
+ ctx->null_cond.a0 = tcg_temp_new();
+ tcg_gen_mov_tl(ctx->null_cond.a0, cpu_psw_n);
+ }
+ /* ... we clear it before branching over the implementation,
+ so that (1) it's clear after nullifying this insn and
+ (2) if this insn nullifies the next, PSW[N] is valid. */
+ if (ctx->psw_n_nonzero) {
+ ctx->psw_n_nonzero = false;
+ tcg_gen_movi_tl(cpu_psw_n, 0);
+ }
+
+ tcg_gen_brcond_tl(ctx->null_cond.c, ctx->null_cond.a0,
+ ctx->null_cond.a1, ctx->null_lab);
+ cond_free(&ctx->null_cond);
+ }
+}
+
+/* Save the current nullification state to PSW[N]. */
+static void nullify_save(DisasContext *ctx)
+{
+ if (ctx->null_cond.c == TCG_COND_NEVER) {
+ if (ctx->psw_n_nonzero) {
+ tcg_gen_movi_tl(cpu_psw_n, 0);
+ }
+ return;
+ }
+ if (!ctx->null_cond.a0_is_n) {
+ cond_prep(&ctx->null_cond);
+ tcg_gen_setcond_tl(ctx->null_cond.c, cpu_psw_n,
+ ctx->null_cond.a0, ctx->null_cond.a1);
+ ctx->psw_n_nonzero = true;
+ }
+ cond_free(&ctx->null_cond);
+}
+
+/* Set PSW[N] to X. The intention is that this is used immediately
+ before a goto_tb/exit_tb, so that there is no fallthru path to other
+ code within the TB. Therefore we do not update psw_n_nonzero. */
+static void nullify_set(DisasContext *ctx, bool x)
+{
+ if (ctx->psw_n_nonzero || x) {
+ tcg_gen_movi_tl(cpu_psw_n, x);
+ }
+}
+
+/* Mark the end of an instruction that may have been nullified.
+ This is the pair to nullify_over. */
+static ExitStatus nullify_end(DisasContext *ctx, ExitStatus status)
+{
+ TCGLabel *null_lab = ctx->null_lab;
+
+ if (likely(null_lab == NULL)) {
+ /* The current insn wasn't conditional or handled the condition
+ applied to it without a branch, so the (new) setting of
+ NULL_COND can be applied directly to the next insn. */
+ return status;
+ }
+ ctx->null_lab = NULL;
+
+ if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
+ /* The next instruction will be unconditional,
+ and NULL_COND already reflects that. */
+ gen_set_label(null_lab);
+ } else {
+ /* The insn that we just executed is itself nullifying the next
+ instruction. Store the condition in the PSW[N] global.
+ We asserted PSW[N] = 0 in nullify_over, so that after the
+ label we have the proper value in place. */
+ nullify_save(ctx);
+ gen_set_label(null_lab);
+ ctx->null_cond = cond_make_n();
+ }
+
+ assert(status != EXIT_GOTO_TB && status != EXIT_IAQ_N_UPDATED);
+ if (status == EXIT_NORETURN) {
+ status = NO_EXIT;
+ }
+ return status;
+}
+
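+/* Copy one IAOQ entry into DEST.  IVAL == -1 means the value is not
+   known at translation time and must be taken from VVAL instead. */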
+static void copy_iaoq_entry(TCGv dest, target_ulong ival, TCGv vval)
+{
+ if (unlikely(ival == -1)) {
+ tcg_gen_mov_tl(dest, vval);
+ } else {
+ tcg_gen_movi_tl(dest, ival);
+ }
+}
+
+static inline target_ulong iaoq_dest(DisasContext *ctx, target_long disp)
+{
+ return ctx->iaoq_f + disp + 8;
+}
+
+static void gen_excp_1(int exception)
+{
+ TCGv_i32 t = tcg_const_i32(exception);
+ gen_helper_excp(cpu_env, t);
+ tcg_temp_free_i32(t);
+}
+
+static ExitStatus gen_excp(DisasContext *ctx, int exception)
+{
+ copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
+ copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
+ nullify_save(ctx);
+ gen_excp_1(exception);
+ return EXIT_NORETURN;
+}
+
+static ExitStatus gen_illegal(DisasContext *ctx)
+{
+ nullify_over(ctx);
+ return nullify_end(ctx, gen_excp(ctx, EXCP_SIGILL));
+}
+
+static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ /* Suppress goto_tb in the case of single-stepping and IO. */
+ if ((ctx->tb->cflags & CF_LAST_IO) || ctx->singlestep_enabled) {
+ return false;
+ }
+ return true;
+}
+
+/* If the next insn is to be nullified, and it's on the same page,
+ and we're not attempting to set a breakpoint on it, then we can
+ totally skip the nullified insn. This avoids creating and
+ executing a TB that merely branches to the next TB. */
+static bool use_nullify_skip(DisasContext *ctx)
+{
+ return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
+ && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
+}
+
+static void gen_goto_tb(DisasContext *ctx, int which,
+ target_ulong f, target_ulong b)
+{
+ if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
+ tcg_gen_goto_tb(which);
+ tcg_gen_movi_tl(cpu_iaoq_f, f);
+ tcg_gen_movi_tl(cpu_iaoq_b, b);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + which);
+ } else {
+ copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
+ copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
+ if (ctx->singlestep_enabled) {
+ gen_excp_1(EXCP_DEBUG);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
+ }
+}
+
+/* PA has a habit of taking the LSB of a field and using that as the sign,
+ with the rest of the field becoming the least significant bits. */
+static target_long low_sextract(uint32_t val, int pos, int len)
+{
+ target_ulong x = -(target_ulong)extract32(val, pos, 1);
+ x = (x << (len - 1)) | extract32(val, pos + 1, len - 1);
+ return x;
+}
+
+static unsigned assemble_rt64(uint32_t insn)
+{
+ unsigned r1 = extract32(insn, 6, 1);
+ unsigned r0 = extract32(insn, 0, 5);
+ return r1 * 32 + r0;
+}
+
+static unsigned assemble_ra64(uint32_t insn)
+{
+ unsigned r1 = extract32(insn, 7, 1);
+ unsigned r0 = extract32(insn, 21, 5);
+ return r1 * 32 + r0;
+}
+
+static unsigned assemble_rb64(uint32_t insn)
+{
+ unsigned r1 = extract32(insn, 12, 1);
+ unsigned r0 = extract32(insn, 16, 5);
+ return r1 * 32 + r0;
+}
+
+static unsigned assemble_rc64(uint32_t insn)
+{
+ unsigned r2 = extract32(insn, 8, 1);
+ unsigned r1 = extract32(insn, 13, 3);
+ unsigned r0 = extract32(insn, 9, 2);
+ return r2 * 32 + r1 * 4 + r0;
+}
+
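+/* The assemble_<n> helpers below reassemble immediate operands whose
+   fields are scattered across the instruction word; per the note above,
+   the sign bit is taken from the field's LSB. */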
+static target_long assemble_12(uint32_t insn)
+{
+ target_ulong x = -(target_ulong)(insn & 1);
+ x = (x << 1) | extract32(insn, 2, 1);
+ x = (x << 10) | extract32(insn, 3, 10);
+ return x;
+}
+
+static target_long assemble_16(uint32_t insn)
+{
+ /* Take the name from PA2.0, which produces a 16-bit number
+ only with wide mode; otherwise a 14-bit number. Since we don't
+ implement wide mode, this is always the 14-bit number. */
+ return low_sextract(insn, 0, 14);
+}
+
+static target_long assemble_16a(uint32_t insn)
+{
+ /* Take the name from PA2.0, which produces a 14-bit shifted number
+ only with wide mode; otherwise a 12-bit shifted number. Since we
+ don't implement wide mode, this is always the 12-bit number. */
+ target_ulong x = -(target_ulong)(insn & 1);
+ x = (x << 11) | extract32(insn, 2, 11);
+ return x << 2;
+}
+
+static target_long assemble_17(uint32_t insn)
+{
+ target_ulong x = -(target_ulong)(insn & 1);
+ x = (x << 5) | extract32(insn, 16, 5);
+ x = (x << 1) | extract32(insn, 2, 1);
+ x = (x << 10) | extract32(insn, 3, 10);
+ return x << 2;
+}
+
+static target_long assemble_21(uint32_t insn)
+{
+ target_ulong x = -(target_ulong)(insn & 1);
+ x = (x << 11) | extract32(insn, 1, 11);
+ x = (x << 2) | extract32(insn, 14, 2);
+ x = (x << 5) | extract32(insn, 16, 5);
+ x = (x << 2) | extract32(insn, 12, 2);
+ return x << 11;
+}
+
+static target_long assemble_22(uint32_t insn)
+{
+ target_ulong x = -(target_ulong)(insn & 1);
+ x = (x << 10) | extract32(insn, 16, 10);
+ x = (x << 1) | extract32(insn, 2, 1);
+ x = (x << 10) | extract32(insn, 3, 10);
+ return x << 2;
+}
+
+/* The parisc documentation describes only the general interpretation of
+ the conditions, without describing their exact implementation. The
+ interpretations do not stand up well when considering ADD,C and SUB,B.
+ However, considering the Addition, Subtraction and Logical conditions
+ as a whole it would appear that these relations are similar to what
+ a traditional NZCV set of flags would produce. */
+
+static DisasCond do_cond(unsigned cf, TCGv res, TCGv cb_msb, TCGv sv)
+{
+ DisasCond cond;
+ TCGv tmp;
+
+ switch (cf >> 1) {
+ case 0: /* Never / TR */
+ cond = cond_make_f();
+ break;
+ case 1: /* = / <> (Z / !Z) */
+ cond = cond_make_0(TCG_COND_EQ, res);
+ break;
+ case 2: /* < / >= (N / !N) */
+ cond = cond_make_0(TCG_COND_LT, res);
+ break;
+ case 3: /* <= / > (N | Z / !N & !Z) */
+ cond = cond_make_0(TCG_COND_LE, res);
+ break;
+ case 4: /* NUV / UV (!C / C) */
+ cond = cond_make_0(TCG_COND_EQ, cb_msb);
+ break;
+ case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
+ tmp = tcg_temp_new();
+ tcg_gen_neg_tl(tmp, cb_msb);
+ tcg_gen_and_tl(tmp, tmp, res);
+ cond = cond_make_0(TCG_COND_EQ, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 6: /* SV / NSV (V / !V) */
+ cond = cond_make_0(TCG_COND_LT, sv);
+ break;
+ case 7: /* OD / EV */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_tl(tmp, res, 1);
+ cond = cond_make_0(TCG_COND_NE, tmp);
+ tcg_temp_free(tmp);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (cf & 1) {
+ cond.c = tcg_invert_cond(cond.c);
+ }
+
+ return cond;
+}
+
+/* Similar, but for the special case of subtraction without borrow, we
+ can use the inputs directly. This can allow other computation to be
+ deleted as unused. */
+
+static DisasCond do_sub_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2, TCGv sv)
+{
+ DisasCond cond;
+
+ switch (cf >> 1) {
+ case 1: /* = / <> */
+ cond = cond_make(TCG_COND_EQ, in1, in2);
+ break;
+ case 2: /* < / >= */
+ cond = cond_make(TCG_COND_LT, in1, in2);
+ break;
+ case 3: /* <= / > */
+ cond = cond_make(TCG_COND_LE, in1, in2);
+ break;
+ case 4: /* << / >>= */
+ cond = cond_make(TCG_COND_LTU, in1, in2);
+ break;
+ case 5: /* <<= / >> */
+ cond = cond_make(TCG_COND_LEU, in1, in2);
+ break;
+ default:
+ return do_cond(cf, res, sv, sv);
+ }
+ if (cf & 1) {
+ cond.c = tcg_invert_cond(cond.c);
+ }
+
+ return cond;
+}
+
+/* Similar, but for logicals, where the carry and overflow bits are not
+ computed, and use of them is undefined. */
+
+static DisasCond do_log_cond(unsigned cf, TCGv res)
+{
+ switch (cf >> 1) {
+ case 4: case 5: case 6:
+ cf &= 1;
+ break;
+ }
+ return do_cond(cf, res, res, res);
+}
+
+/* Similar, but for shift/extract/deposit conditions. */
+
+static DisasCond do_sed_cond(unsigned orig, TCGv res)
+{
+ unsigned c, f;
+
+ /* Convert the compressed condition codes to standard.
+ 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
+ 4-7 are the reverse of 0-3. */
+ c = orig & 3;
+ if (c == 3) {
+ c = 7;
+ }
+ f = (orig & 4) / 4;
+
+ return do_log_cond(c * 2 + f, res);
+}
+
+/* Similar, but for unit conditions. */
+
+static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
+{
+ DisasCond cond;
+ TCGv tmp, cb;
+
+ TCGV_UNUSED(cb);
+ if (cf & 8) {
+ /* Since we want to test lots of carry-out bits all at once, do not
+ * do our normal thing and compute carry-in of bit B+1 since that
+ * leaves us with carry bits spread across two words.
+ */
+ cb = tcg_temp_new();
+ tmp = tcg_temp_new();
+ tcg_gen_or_tl(cb, in1, in2);
+ tcg_gen_and_tl(tmp, in1, in2);
+ tcg_gen_andc_tl(cb, cb, res);
+ tcg_gen_or_tl(cb, cb, tmp);
+ tcg_temp_free(tmp);
+ }
+
+ switch (cf >> 1) {
+ case 0: /* never / TR */
+ case 1: /* undefined */
+ case 5: /* undefined */
+ cond = cond_make_f();
+ break;
+
+ case 2: /* SBZ / NBZ */
+ /* See hasless(v,1) from
+ * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
+ */
+ tmp = tcg_temp_new();
+ tcg_gen_subi_tl(tmp, res, 0x01010101u);
+ tcg_gen_andc_tl(tmp, tmp, res);
+ tcg_gen_andi_tl(tmp, tmp, 0x80808080u);
+ cond = cond_make_0(TCG_COND_NE, tmp);
+ tcg_temp_free(tmp);
+ break;
+
+ case 3: /* SHZ / NHZ */
+ tmp = tcg_temp_new();
+ tcg_gen_subi_tl(tmp, res, 0x00010001u);
+ tcg_gen_andc_tl(tmp, tmp, res);
+ tcg_gen_andi_tl(tmp, tmp, 0x80008000u);
+ cond = cond_make_0(TCG_COND_NE, tmp);
+ tcg_temp_free(tmp);
+ break;
+
+ case 4: /* SDC / NDC */
+ tcg_gen_andi_tl(cb, cb, 0x88888888u);
+ cond = cond_make_0(TCG_COND_NE, cb);
+ break;
+
+ case 6: /* SBC / NBC */
+ tcg_gen_andi_tl(cb, cb, 0x80808080u);
+ cond = cond_make_0(TCG_COND_NE, cb);
+ break;
+
+ case 7: /* SHC / NHC */
+ tcg_gen_andi_tl(cb, cb, 0x80008000u);
+ cond = cond_make_0(TCG_COND_NE, cb);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+ if (cf & 8) {
+ tcg_temp_free(cb);
+ }
+ if (cf & 1) {
+ cond.c = tcg_invert_cond(cond.c);
+ }
+
+ return cond;
+}
+
+/* Compute signed overflow for addition. */
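+/* SV is negative exactly when the operands share a sign and the sum's
+   sign differs from it, i.e. on signed overflow; do_cond tests the value
+   with TCG_COND_LT. */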
+static TCGv do_add_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
+{
+ TCGv sv = get_temp(ctx);
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_xor_tl(sv, res, in1);
+ tcg_gen_xor_tl(tmp, in1, in2);
+ tcg_gen_andc_tl(sv, sv, tmp);
+ tcg_temp_free(tmp);
+
+ return sv;
+}
+
+/* Compute signed overflow for subtraction. */
+static TCGv do_sub_sv(DisasContext *ctx, TCGv res, TCGv in1, TCGv in2)
+{
+ TCGv sv = get_temp(ctx);
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_xor_tl(sv, res, in1);
+ tcg_gen_xor_tl(tmp, in1, in2);
+ tcg_gen_and_tl(sv, sv, tmp);
+ tcg_temp_free(tmp);
+
+ return sv;
+}
+
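+/* Shared implementation of the ADD family.  SHIFT implements the
+   shift-and-add forms, IS_L skips the carry/borrow writeback (the
+   "logical" add), IS_TSV traps on signed overflow, IS_TC traps on the
+   condition, and IS_C adds in the saved carry bit. */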
+static ExitStatus do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
+ unsigned shift, bool is_l, bool is_tsv, bool is_tc,
+ bool is_c, unsigned cf)
+{
+ TCGv dest, cb, cb_msb, sv, tmp;
+ unsigned c = cf >> 1;
+ DisasCond cond;
+
+ dest = tcg_temp_new();
+ TCGV_UNUSED(cb);
+ TCGV_UNUSED(cb_msb);
+
+ if (shift) {
+ tmp = get_temp(ctx);
+ tcg_gen_shli_tl(tmp, in1, shift);
+ in1 = tmp;
+ }
+
+ if (!is_l || c == 4 || c == 5) {
+ TCGv zero = tcg_const_tl(0);
+ cb_msb = get_temp(ctx);
+ tcg_gen_add2_tl(dest, cb_msb, in1, zero, in2, zero);
+ if (is_c) {
+ tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
+ }
+ tcg_temp_free(zero);
+ if (!is_l) {
+ cb = get_temp(ctx);
+ tcg_gen_xor_tl(cb, in1, in2);
+ tcg_gen_xor_tl(cb, cb, dest);
+ }
+ } else {
+ tcg_gen_add_tl(dest, in1, in2);
+ if (is_c) {
+ tcg_gen_add_tl(dest, dest, cpu_psw_cb_msb);
+ }
+ }
+
+ /* Compute signed overflow if required. */
+ TCGV_UNUSED(sv);
+ if (is_tsv || c == 6) {
+ sv = do_add_sv(ctx, dest, in1, in2);
+ if (is_tsv) {
+ /* ??? Need to include overflow from shift. */
+ gen_helper_tsv(cpu_env, sv);
+ }
+ }
+
+ /* Emit any conditional trap before any writeback. */
+ cond = do_cond(cf, dest, cb_msb, sv);
+ if (is_tc) {
+ cond_prep(&cond);
+ tmp = tcg_temp_new();
+ tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
+ gen_helper_tcond(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ }
+
+ /* Write back the result. */
+ if (!is_l) {
+ save_or_nullify(ctx, cpu_psw_cb, cb);
+ save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
+ }
+ save_gpr(ctx, rt, dest);
+ tcg_temp_free(dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond;
+ return NO_EXIT;
+}
+
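+/* Shared implementation of the SUB family: IS_B selects subtract with
+   borrow (consuming the saved carry bit), with IS_TSV and IS_TC as for
+   do_add above. */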
+static ExitStatus do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
+ bool is_tsv, bool is_b, bool is_tc, unsigned cf)
+{
+ TCGv dest, sv, cb, cb_msb, zero, tmp;
+ unsigned c = cf >> 1;
+ DisasCond cond;
+
+ dest = tcg_temp_new();
+ cb = tcg_temp_new();
+ cb_msb = tcg_temp_new();
+
+ zero = tcg_const_tl(0);
+ if (is_b) {
+ /* DEST,C = IN1 + ~IN2 + C. */
+ tcg_gen_not_tl(cb, in2);
+ tcg_gen_add2_tl(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
+ tcg_gen_add2_tl(dest, cb_msb, dest, cb_msb, cb, zero);
+ tcg_gen_xor_tl(cb, cb, in1);
+ tcg_gen_xor_tl(cb, cb, dest);
+ } else {
+ /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
+ operations by seeding the high word with 1 and subtracting. */
+ tcg_gen_movi_tl(cb_msb, 1);
+ tcg_gen_sub2_tl(dest, cb_msb, in1, cb_msb, in2, zero);
+ tcg_gen_eqv_tl(cb, in1, in2);
+ tcg_gen_xor_tl(cb, cb, dest);
+ }
+ tcg_temp_free(zero);
+
+ /* Compute signed overflow if required. */
+ TCGV_UNUSED(sv);
+ if (is_tsv || c == 6) {
+ sv = do_sub_sv(ctx, dest, in1, in2);
+ if (is_tsv) {
+ gen_helper_tsv(cpu_env, sv);
+ }
+ }
+
+ /* Compute the condition. We cannot use the special case for borrow. */
+ if (!is_b) {
+ cond = do_sub_cond(cf, dest, in1, in2, sv);
+ } else {
+ cond = do_cond(cf, dest, cb_msb, sv);
+ }
+
+ /* Emit any conditional trap before any writeback. */
+ if (is_tc) {
+ cond_prep(&cond);
+ tmp = tcg_temp_new();
+ tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
+ gen_helper_tcond(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ }
+
+ /* Write back the result. */
+ save_or_nullify(ctx, cpu_psw_cb, cb);
+ save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
+ save_gpr(ctx, rt, dest);
+ tcg_temp_free(dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond;
+ return NO_EXIT;
+}
+
+static ExitStatus do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
+ TCGv in2, unsigned cf)
+{
+ TCGv dest, sv;
+ DisasCond cond;
+
+ dest = tcg_temp_new();
+ tcg_gen_sub_tl(dest, in1, in2);
+
+ /* Compute signed overflow if required. */
+ TCGV_UNUSED(sv);
+ if ((cf >> 1) == 6) {
+ sv = do_sub_sv(ctx, dest, in1, in2);
+ }
+
+ /* Form the condition for the compare. */
+ cond = do_sub_cond(cf, dest, in1, in2, sv);
+
+ /* Clear. */
+ tcg_gen_movi_tl(dest, 0);
+ save_gpr(ctx, rt, dest);
+ tcg_temp_free(dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond;
+ return NO_EXIT;
+}
+
+static ExitStatus do_log(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
+ unsigned cf, void (*fn)(TCGv, TCGv, TCGv))
+{
+ TCGv dest = dest_gpr(ctx, rt);
+
+ /* Perform the operation, and writeback. */
+ fn(dest, in1, in2);
+ save_gpr(ctx, rt, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (cf) {
+ ctx->null_cond = do_log_cond(cf, dest);
+ }
+ return NO_EXIT;
+}
+
+static ExitStatus do_unit(DisasContext *ctx, unsigned rt, TCGv in1,
+ TCGv in2, unsigned cf, bool is_tc,
+ void (*fn)(TCGv, TCGv, TCGv))
+{
+ TCGv dest;
+ DisasCond cond;
+
+ if (cf == 0) {
+ dest = dest_gpr(ctx, rt);
+ fn(dest, in1, in2);
+ save_gpr(ctx, rt, dest);
+ cond_free(&ctx->null_cond);
+ } else {
+ dest = tcg_temp_new();
+ fn(dest, in1, in2);
+
+ cond = do_unit_cond(cf, dest, in1, in2);
+
+ if (is_tc) {
+ TCGv tmp = tcg_temp_new();
+ cond_prep(&cond);
+ tcg_gen_setcond_tl(cond.c, tmp, cond.a0, cond.a1);
+ gen_helper_tcond(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ }
+ save_gpr(ctx, rt, dest);
+
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond;
+ }
+ return NO_EXIT;
+}
+
+/* Emit a memory load. The modify parameter should be
+ * < 0 for pre-modify,
+ * > 0 for post-modify,
+ * = 0 for no base register update.
+ */
+static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
+ unsigned rx, int scale, target_long disp,
+ int modify, TCGMemOp mop)
+{
+ TCGv addr, base;
+
+ /* Caller uses nullify_over/nullify_end. */
+ assert(ctx->null_cond.c == TCG_COND_NEVER);
+
+ addr = tcg_temp_new();
+ base = load_gpr(ctx, rb);
+
+ /* Note that RX is mutually exclusive with DISP. */
+ if (rx) {
+ tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
+ tcg_gen_add_tl(addr, addr, base);
+ } else {
+ tcg_gen_addi_tl(addr, base, disp);
+ }
+
+ if (modify == 0) {
+ tcg_gen_qemu_ld_i32(dest, addr, MMU_USER_IDX, mop);
+ } else {
+ tcg_gen_qemu_ld_i32(dest, (modify < 0 ? addr : base),
+ MMU_USER_IDX, mop);
+ save_gpr(ctx, rb, addr);
+ }
+ tcg_temp_free(addr);
+}
+
+static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
+ unsigned rx, int scale, target_long disp,
+ int modify, TCGMemOp mop)
+{
+ TCGv addr, base;
+
+ /* Caller uses nullify_over/nullify_end. */
+ assert(ctx->null_cond.c == TCG_COND_NEVER);
+
+ addr = tcg_temp_new();
+ base = load_gpr(ctx, rb);
+
+ /* Note that RX is mutually exclusive with DISP. */
+ if (rx) {
+ tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
+ tcg_gen_add_tl(addr, addr, base);
+ } else {
+ tcg_gen_addi_tl(addr, base, disp);
+ }
+
+ if (modify == 0) {
+ tcg_gen_qemu_ld_i64(dest, addr, MMU_USER_IDX, mop);
+ } else {
+ tcg_gen_qemu_ld_i64(dest, (modify < 0 ? addr : base),
+ MMU_USER_IDX, mop);
+ save_gpr(ctx, rb, addr);
+ }
+ tcg_temp_free(addr);
+}
+
+static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
+ unsigned rx, int scale, target_long disp,
+ int modify, TCGMemOp mop)
+{
+ TCGv addr, base;
+
+ /* Caller uses nullify_over/nullify_end. */
+ assert(ctx->null_cond.c == TCG_COND_NEVER);
+
+ addr = tcg_temp_new();
+ base = load_gpr(ctx, rb);
+
+ /* Note that RX is mutually exclusive with DISP. */
+ if (rx) {
+ tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
+ tcg_gen_add_tl(addr, addr, base);
+ } else {
+ tcg_gen_addi_tl(addr, base, disp);
+ }
+
+ tcg_gen_qemu_st_i32(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
+
+ if (modify != 0) {
+ save_gpr(ctx, rb, addr);
+ }
+ tcg_temp_free(addr);
+}
+
+static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
+ unsigned rx, int scale, target_long disp,
+ int modify, TCGMemOp mop)
+{
+ TCGv addr, base;
+
+ /* Caller uses nullify_over/nullify_end. */
+ assert(ctx->null_cond.c == TCG_COND_NEVER);
+
+ addr = tcg_temp_new();
+ base = load_gpr(ctx, rb);
+
+ /* Note that RX is mutually exclusive with DISP. */
+ if (rx) {
+ tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
+ tcg_gen_add_tl(addr, addr, base);
+ } else {
+ tcg_gen_addi_tl(addr, base, disp);
+ }
+
+ tcg_gen_qemu_st_i64(src, (modify <= 0 ? addr : base), MMU_USER_IDX, mop);
+
+ if (modify != 0) {
+ save_gpr(ctx, rb, addr);
+ }
+ tcg_temp_free(addr);
+}
+
+#if TARGET_LONG_BITS == 64
+#define do_load_tl do_load_64
+#define do_store_tl do_store_64
+#else
+#define do_load_tl do_load_32
+#define do_store_tl do_store_32
+#endif
+
+static ExitStatus do_load(DisasContext *ctx, unsigned rt, unsigned rb,
+ unsigned rx, int scale, target_long disp,
+ int modify, TCGMemOp mop)
+{
+ TCGv dest;
+
+ nullify_over(ctx);
+
+ if (modify == 0) {
+ /* No base register update. */
+ dest = dest_gpr(ctx, rt);
+ } else {
+ /* Make sure if RT == RB, we see the result of the load. */
+ dest = get_temp(ctx);
+ }
+ do_load_tl(ctx, dest, rb, rx, scale, disp, modify, mop);
+ save_gpr(ctx, rt, dest);
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
+ unsigned rx, int scale, target_long disp,
+ int modify)
+{
+ TCGv_i32 tmp;
+
+ nullify_over(ctx);
+
+ tmp = tcg_temp_new_i32();
+ do_load_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
+ save_frw_i32(rt, tmp);
+ tcg_temp_free_i32(tmp);
+
+ if (rt == 0) {
+ gen_helper_loaded_fr0(cpu_env);
+ }
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
+ unsigned rx, int scale, target_long disp,
+ int modify)
+{
+ TCGv_i64 tmp;
+
+ nullify_over(ctx);
+
+ tmp = tcg_temp_new_i64();
+ do_load_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
+ save_frd(rt, tmp);
+ tcg_temp_free_i64(tmp);
+
+ if (rt == 0) {
+ gen_helper_loaded_fr0(cpu_env);
+ }
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_store(DisasContext *ctx, unsigned rt, unsigned rb,
+ target_long disp, int modify, TCGMemOp mop)
+{
+ nullify_over(ctx);
+ do_store_tl(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, modify, mop);
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
+ unsigned rx, int scale, target_long disp,
+ int modify)
+{
+ TCGv_i32 tmp;
+
+ nullify_over(ctx);
+
+ tmp = load_frw_i32(rt);
+ do_store_32(ctx, tmp, rb, rx, scale, disp, modify, MO_TEUL);
+ tcg_temp_free_i32(tmp);
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
+ unsigned rx, int scale, target_long disp,
+ int modify)
+{
+ TCGv_i64 tmp;
+
+ nullify_over(ctx);
+
+ tmp = load_frd(rt);
+ do_store_64(ctx, tmp, rb, rx, scale, disp, modify, MO_TEQ);
+ tcg_temp_free_i64(tmp);
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
+{
+ TCGv_i32 tmp;
+
+ nullify_over(ctx);
+ tmp = load_frw0_i32(ra);
+
+ func(tmp, cpu_env, tmp);
+
+ save_frw_i32(rt, tmp);
+ tcg_temp_free_i32(tmp);
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
+ void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
+{
+ TCGv_i32 dst;
+ TCGv_i64 src;
+
+ nullify_over(ctx);
+ src = load_frd(ra);
+ dst = tcg_temp_new_i32();
+
+ func(dst, cpu_env, src);
+
+ tcg_temp_free_i64(src);
+ save_frw_i32(rt, dst);
+ tcg_temp_free_i32(dst);
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
+{
+ TCGv_i64 tmp;
+
+ nullify_over(ctx);
+ tmp = load_frd0(ra);
+
+ func(tmp, cpu_env, tmp);
+
+ save_frd(rt, tmp);
+ tcg_temp_free_i64(tmp);
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
+ void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
+{
+ TCGv_i32 src;
+ TCGv_i64 dst;
+
+ nullify_over(ctx);
+ src = load_frw0_i32(ra);
+ dst = tcg_temp_new_i64();
+
+ func(dst, cpu_env, src);
+
+ tcg_temp_free_i32(src);
+ save_frd(rt, dst);
+ tcg_temp_free_i64(dst);
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_fop_weww(DisasContext *ctx, unsigned rt,
+ unsigned ra, unsigned rb,
+ void (*func)(TCGv_i32, TCGv_env,
+ TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 a, b;
+
+ nullify_over(ctx);
+ a = load_frw0_i32(ra);
+ b = load_frw0_i32(rb);
+
+ func(a, cpu_env, a, b);
+
+ tcg_temp_free_i32(b);
+ save_frw_i32(rt, a);
+ tcg_temp_free_i32(a);
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus do_fop_dedd(DisasContext *ctx, unsigned rt,
+ unsigned ra, unsigned rb,
+ void (*func)(TCGv_i64, TCGv_env,
+ TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 a, b;
+
+ nullify_over(ctx);
+ a = load_frd0(ra);
+ b = load_frd0(rb);
+
+ func(a, cpu_env, a, b);
+
+ tcg_temp_free_i64(b);
+ save_frd(rt, a);
+ tcg_temp_free_i64(a);
+ return nullify_end(ctx, NO_EXIT);
+}
+
+/* Emit an unconditional branch to a direct target, which may or may not
+ have already had nullification handled. */
+static ExitStatus do_dbranch(DisasContext *ctx, target_ulong dest,
+ unsigned link, bool is_n)
+{
+ if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
+ if (link != 0) {
+ copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ }
+ ctx->iaoq_n = dest;
+ if (is_n) {
+ ctx->null_cond.c = TCG_COND_ALWAYS;
+ }
+ return NO_EXIT;
+ } else {
+ nullify_over(ctx);
+
+ if (link != 0) {
+ copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ }
+
+ if (is_n && use_nullify_skip(ctx)) {
+ nullify_set(ctx, 0);
+ gen_goto_tb(ctx, 0, dest, dest + 4);
+ } else {
+ nullify_set(ctx, is_n);
+ gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
+ }
+
+ nullify_end(ctx, NO_EXIT);
+
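+ /* This path runs when the branch itself was nullified (the label
+ emitted by nullify_end): fall through to the insn after the branch. */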
+ nullify_set(ctx, 0);
+ gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
+ return EXIT_GOTO_TB;
+ }
+}
+
+/* Emit a conditional branch to a direct target. If the branch itself
+ is nullified, we should have already used nullify_over. */
+static ExitStatus do_cbranch(DisasContext *ctx, target_long disp, bool is_n,
+ DisasCond *cond)
+{
+ target_ulong dest = iaoq_dest(ctx, disp);
+ TCGLabel *taken = NULL;
+ TCGCond c = cond->c;
+ int which = 0;
+ bool n;
+
+ assert(ctx->null_cond.c == TCG_COND_NEVER);
+
+ /* Handle ALWAYS and NEVER as direct branches. */
+ if (c == TCG_COND_ALWAYS) {
+ return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
+ }
+ if (c == TCG_COND_NEVER) {
+ return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
+ }
+
+ taken = gen_new_label();
+ cond_prep(cond);
+ tcg_gen_brcond_tl(c, cond->a0, cond->a1, taken);
+ cond_free(cond);
+
+ /* Not taken: Condition not satisfied; nullify on backward branches. */
+ n = is_n && disp < 0;
+ if (n && use_nullify_skip(ctx)) {
+ nullify_set(ctx, 0);
+ gen_goto_tb(ctx, which++, ctx->iaoq_n, ctx->iaoq_n + 4);
+ } else {
+ if (!n && ctx->null_lab) {
+ gen_set_label(ctx->null_lab);
+ ctx->null_lab = NULL;
+ }
+ nullify_set(ctx, n);
+ gen_goto_tb(ctx, which++, ctx->iaoq_b, ctx->iaoq_n);
+ }
+
+ gen_set_label(taken);
+
+ /* Taken: Condition satisfied; nullify on forward branches. */
+ n = is_n && disp >= 0;
+ if (n && use_nullify_skip(ctx)) {
+ nullify_set(ctx, 0);
+ gen_goto_tb(ctx, which++, dest, dest + 4);
+ } else {
+ nullify_set(ctx, n);
+ gen_goto_tb(ctx, which++, ctx->iaoq_b, dest);
+ }
+
+ /* Not taken: the branch itself was nullified. */
+ if (ctx->null_lab) {
+ gen_set_label(ctx->null_lab);
+ ctx->null_lab = NULL;
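+ /* A TB may chain directly to at most two successors (goto_tb slots
+ 0 and 1); WHICH counts how many slots were used above. If both
+ are taken, fall back to a stale-IAQ exit. */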
+ if (which < 2) {
+ nullify_set(ctx, 0);
+ gen_goto_tb(ctx, which, ctx->iaoq_b, ctx->iaoq_n);
+ return EXIT_GOTO_TB;
+ } else {
+ return EXIT_IAQ_N_STALE;
+ }
+ } else {
+ return EXIT_GOTO_TB;
+ }
+}
+
+/* Emit an unconditional branch to an indirect target. This handles
+ nullification of the branch itself. */
+static ExitStatus do_ibranch(DisasContext *ctx, TCGv dest,
+ unsigned link, bool is_n)
+{
+ TCGv a0, a1, next, tmp;
+ TCGCond c;
+
+ assert(ctx->null_lab == NULL);
+
+ if (ctx->null_cond.c == TCG_COND_NEVER) {
+ if (link != 0) {
+ copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
+ }
+ next = get_temp(ctx);
+ tcg_gen_mov_tl(next, dest);
+ ctx->iaoq_n = -1;
+ ctx->iaoq_n_var = next;
+ if (is_n) {
+ ctx->null_cond.c = TCG_COND_ALWAYS;
+ }
+ } else if (is_n && use_nullify_skip(ctx)) {
+ /* The (conditional) branch, B, nullifies the next insn, N,
+ and we're allowed to skip execution of N (no single-step or
+ tracepoint in effect). Since the exit_tb that we must use
+ for the indirect branch consumes no special resources, we
+ can (conditionally) skip B and continue execution. */
+ /* The use_nullify_skip test implies we have a known control path. */
+ tcg_debug_assert(ctx->iaoq_b != -1);
+ tcg_debug_assert(ctx->iaoq_n != -1);
+
+ /* We do have to handle the non-local temporary, DEST, before
+ branching. Since IAOQ_F is not really live at this point, we
+ can simply store DEST optimistically. Similarly with IAOQ_B. */
+ tcg_gen_mov_tl(cpu_iaoq_f, dest);
+ tcg_gen_addi_tl(cpu_iaoq_b, dest, 4);
+
+ nullify_over(ctx);
+ if (link != 0) {
+ tcg_gen_movi_tl(cpu_gr[link], ctx->iaoq_n);
+ }
+ tcg_gen_exit_tb(0);
+ return nullify_end(ctx, NO_EXIT);
+ } else {
+ cond_prep(&ctx->null_cond);
+ c = ctx->null_cond.c;
+ a0 = ctx->null_cond.a0;
+ a1 = ctx->null_cond.a1;
+
+ tmp = tcg_temp_new();
+ next = get_temp(ctx);
+
+ copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
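+ /* If the branch is nullified (null_cond holds), the next IAOQ is
+ the sequential successor in TMP; otherwise it is DEST. */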
+ tcg_gen_movcond_tl(c, next, a0, a1, tmp, dest);
+ ctx->iaoq_n = -1;
+ ctx->iaoq_n_var = next;
+
+ if (link != 0) {
+ tcg_gen_movcond_tl(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
+ }
+
+ if (is_n) {
+ /* The branch nullifies the next insn, which means the state of N
+ after the branch is the inverse of the state of N that applied
+ to the branch. */
+ tcg_gen_setcond_tl(tcg_invert_cond(c), cpu_psw_n, a0, a1);
+ cond_free(&ctx->null_cond);
+ ctx->null_cond = cond_make_n();
+ ctx->psw_n_nonzero = true;
+ } else {
+ cond_free(&ctx->null_cond);
+ }
+ }
+
+ return NO_EXIT;
+}
+
+/* On Linux, page zero is normally marked execute only + gateway.
+ Therefore normal read or write is supposed to fail, but specific
+ offsets have kernel code mapped to raise permissions to implement
+ system calls. Handling this via an explicit check here, rather
+ in than the "be disp(sr2,r0)" instruction that probably sent us
+ here, is the easiest way to handle the branch delay slot on the
+ aforementioned BE. */
+static ExitStatus do_page_zero(DisasContext *ctx)
+{
+ /* If by some means we get here with PSW[N]=1, that implies that
+ the B,GATE instruction would be skipped, and we'd fault on the
+ next insn within the privileged page. */
+ switch (ctx->null_cond.c) {
+ case TCG_COND_NEVER:
+ break;
+ case TCG_COND_ALWAYS:
+ tcg_gen_movi_tl(cpu_psw_n, 0);
+ goto do_sigill;
+ default:
+ /* Since this is always the first (and only) insn within the
+ TB, we should know the state of PSW[N] from TB->FLAGS. */
+ g_assert_not_reached();
+ }
+
+ /* Check that we didn't arrive here via some means that allowed
+ non-sequential instruction execution. Normally the PSW[B] bit
+ detects this by disallowing the B,GATE instruction to execute
+ under such conditions. */
+ if (ctx->iaoq_b != ctx->iaoq_f + 4) {
+ goto do_sigill;
+ }
+
+ switch (ctx->iaoq_f) {
+ case 0x00: /* Null pointer call */
+ gen_excp_1(EXCP_SIGSEGV);
+ return EXIT_NORETURN;
+
+ case 0xb0: /* LWS */
+ gen_excp_1(EXCP_SYSCALL_LWS);
+ return EXIT_NORETURN;
+
+ case 0xe0: /* SET_THREAD_POINTER */
+ tcg_gen_mov_tl(cpu_cr27, cpu_gr[26]);
+ tcg_gen_mov_tl(cpu_iaoq_f, cpu_gr[31]);
+ tcg_gen_addi_tl(cpu_iaoq_b, cpu_iaoq_f, 4);
+ return EXIT_IAQ_N_UPDATED;
+
+ case 0x100: /* SYSCALL */
+ gen_excp_1(EXCP_SYSCALL);
+ return EXIT_NORETURN;
+
+ default:
+ do_sigill:
+ gen_excp_1(EXCP_SIGILL);
+ return EXIT_NORETURN;
+ }
+}
+
+static ExitStatus trans_nop(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ cond_free(&ctx->null_cond);
+ return NO_EXIT;
+}
+
+static ExitStatus trans_break(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ nullify_over(ctx);
+ return nullify_end(ctx, gen_excp(ctx, EXCP_DEBUG));
+}
+
+static ExitStatus trans_sync(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ /* No point in nullifying the memory barrier. */
+ tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
+
+ cond_free(&ctx->null_cond);
+ return NO_EXIT;
+}
+
+static ExitStatus trans_mfia(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv tmp = dest_gpr(ctx, rt);
+ tcg_gen_movi_tl(tmp, ctx->iaoq_f);
+ save_gpr(ctx, rt, tmp);
+
+ cond_free(&ctx->null_cond);
+ return NO_EXIT;
+}
+
+static ExitStatus trans_mfsp(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv tmp = dest_gpr(ctx, rt);
+
+ /* ??? We don't implement space registers. */
+ tcg_gen_movi_tl(tmp, 0);
+ save_gpr(ctx, rt, tmp);
+
+ cond_free(&ctx->null_cond);
+ return NO_EXIT;
+}
+
+static ExitStatus trans_mfctl(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned ctl = extract32(insn, 21, 5);
+ TCGv tmp;
+
+ switch (ctl) {
+ case 11: /* SAR */
+#ifdef TARGET_HPPA64
+ if (extract32(insn, 14, 1) == 0) {
+ /* MFSAR without ,W masks low 5 bits. */
+ tmp = dest_gpr(ctx, rt);
+ tcg_gen_andi_tl(tmp, cpu_sar, 31);
+ save_gpr(ctx, rt, tmp);
+ break;
+ }
+#endif
+ save_gpr(ctx, rt, cpu_sar);
+ break;
+ case 16: /* Interval Timer */
+ tmp = dest_gpr(ctx, rt);
+ tcg_gen_movi_tl(tmp, 0); /* FIXME */
+ save_gpr(ctx, rt, tmp);
+ break;
+ case 26:
+ save_gpr(ctx, rt, cpu_cr26);
+ break;
+ case 27:
+ save_gpr(ctx, rt, cpu_cr27);
+ break;
+ default:
+ /* All other control registers are privileged. */
+ return gen_illegal(ctx);
+ }
+
+ cond_free(&ctx->null_cond);
+ return NO_EXIT;
+}
+
+static ExitStatus trans_mtctl(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rin = extract32(insn, 16, 5);
+ unsigned ctl = extract32(insn, 21, 5);
+ TCGv tmp;
+
+ if (ctl == 11) { /* SAR */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_tl(tmp, load_gpr(ctx, rin), TARGET_LONG_BITS - 1);
+ save_or_nullify(ctx, cpu_sar, tmp);
+ tcg_temp_free(tmp);
+ } else {
+ /* All other control registers are privileged or read-only. */
+ return gen_illegal(ctx);
+ }
+
+ cond_free(&ctx->null_cond);
+ return NO_EXIT;
+}
+
+static ExitStatus trans_mtsarcm(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rin = extract32(insn, 16, 5);
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_not_tl(tmp, load_gpr(ctx, rin));
+ tcg_gen_andi_tl(tmp, tmp, TARGET_LONG_BITS - 1);
+ save_or_nullify(ctx, cpu_sar, tmp);
+ tcg_temp_free(tmp);
+
+ cond_free(&ctx->null_cond);
+ return NO_EXIT;
+}
+
+static ExitStatus trans_ldsid(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv dest = dest_gpr(ctx, rt);
+
+ /* Since we don't implement space registers, this returns zero. */
+ tcg_gen_movi_tl(dest, 0);
+ save_gpr(ctx, rt, dest);
+
+ cond_free(&ctx->null_cond);
+ return NO_EXIT;
+}
+
+static const DisasInsn table_system[] = {
+ { 0x00000000u, 0xfc001fe0u, trans_break },
+ /* We don't implement space registers, so MTSP is a nop. */
+ { 0x00001820u, 0xffe01fffu, trans_nop },
+ { 0x00001840u, 0xfc00ffffu, trans_mtctl },
+ { 0x016018c0u, 0xffe0ffffu, trans_mtsarcm },
+ { 0x000014a0u, 0xffffffe0u, trans_mfia },
+ { 0x000004a0u, 0xffff1fe0u, trans_mfsp },
+ { 0x000008a0u, 0xfc1fffe0u, trans_mfctl },
+ { 0x00000400u, 0xffffffffu, trans_sync },
+ { 0x000010a0u, 0xfc1f3fe0u, trans_ldsid },
+};
+
+static ExitStatus trans_base_idx_mod(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned rx = extract32(insn, 16, 5);
+ TCGv dest = dest_gpr(ctx, rb);
+ TCGv src1 = load_gpr(ctx, rb);
+ TCGv src2 = load_gpr(ctx, rx);
+
+ /* The only thing we need to do is the base register modification. */
+ tcg_gen_add_tl(dest, src1, src2);
+ save_gpr(ctx, rb, dest);
+
+ cond_free(&ctx->null_cond);
+ return NO_EXIT;
+}
+
+static ExitStatus trans_probe(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned is_write = extract32(insn, 6, 1);
+ TCGv dest;
+
+ nullify_over(ctx);
+
+ /* ??? Do something with priv level operand. */
+ dest = dest_gpr(ctx, rt);
+ if (is_write) {
+ gen_helper_probe_w(dest, load_gpr(ctx, rb));
+ } else {
+ gen_helper_probe_r(dest, load_gpr(ctx, rb));
+ }
+ save_gpr(ctx, rt, dest);
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static const DisasInsn table_mem_mgmt[] = {
+ { 0x04003280u, 0xfc003fffu, trans_nop }, /* fdc, disp */
+ { 0x04001280u, 0xfc003fffu, trans_nop }, /* fdc, index */
+ { 0x040012a0u, 0xfc003fffu, trans_base_idx_mod }, /* fdc, index, base mod */
+ { 0x040012c0u, 0xfc003fffu, trans_nop }, /* fdce */
+ { 0x040012e0u, 0xfc003fffu, trans_base_idx_mod }, /* fdce, base mod */
+ { 0x04000280u, 0xfc001fffu, trans_nop }, /* fic 0a */
+ { 0x040002a0u, 0xfc001fffu, trans_base_idx_mod }, /* fic 0a, base mod */
+ { 0x040013c0u, 0xfc003fffu, trans_nop }, /* fic 4f */
+ { 0x040013e0u, 0xfc003fffu, trans_base_idx_mod }, /* fic 4f, base mod */
+ { 0x040002c0u, 0xfc001fffu, trans_nop }, /* fice */
+ { 0x040002e0u, 0xfc001fffu, trans_base_idx_mod }, /* fice, base mod */
+ { 0x04002700u, 0xfc003fffu, trans_nop }, /* pdc */
+ { 0x04002720u, 0xfc003fffu, trans_base_idx_mod }, /* pdc, base mod */
+ { 0x04001180u, 0xfc003fa0u, trans_probe }, /* probe */
+ { 0x04003180u, 0xfc003fa0u, trans_probe }, /* probei */
+};
+
+static ExitStatus trans_add(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned r2 = extract32(insn, 21, 5);
+ unsigned r1 = extract32(insn, 16, 5);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned ext = extract32(insn, 8, 4);
+ unsigned shift = extract32(insn, 6, 2);
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv tcg_r1, tcg_r2;
+ bool is_c = false;
+ bool is_l = false;
+ bool is_tc = false;
+ bool is_tsv = false;
+ ExitStatus ret;
+
+ switch (ext) {
+ case 0x6: /* ADD, SHLADD */
+ break;
+ case 0xa: /* ADD,L, SHLADD,L */
+ is_l = true;
+ break;
+ case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
+ is_tsv = true;
+ break;
+ case 0x7: /* ADD,C */
+ is_c = true;
+ break;
+ case 0xf: /* ADD,C,TSV */
+ is_c = is_tsv = true;
+ break;
+ default:
+ return gen_illegal(ctx);
+ }
+
+ if (cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, r1);
+ tcg_r2 = load_gpr(ctx, r2);
+ ret = do_add(ctx, rt, tcg_r1, tcg_r2, shift, is_l, is_tsv, is_tc, is_c, cf);
+ return nullify_end(ctx, ret);
+}
+
+static ExitStatus trans_sub(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned r2 = extract32(insn, 21, 5);
+ unsigned r1 = extract32(insn, 16, 5);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned ext = extract32(insn, 6, 6);
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv tcg_r1, tcg_r2;
+ bool is_b = false;
+ bool is_tc = false;
+ bool is_tsv = false;
+ ExitStatus ret;
+
+ switch (ext) {
+ case 0x10: /* SUB */
+ break;
+ case 0x30: /* SUB,TSV */
+ is_tsv = true;
+ break;
+ case 0x14: /* SUB,B */
+ is_b = true;
+ break;
+ case 0x34: /* SUB,B,TSV */
+ is_b = is_tsv = true;
+ break;
+ case 0x13: /* SUB,TC */
+ is_tc = true;
+ break;
+ case 0x33: /* SUB,TSV,TC */
+ is_tc = is_tsv = true;
+ break;
+ default:
+ return gen_illegal(ctx);
+ }
+
+ if (cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, r1);
+ tcg_r2 = load_gpr(ctx, r2);
+ ret = do_sub(ctx, rt, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, cf);
+ return nullify_end(ctx, ret);
+}
+
+static ExitStatus trans_log(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned r2 = extract32(insn, 21, 5);
+ unsigned r1 = extract32(insn, 16, 5);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv tcg_r1, tcg_r2;
+ ExitStatus ret;
+
+ if (cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, r1);
+ tcg_r2 = load_gpr(ctx, r2);
+ ret = do_log(ctx, rt, tcg_r1, tcg_r2, cf, di->f_ttt);
+ return nullify_end(ctx, ret);
+}
+
+/* OR r,0,t -> COPY (according to gas) */
+static ExitStatus trans_copy(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned r1 = extract32(insn, 16, 5);
+ unsigned rt = extract32(insn, 0, 5);
+
+ if (r1 == 0) {
+ TCGv dest = dest_gpr(ctx, rt);
+ tcg_gen_movi_tl(dest, 0);
+ save_gpr(ctx, rt, dest);
+ } else {
+ save_gpr(ctx, rt, cpu_gr[r1]);
+ }
+ cond_free(&ctx->null_cond);
+ return NO_EXIT;
+}
+
+static ExitStatus trans_cmpclr(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned r2 = extract32(insn, 21, 5);
+ unsigned r1 = extract32(insn, 16, 5);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv tcg_r1, tcg_r2;
+ ExitStatus ret;
+
+ if (cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, r1);
+ tcg_r2 = load_gpr(ctx, r2);
+ ret = do_cmpclr(ctx, rt, tcg_r1, tcg_r2, cf);
+ return nullify_end(ctx, ret);
+}
+
+static ExitStatus trans_uxor(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned r2 = extract32(insn, 21, 5);
+ unsigned r1 = extract32(insn, 16, 5);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv tcg_r1, tcg_r2;
+ ExitStatus ret;
+
+ if (cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, r1);
+ tcg_r2 = load_gpr(ctx, r2);
+ ret = do_unit(ctx, rt, tcg_r1, tcg_r2, cf, false, tcg_gen_xor_tl);
+ return nullify_end(ctx, ret);
+}
+
+static ExitStatus trans_uaddcm(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned r2 = extract32(insn, 21, 5);
+ unsigned r1 = extract32(insn, 16, 5);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned is_tc = extract32(insn, 6, 1);
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv tcg_r1, tcg_r2, tmp;
+ ExitStatus ret;
+
+ if (cf) {
+ nullify_over(ctx);
+ }
+ tcg_r1 = load_gpr(ctx, r1);
+ tcg_r2 = load_gpr(ctx, r2);
+ tmp = get_temp(ctx);
+ tcg_gen_not_tl(tmp, tcg_r2);
+ ret = do_unit(ctx, rt, tcg_r1, tmp, cf, is_tc, tcg_gen_add_tl);
+ return nullify_end(ctx, ret);
+}
+
+static ExitStatus trans_dcor(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned r2 = extract32(insn, 21, 5);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned is_i = extract32(insn, 6, 1);
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv tmp;
+ ExitStatus ret;
+
+ nullify_over(ctx);
+
+ tmp = get_temp(ctx);
+ tcg_gen_shri_tl(tmp, cpu_psw_cb, 3);
+ if (!is_i) {
+ tcg_gen_not_tl(tmp, tmp);
+ }
+ tcg_gen_andi_tl(tmp, tmp, 0x11111111);
+ tcg_gen_muli_tl(tmp, tmp, 6);
+ ret = do_unit(ctx, rt, tmp, load_gpr(ctx, r2), cf, false,
+ is_i ? tcg_gen_add_tl : tcg_gen_sub_tl);
+
+ return nullify_end(ctx, ret);
+}
+
+static ExitStatus trans_ds(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned r2 = extract32(insn, 21, 5);
+ unsigned r1 = extract32(insn, 16, 5);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned rt = extract32(insn, 0, 5);
+ TCGv dest, add1, add2, addc, zero, in1, in2;
+
+ nullify_over(ctx);
+
+ in1 = load_gpr(ctx, r1);
+ in2 = load_gpr(ctx, r2);
+
+ add1 = tcg_temp_new();
+ add2 = tcg_temp_new();
+ addc = tcg_temp_new();
+ dest = tcg_temp_new();
+ zero = tcg_const_tl(0);
+
+ /* Form R1 << 1 | PSW[CB]{8}. */
+ tcg_gen_add_tl(add1, in1, in1);
+ tcg_gen_add_tl(add1, add1, cpu_psw_cb_msb);
+
+ /* Add or subtract R2, depending on PSW[V]. Proper computation of
+ carry{8} requires that we subtract via + ~R2 + 1, as described in
+ the manual. By extracting and masking V, we can produce the
+ proper inputs to the addition without movcond. */
+ tcg_gen_sari_tl(addc, cpu_psw_v, TARGET_LONG_BITS - 1);
+ tcg_gen_xor_tl(add2, in2, addc);
+ tcg_gen_andi_tl(addc, addc, 1);
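+ /* If PSW[V] is negative, ADDC sign-extends to all ones: the XOR
+ above yields ~R2 and the masked bit supplies the +1 of the two's
+ complement, i.e. a subtraction. Otherwise ADDC is zero and R2 is
+ added unchanged. */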
+ /* ??? This is only correct for 32-bit. */
+ tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
+ tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
+
+ tcg_temp_free(addc);
+ tcg_temp_free(zero);
+
+ /* Write back the result register. */
+ save_gpr(ctx, rt, dest);
+
+ /* Write back PSW[CB]. */
+ tcg_gen_xor_tl(cpu_psw_cb, add1, add2);
+ tcg_gen_xor_tl(cpu_psw_cb, cpu_psw_cb, dest);
+
+ /* Write back PSW[V] for the division step. */
+ tcg_gen_neg_tl(cpu_psw_v, cpu_psw_cb_msb);
+ tcg_gen_xor_tl(cpu_psw_v, cpu_psw_v, in2);
+
+ /* Install the new nullification. */
+ if (cf) {
+ TCGv sv;
+ TCGV_UNUSED(sv);
+ if (cf >> 1 == 6) {
+ /* ??? The lshift is supposed to contribute to overflow. */
+ sv = do_add_sv(ctx, dest, add1, add2);
+ }
+ ctx->null_cond = do_cond(cf, dest, cpu_psw_cb_msb, sv);
+ }
+
+ tcg_temp_free(add1);
+ tcg_temp_free(add2);
+ tcg_temp_free(dest);
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static const DisasInsn table_arith_log[] = {
+ { 0x08000240u, 0xfc00ffffu, trans_nop }, /* or x,y,0 */
+ { 0x08000240u, 0xffe0ffe0u, trans_copy }, /* or x,0,t */
+ { 0x08000000u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_andc_tl },
+ { 0x08000200u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_and_tl },
+ { 0x08000240u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_or_tl },
+ { 0x08000280u, 0xfc000fe0u, trans_log, .f_ttt = tcg_gen_xor_tl },
+ { 0x08000880u, 0xfc000fe0u, trans_cmpclr },
+ { 0x08000380u, 0xfc000fe0u, trans_uxor },
+ { 0x08000980u, 0xfc000fa0u, trans_uaddcm },
+ { 0x08000b80u, 0xfc1f0fa0u, trans_dcor },
+ { 0x08000440u, 0xfc000fe0u, trans_ds },
+ { 0x08000700u, 0xfc0007e0u, trans_add }, /* add */
+ { 0x08000400u, 0xfc0006e0u, trans_sub }, /* sub; sub,b; sub,tsv */
+ { 0x080004c0u, 0xfc0007e0u, trans_sub }, /* sub,tc; sub,tsv,tc */
+ { 0x08000200u, 0xfc000320u, trans_add }, /* shladd */
+};
+
+static ExitStatus trans_addi(DisasContext *ctx, uint32_t insn)
+{
+ target_long im = low_sextract(insn, 0, 11);
+ unsigned e1 = extract32(insn, 11, 1);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned rt = extract32(insn, 16, 5);
+ unsigned r2 = extract32(insn, 21, 5);
+ unsigned o1 = extract32(insn, 26, 1);
+ TCGv tcg_im, tcg_r2;
+ ExitStatus ret;
+
+ if (cf) {
+ nullify_over(ctx);
+ }
+
+ tcg_im = load_const(ctx, im);
+ tcg_r2 = load_gpr(ctx, r2);
+ ret = do_add(ctx, rt, tcg_im, tcg_r2, 0, false, e1, !o1, false, cf);
+
+ return nullify_end(ctx, ret);
+}
+
+static ExitStatus trans_subi(DisasContext *ctx, uint32_t insn)
+{
+ target_long im = low_sextract(insn, 0, 11);
+ unsigned e1 = extract32(insn, 11, 1);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned rt = extract32(insn, 16, 5);
+ unsigned r2 = extract32(insn, 21, 5);
+ TCGv tcg_im, tcg_r2;
+ ExitStatus ret;
+
+ if (cf) {
+ nullify_over(ctx);
+ }
+
+ tcg_im = load_const(ctx, im);
+ tcg_r2 = load_gpr(ctx, r2);
+ ret = do_sub(ctx, rt, tcg_im, tcg_r2, e1, false, false, cf);
+
+ return nullify_end(ctx, ret);
+}
+
+static ExitStatus trans_cmpiclr(DisasContext *ctx, uint32_t insn)
+{
+ target_long im = low_sextract(insn, 0, 11);
+ unsigned cf = extract32(insn, 12, 4);
+ unsigned rt = extract32(insn, 16, 5);
+ unsigned r2 = extract32(insn, 21, 5);
+ TCGv tcg_im, tcg_r2;
+ ExitStatus ret;
+
+ if (cf) {
+ nullify_over(ctx);
+ }
+
+ tcg_im = load_const(ctx, im);
+ tcg_r2 = load_gpr(ctx, r2);
+ ret = do_cmpclr(ctx, rt, tcg_im, tcg_r2, cf);
+
+ return nullify_end(ctx, ret);
+}
+
+static ExitStatus trans_ld_idx_i(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned m = extract32(insn, 5, 1);
+ unsigned sz = extract32(insn, 6, 2);
+ unsigned a = extract32(insn, 13, 1);
+ int disp = low_sextract(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ int modify = (m ? (a ? -1 : 1) : 0);
+ TCGMemOp mop = MO_TE | sz;
+
+ return do_load(ctx, rt, rb, 0, 0, disp, modify, mop);
+}
+
+static ExitStatus trans_ld_idx_x(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned m = extract32(insn, 5, 1);
+ unsigned sz = extract32(insn, 6, 2);
+ unsigned u = extract32(insn, 13, 1);
+ unsigned rx = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ TCGMemOp mop = MO_TE | sz;
+
+ return do_load(ctx, rt, rb, rx, u ? sz : 0, 0, m, mop);
+}
+
+static ExitStatus trans_st_idx_i(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ int disp = low_sextract(insn, 0, 5);
+ unsigned m = extract32(insn, 5, 1);
+ unsigned sz = extract32(insn, 6, 2);
+ unsigned a = extract32(insn, 13, 1);
+ unsigned rr = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ int modify = (m ? (a ? -1 : 1) : 0);
+ TCGMemOp mop = MO_TE | sz;
+
+ return do_store(ctx, rr, rb, disp, modify, mop);
+}
+
+static ExitStatus trans_ldcw(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned m = extract32(insn, 5, 1);
+ unsigned i = extract32(insn, 12, 1);
+ unsigned au = extract32(insn, 13, 1);
+ unsigned rx = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ TCGMemOp mop = MO_TEUL | MO_ALIGN_16;
+ TCGv zero, addr, base, dest;
+ int modify, disp = 0, scale = 0;
+
+ nullify_over(ctx);
+
+ /* ??? Share more code with do_load and do_load_{32,64}. */
+
+ if (i) {
+ modify = (m ? (au ? -1 : 1) : 0);
+ disp = low_sextract(rx, 0, 5);
+ rx = 0;
+ } else {
+ modify = m;
+ if (au) {
+ scale = mop & MO_SIZE;
+ }
+ }
+ if (modify) {
+ /* Base register modification. Make sure if RT == RB, we see
+ the result of the load. */
+ dest = get_temp(ctx);
+ } else {
+ dest = dest_gpr(ctx, rt);
+ }
+
+ addr = tcg_temp_new();
+ base = load_gpr(ctx, rb);
+ if (rx) {
+ tcg_gen_shli_tl(addr, cpu_gr[rx], scale);
+ tcg_gen_add_tl(addr, addr, base);
+ } else {
+ tcg_gen_addi_tl(addr, base, disp);
+ }
+
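+ /* LDCW loads the word and atomically stores zero back, hence the
+ exchange against a zero constant; MO_ALIGN_16 above reflects the
+ architected 16-byte alignment requirement. */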
+ zero = tcg_const_tl(0);
+ tcg_gen_atomic_xchg_tl(dest, (modify <= 0 ? addr : base),
+ zero, MMU_USER_IDX, mop);
+ if (modify) {
+ save_gpr(ctx, rb, addr);
+ }
+ save_gpr(ctx, rt, dest);
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_stby(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ target_long disp = low_sextract(insn, 0, 5);
+ unsigned m = extract32(insn, 5, 1);
+ unsigned a = extract32(insn, 13, 1);
+ unsigned rt = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ TCGv addr, val;
+
+ nullify_over(ctx);
+
+ addr = tcg_temp_new();
+ if (m || disp == 0) {
+ tcg_gen_mov_tl(addr, load_gpr(ctx, rb));
+ } else {
+ tcg_gen_addi_tl(addr, load_gpr(ctx, rb), disp);
+ }
+ val = load_gpr(ctx, rt);
+
+ if (a) {
+ gen_helper_stby_e(cpu_env, addr, val);
+ } else {
+ gen_helper_stby_b(cpu_env, addr, val);
+ }
+
+ if (m) {
+ tcg_gen_addi_tl(addr, addr, disp);
+ tcg_gen_andi_tl(addr, addr, ~3);
+ save_gpr(ctx, rb, addr);
+ }
+ tcg_temp_free(addr);
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static const DisasInsn table_index_mem[] = {
+ { 0x0c001000u, 0xfc001300, trans_ld_idx_i }, /* LD[BHWD], im */
+ { 0x0c000000u, 0xfc001300, trans_ld_idx_x }, /* LD[BHWD], rx */
+ { 0x0c001200u, 0xfc001300, trans_st_idx_i }, /* ST[BHWD] */
+ { 0x0c0001c0u, 0xfc0003c0, trans_ldcw },
+ { 0x0c001300u, 0xfc0013c0, trans_stby },
+};
+
+static ExitStatus trans_ldil(DisasContext *ctx, uint32_t insn)
+{
+ unsigned rt = extract32(insn, 21, 5);
+ target_long i = assemble_21(insn);
+ TCGv tcg_rt = dest_gpr(ctx, rt);
+
+ tcg_gen_movi_tl(tcg_rt, i);
+ save_gpr(ctx, rt, tcg_rt);
+ cond_free(&ctx->null_cond);
+
+ return NO_EXIT;
+}
+
+static ExitStatus trans_addil(DisasContext *ctx, uint32_t insn)
+{
+ unsigned rt = extract32(insn, 21, 5);
+ target_long i = assemble_21(insn);
+ TCGv tcg_rt = load_gpr(ctx, rt);
+ TCGv tcg_r1 = dest_gpr(ctx, 1);
+
+ tcg_gen_addi_tl(tcg_r1, tcg_rt, i);
+ save_gpr(ctx, 1, tcg_r1);
+ cond_free(&ctx->null_cond);
+
+ return NO_EXIT;
+}
+
+static ExitStatus trans_ldo(DisasContext *ctx, uint32_t insn)
+{
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned rt = extract32(insn, 16, 5);
+ target_long i = assemble_16(insn);
+ TCGv tcg_rt = dest_gpr(ctx, rt);
+
+ /* Special case rb == 0, for the LDI pseudo-op.
+ The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
+ if (rb == 0) {
+ tcg_gen_movi_tl(tcg_rt, i);
+ } else {
+ tcg_gen_addi_tl(tcg_rt, cpu_gr[rb], i);
+ }
+ save_gpr(ctx, rt, tcg_rt);
+ cond_free(&ctx->null_cond);
+
+ return NO_EXIT;
+}
+
+static ExitStatus trans_load(DisasContext *ctx, uint32_t insn,
+ bool is_mod, TCGMemOp mop)
+{
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned rt = extract32(insn, 16, 5);
+ target_long i = assemble_16(insn);
+
+ return do_load(ctx, rt, rb, 0, 0, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
+}
+
+static ExitStatus trans_load_w(DisasContext *ctx, uint32_t insn)
+{
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned rt = extract32(insn, 16, 5);
+ target_long i = assemble_16a(insn);
+ unsigned ext2 = extract32(insn, 1, 2);
+
+ switch (ext2) {
+ case 0:
+ case 1:
+ /* FLDW without modification. */
+ return do_floadw(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
+ case 2:
+ /* LDW with modification. Note that the sign of I selects
+ post-dec vs pre-inc. */
+ return do_load(ctx, rt, rb, 0, 0, i, (i < 0 ? 1 : -1), MO_TEUL);
+ default:
+ return gen_illegal(ctx);
+ }
+}
+
+static ExitStatus trans_fload_mod(DisasContext *ctx, uint32_t insn)
+{
+ target_long i = assemble_16a(insn);
+ unsigned t1 = extract32(insn, 1, 1);
+ unsigned a = extract32(insn, 2, 1);
+ unsigned t0 = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+
+ /* FLDW with modification. */
+ return do_floadw(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
+}
+
+static ExitStatus trans_store(DisasContext *ctx, uint32_t insn,
+ bool is_mod, TCGMemOp mop)
+{
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned rt = extract32(insn, 16, 5);
+ target_long i = assemble_16(insn);
+
+ return do_store(ctx, rt, rb, i, is_mod ? (i < 0 ? -1 : 1) : 0, mop);
+}
+
+static ExitStatus trans_store_w(DisasContext *ctx, uint32_t insn)
+{
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned rt = extract32(insn, 16, 5);
+ target_long i = assemble_16a(insn);
+ unsigned ext2 = extract32(insn, 1, 2);
+
+ switch (ext2) {
+ case 0:
+ case 1:
+ /* FSTW without modification. */
+ return do_fstorew(ctx, ext2 * 32 + rt, rb, 0, 0, i, 0);
+ case 2:
+ /* STW with modification. */
+ return do_store(ctx, rt, rb, i, (i < 0 ? 1 : -1), MO_TEUL);
+ default:
+ return gen_illegal(ctx);
+ }
+}
+
+static ExitStatus trans_fstore_mod(DisasContext *ctx, uint32_t insn)
+{
+ target_long i = assemble_16a(insn);
+ unsigned t1 = extract32(insn, 1, 1);
+ unsigned a = extract32(insn, 2, 1);
+ unsigned t0 = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+
+ /* FSTW with modification. */
+ return do_fstorew(ctx, t1 * 32 + t0, rb, 0, 0, i, (a ? -1 : 1));
+}
+
+static ExitStatus trans_copr_w(DisasContext *ctx, uint32_t insn)
+{
+ unsigned t0 = extract32(insn, 0, 5);
+ unsigned m = extract32(insn, 5, 1);
+ unsigned t1 = extract32(insn, 6, 1);
+ unsigned ext3 = extract32(insn, 7, 3);
+ /* unsigned cc = extract32(insn, 10, 2); */
+ unsigned i = extract32(insn, 12, 1);
+ unsigned ua = extract32(insn, 13, 1);
+ unsigned rx = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned rt = t1 * 32 + t0;
+ int modify = (m ? (ua ? -1 : 1) : 0);
+ int disp, scale;
+
+ if (i == 0) {
+ scale = (ua ? 2 : 0);
+ disp = 0;
+ modify = m;
+ } else {
+ disp = low_sextract(rx, 0, 5);
+ scale = 0;
+ rx = 0;
+ modify = (m ? (ua ? -1 : 1) : 0);
+ }
+
+ switch (ext3) {
+ case 0: /* FLDW */
+ return do_floadw(ctx, rt, rb, rx, scale, disp, modify);
+ case 4: /* FSTW */
+ return do_fstorew(ctx, rt, rb, rx, scale, disp, modify);
+ }
+ return gen_illegal(ctx);
+}
+
+static ExitStatus trans_copr_dw(DisasContext *ctx, uint32_t insn)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned m = extract32(insn, 5, 1);
+ unsigned ext4 = extract32(insn, 6, 4);
+ /* unsigned cc = extract32(insn, 10, 2); */
+ unsigned i = extract32(insn, 12, 1);
+ unsigned ua = extract32(insn, 13, 1);
+ unsigned rx = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ int modify = (m ? (ua ? -1 : 1) : 0);
+ int disp, scale;
+
+ if (i == 0) {
+ scale = (ua ? 3 : 0);
+ disp = 0;
+ modify = m;
+ } else {
+ disp = low_sextract(rx, 0, 5);
+ scale = 0;
+ rx = 0;
+ modify = (m ? (ua ? -1 : 1) : 0);
+ }
+
+ switch (ext4) {
+ case 0: /* FLDD */
+ return do_floadd(ctx, rt, rb, rx, scale, disp, modify);
+ case 8: /* FSTD */
+ return do_fstored(ctx, rt, rb, rx, scale, disp, modify);
+ default:
+ return gen_illegal(ctx);
+ }
+}
+
+static ExitStatus trans_cmpb(DisasContext *ctx, uint32_t insn,
+ bool is_true, bool is_imm, bool is_dw)
+{
+ target_long disp = assemble_12(insn) * 4;
+ unsigned n = extract32(insn, 1, 1);
+ unsigned c = extract32(insn, 13, 3);
+ unsigned r = extract32(insn, 21, 5);
+ unsigned cf = c * 2 + !is_true;
+ TCGv dest, in1, in2, sv;
+ DisasCond cond;
+
+ nullify_over(ctx);
+
+ if (is_imm) {
+ in1 = load_const(ctx, low_sextract(insn, 16, 5));
+ } else {
+ in1 = load_gpr(ctx, extract32(insn, 16, 5));
+ }
+ in2 = load_gpr(ctx, r);
+ dest = get_temp(ctx);
+
+ tcg_gen_sub_tl(dest, in1, in2);
+
+ TCGV_UNUSED(sv);
+ if (c == 6) {
+ sv = do_sub_sv(ctx, dest, in1, in2);
+ }
+
+ cond = do_sub_cond(cf, dest, in1, in2, sv);
+ return do_cbranch(ctx, disp, n, &cond);
+}
+
+static ExitStatus trans_addb(DisasContext *ctx, uint32_t insn,
+ bool is_true, bool is_imm)
+{
+ target_long disp = assemble_12(insn) * 4;
+ unsigned n = extract32(insn, 1, 1);
+ unsigned c = extract32(insn, 13, 3);
+ unsigned r = extract32(insn, 21, 5);
+ unsigned cf = c * 2 + !is_true;
+ TCGv dest, in1, in2, sv, cb_msb;
+ DisasCond cond;
+
+ nullify_over(ctx);
+
+ if (is_imm) {
+ in1 = load_const(ctx, low_sextract(insn, 16, 5));
+ } else {
+ in1 = load_gpr(ctx, extract32(insn, 16, 5));
+ }
+ in2 = load_gpr(ctx, r);
+ dest = dest_gpr(ctx, r);
+ TCGV_UNUSED(sv);
+ TCGV_UNUSED(cb_msb);
+
+ switch (c) {
+ default:
+ tcg_gen_add_tl(dest, in1, in2);
+ break;
+ case 4: case 5:
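+ /* These conditions consume the carry out of the addition, so
+ compute it with add2 using zero high parts; the carry lands
+ in cb_msb for do_cond below. */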
+ cb_msb = get_temp(ctx);
+ tcg_gen_movi_tl(cb_msb, 0);
+ tcg_gen_add2_tl(dest, cb_msb, in1, cb_msb, in2, cb_msb);
+ break;
+ case 6:
+ tcg_gen_add_tl(dest, in1, in2);
+ sv = do_add_sv(ctx, dest, in1, in2);
+ break;
+ }
+
+ cond = do_cond(cf, dest, cb_msb, sv);
+ return do_cbranch(ctx, disp, n, &cond);
+}
+
+static ExitStatus trans_bb(DisasContext *ctx, uint32_t insn)
+{
+ target_long disp = assemble_12(insn) * 4;
+ unsigned n = extract32(insn, 1, 1);
+ unsigned c = extract32(insn, 15, 1);
+ unsigned r = extract32(insn, 16, 5);
+ unsigned p = extract32(insn, 21, 5);
+ unsigned i = extract32(insn, 26, 1);
+ TCGv tmp, tcg_r;
+ DisasCond cond;
+
+ nullify_over(ctx);
+
+ tmp = tcg_temp_new();
+ tcg_r = load_gpr(ctx, r);
+ if (i) {
+ tcg_gen_shli_tl(tmp, tcg_r, p);
+ } else {
+ tcg_gen_shl_tl(tmp, tcg_r, cpu_sar);
+ }
+
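+ /* The tested bit is now in the sign position, so the branch
+ condition reduces to a signed compare against zero: LT when the
+ bit is set, GE when it is clear. */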
+ cond = cond_make_0(c ? TCG_COND_GE : TCG_COND_LT, tmp);
+ tcg_temp_free(tmp);
+ return do_cbranch(ctx, disp, n, &cond);
+}
+
+static ExitStatus trans_movb(DisasContext *ctx, uint32_t insn, bool is_imm)
+{
+ target_long disp = assemble_12(insn) * 4;
+ unsigned n = extract32(insn, 1, 1);
+ unsigned c = extract32(insn, 13, 3);
+ unsigned t = extract32(insn, 16, 5);
+ unsigned r = extract32(insn, 21, 5);
+ TCGv dest;
+ DisasCond cond;
+
+ nullify_over(ctx);
+
+ dest = dest_gpr(ctx, r);
+ if (is_imm) {
+ tcg_gen_movi_tl(dest, low_sextract(t, 0, 5));
+ } else if (t == 0) {
+ tcg_gen_movi_tl(dest, 0);
+ } else {
+ tcg_gen_mov_tl(dest, cpu_gr[t]);
+ }
+
+ cond = do_sed_cond(c, dest);
+ return do_cbranch(ctx, disp, n, &cond);
+}
+
+static ExitStatus trans_shrpw_sar(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned c = extract32(insn, 13, 3);
+ unsigned r1 = extract32(insn, 16, 5);
+ unsigned r2 = extract32(insn, 21, 5);
+ TCGv dest;
+
+ if (c) {
+ nullify_over(ctx);
+ }
+
+ dest = dest_gpr(ctx, rt);
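+ /* Three cases: R1 == 0 is a plain shift of the zero-extended R2;
+ R1 == R2 is a 32-bit rotate; otherwise shift the 64-bit pair
+ R1:R2 right and keep the low word. */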
+ if (r1 == 0) {
+ tcg_gen_ext32u_tl(dest, load_gpr(ctx, r2));
+ tcg_gen_shr_tl(dest, dest, cpu_sar);
+ } else if (r1 == r2) {
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t32, load_gpr(ctx, r2));
+ tcg_gen_rotr_i32(t32, t32, cpu_sar);
+ tcg_gen_extu_i32_tl(dest, t32);
+ tcg_temp_free_i32(t32);
+ } else {
+ TCGv_i64 t = tcg_temp_new_i64();
+ TCGv_i64 s = tcg_temp_new_i64();
+
+ tcg_gen_concat_tl_i64(t, load_gpr(ctx, r2), load_gpr(ctx, r1));
+ tcg_gen_extu_tl_i64(s, cpu_sar);
+ tcg_gen_shr_i64(t, t, s);
+ tcg_gen_trunc_i64_tl(dest, t);
+
+ tcg_temp_free_i64(t);
+ tcg_temp_free_i64(s);
+ }
+ save_gpr(ctx, rt, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (c) {
+ ctx->null_cond = do_sed_cond(c, dest);
+ }
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_shrpw_imm(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned cpos = extract32(insn, 5, 5);
+ unsigned c = extract32(insn, 13, 3);
+ unsigned r1 = extract32(insn, 16, 5);
+ unsigned r2 = extract32(insn, 21, 5);
+ unsigned sa = 31 - cpos;
+ TCGv dest, t2;
+
+ if (c) {
+ nullify_over(ctx);
+ }
+
+ dest = dest_gpr(ctx, rt);
+ t2 = load_gpr(ctx, r2);
+ if (r1 == r2) {
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t32, t2);
+ tcg_gen_rotri_i32(t32, t32, sa);
+ tcg_gen_extu_i32_tl(dest, t32);
+ tcg_temp_free_i32(t32);
+ } else if (r1 == 0) {
+ tcg_gen_extract_tl(dest, t2, sa, 32 - sa);
+ } else {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_extract_tl(t0, t2, sa, 32 - sa);
+ tcg_gen_deposit_tl(dest, t0, cpu_gr[r1], 32 - sa, sa);
+ tcg_temp_free(t0);
+ }
+ save_gpr(ctx, rt, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (c) {
+ ctx->null_cond = do_sed_cond(c, dest);
+ }
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_extrw_sar(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned clen = extract32(insn, 0, 5);
+ unsigned is_se = extract32(insn, 10, 1);
+ unsigned c = extract32(insn, 13, 3);
+ unsigned rt = extract32(insn, 16, 5);
+ unsigned rr = extract32(insn, 21, 5);
+ unsigned len = 32 - clen;
+ TCGv dest, src, tmp;
+
+ if (c) {
+ nullify_over(ctx);
+ }
+
+ dest = dest_gpr(ctx, rt);
+ src = load_gpr(ctx, rr);
+ tmp = tcg_temp_new();
+
+ /* Recall that SAR is using big-endian bit numbering. */
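+ /* Since SAR is masked to 0..TARGET_LONG_BITS-1 on write, the XOR
+ below computes (TARGET_LONG_BITS - 1) - SAR, turning the
+ big-endian bit position into a right-shift count. */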
+ tcg_gen_xori_tl(tmp, cpu_sar, TARGET_LONG_BITS - 1);
+ if (is_se) {
+ tcg_gen_sar_tl(dest, src, tmp);
+ tcg_gen_sextract_tl(dest, dest, 0, len);
+ } else {
+ tcg_gen_shr_tl(dest, src, tmp);
+ tcg_gen_extract_tl(dest, dest, 0, len);
+ }
+ tcg_temp_free(tmp);
+ save_gpr(ctx, rt, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (c) {
+ ctx->null_cond = do_sed_cond(c, dest);
+ }
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_extrw_imm(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned clen = extract32(insn, 0, 5);
+ unsigned pos = extract32(insn, 5, 5);
+ unsigned is_se = extract32(insn, 10, 1);
+ unsigned c = extract32(insn, 13, 3);
+ unsigned rt = extract32(insn, 16, 5);
+ unsigned rr = extract32(insn, 21, 5);
+ unsigned len = 32 - clen;
+ unsigned cpos = 31 - pos;
+ TCGv dest, src;
+
+ if (c) {
+ nullify_over(ctx);
+ }
+
+ dest = dest_gpr(ctx, rt);
+ src = load_gpr(ctx, rr);
+ if (is_se) {
+ tcg_gen_sextract_tl(dest, src, cpos, len);
+ } else {
+ tcg_gen_extract_tl(dest, src, cpos, len);
+ }
+ save_gpr(ctx, rt, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (c) {
+ ctx->null_cond = do_sed_cond(c, dest);
+ }
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static const DisasInsn table_sh_ex[] = {
+ { 0xd0000000u, 0xfc001fe0u, trans_shrpw_sar },
+ { 0xd0000800u, 0xfc001c00u, trans_shrpw_imm },
+ { 0xd0001000u, 0xfc001be0u, trans_extrw_sar },
+ { 0xd0001800u, 0xfc001800u, trans_extrw_imm },
+};
+
+static ExitStatus trans_depw_imm_c(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned clen = extract32(insn, 0, 5);
+ unsigned cpos = extract32(insn, 5, 5);
+ unsigned nz = extract32(insn, 10, 1);
+ unsigned c = extract32(insn, 13, 3);
+ target_long val = low_sextract(insn, 16, 5);
+ unsigned rt = extract32(insn, 21, 5);
+ unsigned len = 32 - clen;
+ target_long mask0, mask1;
+ TCGv dest;
+
+ if (c) {
+ nullify_over(ctx);
+ }
+ if (cpos + len > 32) {
+ len = 32 - cpos;
+ }
+
+ dest = dest_gpr(ctx, rt);
+ mask0 = deposit64(0, cpos, len, val);
+ mask1 = deposit64(-1, cpos, len, val);
+
+ if (nz) {
+ TCGv src = load_gpr(ctx, rt);
+ if (mask1 != -1) {
+ tcg_gen_andi_tl(dest, src, mask1);
+ src = dest;
+ }
+ tcg_gen_ori_tl(dest, src, mask0);
+ } else {
+ tcg_gen_movi_tl(dest, mask0);
+ }
+ save_gpr(ctx, rt, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (c) {
+ ctx->null_cond = do_sed_cond(c, dest);
+ }
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_depw_imm(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned clen = extract32(insn, 0, 5);
+ unsigned cpos = extract32(insn, 5, 5);
+ unsigned nz = extract32(insn, 10, 1);
+ unsigned c = extract32(insn, 13, 3);
+ unsigned rr = extract32(insn, 16, 5);
+ unsigned rt = extract32(insn, 21, 5);
+ unsigned rs = nz ? rt : 0;
+ unsigned len = 32 - clen;
+ TCGv dest, val;
+
+ if (c) {
+ nullify_over(ctx);
+ }
+ if (cpos + len > 32) {
+ len = 32 - cpos;
+ }
+
+ dest = dest_gpr(ctx, rt);
+ val = load_gpr(ctx, rr);
+ if (rs == 0) {
+ tcg_gen_deposit_z_tl(dest, val, cpos, len);
+ } else {
+ tcg_gen_deposit_tl(dest, cpu_gr[rs], val, cpos, len);
+ }
+ save_gpr(ctx, rt, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (c) {
+ ctx->null_cond = do_sed_cond(c, dest);
+ }
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_depw_sar(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned clen = extract32(insn, 0, 5);
+ unsigned nz = extract32(insn, 10, 1);
+ unsigned i = extract32(insn, 12, 1);
+ unsigned c = extract32(insn, 13, 3);
+ unsigned rt = extract32(insn, 21, 5);
+ unsigned rs = nz ? rt : 0;
+ unsigned len = 32 - clen;
+ TCGv val, mask, tmp, shift, dest;
+ unsigned msb = 1U << (len - 1);
+
+ if (c) {
+ nullify_over(ctx);
+ }
+
+ if (i) {
+ val = load_const(ctx, low_sextract(insn, 16, 5));
+ } else {
+ val = load_gpr(ctx, extract32(insn, 16, 5));
+ }
+ dest = dest_gpr(ctx, rt);
+ shift = tcg_temp_new();
+ tmp = tcg_temp_new();
+
+ /* Convert big-endian bit numbering in SAR to left-shift. */
+ tcg_gen_xori_tl(shift, cpu_sar, TARGET_LONG_BITS - 1);
+
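+ /* MSB + (MSB - 1) == (1 << LEN) - 1, a LEN-bit field mask. */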
+ mask = tcg_const_tl(msb + (msb - 1));
+ tcg_gen_and_tl(tmp, val, mask);
+ if (rs) {
+ tcg_gen_shl_tl(mask, mask, shift);
+ tcg_gen_shl_tl(tmp, tmp, shift);
+ tcg_gen_andc_tl(dest, cpu_gr[rs], mask);
+ tcg_gen_or_tl(dest, dest, tmp);
+ } else {
+ tcg_gen_shl_tl(dest, tmp, shift);
+ }
+ tcg_temp_free(shift);
+ tcg_temp_free(mask);
+ tcg_temp_free(tmp);
+ save_gpr(ctx, rt, dest);
+
+ /* Install the new nullification. */
+ cond_free(&ctx->null_cond);
+ if (c) {
+ ctx->null_cond = do_sed_cond(c, dest);
+ }
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static const DisasInsn table_depw[] = {
+ { 0xd4000000u, 0xfc000be0u, trans_depw_sar },
+ { 0xd4000800u, 0xfc001800u, trans_depw_imm },
+ { 0xd4001800u, 0xfc001800u, trans_depw_imm_c },
+};
+
+static ExitStatus trans_be(DisasContext *ctx, uint32_t insn, bool is_l)
+{
+ unsigned n = extract32(insn, 1, 1);
+ unsigned b = extract32(insn, 21, 5);
+ target_long disp = assemble_17(insn);
+
+ /* unsigned s = low_uextract(insn, 13, 3); */
+ /* ??? It seems like there should be a good way of using
+ "be disp(sr2, r0)", the canonical gateway entry mechanism
+ to our advantage. But that appears to be inconvenient to
+ manage alongside branch delay slots. Therefore we handle
+ entry into the gateway page via absolute address. */
+
+ /* Since we don't implement spaces, just branch. Do notice the special
+ case of "be disp(*,r0)" using a direct branch to disp, so that we can
+ goto_tb to the TB containing the syscall. */
+ if (b == 0) {
+ return do_dbranch(ctx, disp, is_l ? 31 : 0, n);
+ } else {
+ TCGv tmp = get_temp(ctx);
+ tcg_gen_addi_tl(tmp, load_gpr(ctx, b), disp);
+ return do_ibranch(ctx, tmp, is_l ? 31 : 0, n);
+ }
+}
+
+static ExitStatus trans_bl(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned n = extract32(insn, 1, 1);
+ unsigned link = extract32(insn, 21, 5);
+ target_long disp = assemble_17(insn);
+
+ return do_dbranch(ctx, iaoq_dest(ctx, disp), link, n);
+}
+
+static ExitStatus trans_bl_long(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned n = extract32(insn, 1, 1);
+ target_long disp = assemble_22(insn);
+
+ return do_dbranch(ctx, iaoq_dest(ctx, disp), 2, n);
+}
+
+static ExitStatus trans_blr(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned n = extract32(insn, 1, 1);
+ unsigned rx = extract32(insn, 16, 5);
+ unsigned link = extract32(insn, 21, 5);
+ TCGv tmp = get_temp(ctx);
+
+ tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
+ tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
+ return do_ibranch(ctx, tmp, link, n);
+}
+
+static ExitStatus trans_bv(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned n = extract32(insn, 1, 1);
+ unsigned rx = extract32(insn, 16, 5);
+ unsigned rb = extract32(insn, 21, 5);
+ TCGv dest;
+
+ if (rx == 0) {
+ dest = load_gpr(ctx, rb);
+ } else {
+ dest = get_temp(ctx);
+ tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
+ tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
+ }
+ return do_ibranch(ctx, dest, 0, n);
+}
+
+static ExitStatus trans_bve(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned n = extract32(insn, 1, 1);
+ unsigned rb = extract32(insn, 21, 5);
+ unsigned link = extract32(insn, 13, 1) ? 2 : 0;
+
+ return do_ibranch(ctx, load_gpr(ctx, rb), link, n);
+}
+
+static const DisasInsn table_branch[] = {
+ { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
+ { 0xe800a000u, 0xfc00e000u, trans_bl_long },
+ { 0xe8004000u, 0xfc00fffdu, trans_blr },
+ { 0xe800c000u, 0xfc00fffdu, trans_bv },
+ { 0xe800d000u, 0xfc00dffcu, trans_bve },
+};
+
+static ExitStatus trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned ra = extract32(insn, 21, 5);
+ return do_fop_wew(ctx, rt, ra, di->f_wew);
+}
+
+static ExitStatus trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = assemble_rt64(insn);
+ unsigned ra = assemble_ra64(insn);
+ return do_fop_wew(ctx, rt, ra, di->f_wew);
+}
+
+static ExitStatus trans_fop_ded(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned ra = extract32(insn, 21, 5);
+ return do_fop_ded(ctx, rt, ra, di->f_ded);
+}
+
+static ExitStatus trans_fop_wed_0c(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned ra = extract32(insn, 21, 5);
+ return do_fop_wed(ctx, rt, ra, di->f_wed);
+}
+
+static ExitStatus trans_fop_wed_0e(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = assemble_rt64(insn);
+ unsigned ra = extract32(insn, 21, 5);
+ return do_fop_wed(ctx, rt, ra, di->f_wed);
+}
+
+static ExitStatus trans_fop_dew_0c(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned ra = extract32(insn, 21, 5);
+ return do_fop_dew(ctx, rt, ra, di->f_dew);
+}
+
+static ExitStatus trans_fop_dew_0e(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned ra = assemble_ra64(insn);
+ return do_fop_dew(ctx, rt, ra, di->f_dew);
+}
+
+static ExitStatus trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned rb = extract32(insn, 16, 5);
+ unsigned ra = extract32(insn, 21, 5);
+ return do_fop_weww(ctx, rt, ra, rb, di->f_weww);
+}
+
+static ExitStatus trans_fop_weww_0e(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = assemble_rt64(insn);
+ unsigned rb = assemble_rb64(insn);
+ unsigned ra = assemble_ra64(insn);
+ return do_fop_weww(ctx, rt, ra, rb, di->f_weww);
+}
+
+static ExitStatus trans_fop_dedd(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned rb = extract32(insn, 16, 5);
+ unsigned ra = extract32(insn, 21, 5);
+ return do_fop_dedd(ctx, rt, ra, rb, di->f_dedd);
+}
+
+static void gen_fcpy_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
+{
+ tcg_gen_mov_i32(dst, src);
+}
+
+static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
+{
+ tcg_gen_mov_i64(dst, src);
+}
+
+static void gen_fabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
+{
+ tcg_gen_andi_i32(dst, src, INT32_MAX);
+}
+
+static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
+{
+ tcg_gen_andi_i64(dst, src, INT64_MAX);
+}
+
+static void gen_fneg_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
+{
+ tcg_gen_xori_i32(dst, src, INT32_MIN);
+}
+
+static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
+{
+ tcg_gen_xori_i64(dst, src, INT64_MIN);
+}
+
+static void gen_fnegabs_s(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
+{
+ tcg_gen_ori_i32(dst, src, INT32_MIN);
+}
+
+static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
+{
+ tcg_gen_ori_i64(dst, src, INT64_MIN);
+}
+
+static ExitStatus do_fcmp_s(DisasContext *ctx, unsigned ra, unsigned rb,
+ unsigned y, unsigned c)
+{
+ TCGv_i32 ta, tb, tc, ty;
+
+ nullify_over(ctx);
+
+ ta = load_frw0_i32(ra);
+ tb = load_frw0_i32(rb);
+ ty = tcg_const_i32(y);
+ tc = tcg_const_i32(c);
+
+ gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);
+
+ tcg_temp_free_i32(ta);
+ tcg_temp_free_i32(tb);
+ tcg_temp_free_i32(ty);
+ tcg_temp_free_i32(tc);
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_fcmp_s_0c(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned c = extract32(insn, 0, 5);
+ unsigned y = extract32(insn, 13, 3);
+ unsigned rb = extract32(insn, 16, 5);
+ unsigned ra = extract32(insn, 21, 5);
+ return do_fcmp_s(ctx, ra, rb, y, c);
+}
+
+static ExitStatus trans_fcmp_s_0e(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned c = extract32(insn, 0, 5);
+ unsigned y = extract32(insn, 13, 3);
+ unsigned rb = assemble_rb64(insn);
+ unsigned ra = assemble_ra64(insn);
+ return do_fcmp_s(ctx, ra, rb, y, c);
+}
+
+static ExitStatus trans_fcmp_d(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned c = extract32(insn, 0, 5);
+ unsigned y = extract32(insn, 13, 3);
+ unsigned rb = extract32(insn, 16, 5);
+ unsigned ra = extract32(insn, 21, 5);
+ TCGv_i64 ta, tb;
+ TCGv_i32 tc, ty;
+
+ nullify_over(ctx);
+
+ ta = load_frd0(ra);
+ tb = load_frd0(rb);
+ ty = tcg_const_i32(y);
+ tc = tcg_const_i32(c);
+
+ gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);
+
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(tb);
+ tcg_temp_free_i32(ty);
+ tcg_temp_free_i32(tc);
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_ftest_t(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned y = extract32(insn, 13, 3);
+ unsigned cbit = (y ^ 1) - 1;
+ TCGv t;
+
+ nullify_over(ctx);
+
+ t = tcg_temp_new();
+ tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
+ tcg_gen_extract_tl(t, t, 21 - cbit, 1);
+ ctx->null_cond = cond_make_0(TCG_COND_NE, t);
+ tcg_temp_free(t);
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_ftest_q(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned c = extract32(insn, 0, 5);
+ int mask;
+ bool inv = false;
+ TCGv t;
+
+ nullify_over(ctx);
+
+ t = tcg_temp_new();
+ tcg_gen_ld32u_tl(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));
+
+ switch (c) {
+ case 0: /* simple */
+ tcg_gen_andi_tl(t, t, 0x4000000);
+ ctx->null_cond = cond_make_0(TCG_COND_NE, t);
+ goto done;
+ case 2: /* rej */
+ inv = true;
+ /* fallthru */
+ case 1: /* acc */
+ mask = 0x43ff800;
+ break;
+ case 6: /* rej8 */
+ inv = true;
+ /* fallthru */
+ case 5: /* acc8 */
+ mask = 0x43f8000;
+ break;
+ case 9: /* acc6 */
+ mask = 0x43e0000;
+ break;
+ case 13: /* acc4 */
+ mask = 0x4380000;
+ break;
+ case 17: /* acc2 */
+ mask = 0x4200000;
+ break;
+ default:
+ return gen_illegal(ctx);
+ }
+ if (inv) {
+ TCGv c = load_const(ctx, mask);
+ tcg_gen_or_tl(t, t, c);
+ ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
+ } else {
+ tcg_gen_andi_tl(t, t, mask);
+ ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
+ }
+ done:
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_xmpyu(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned rb = assemble_rb64(insn);
+ unsigned ra = assemble_ra64(insn);
+ TCGv_i64 a, b;
+
+ nullify_over(ctx);
+
+ a = load_frw0_i64(ra);
+ b = load_frw0_i64(rb);
+ tcg_gen_mul_i64(a, a, b);
+ save_frd(rt, a);
+ tcg_temp_free_i64(a);
+ tcg_temp_free_i64(b);
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+#define FOP_DED trans_fop_ded, .f_ded
+#define FOP_DEDD trans_fop_dedd, .f_dedd
+
+#define FOP_WEW trans_fop_wew_0c, .f_wew
+#define FOP_DEW trans_fop_dew_0c, .f_dew
+#define FOP_WED trans_fop_wed_0c, .f_wed
+#define FOP_WEWW trans_fop_weww_0c, .f_weww
+
+static const DisasInsn table_float_0c[] = {
+ /* floating point class zero */
+ { 0x30004000, 0xfc1fffe0, FOP_WEW = gen_fcpy_s },
+ { 0x30006000, 0xfc1fffe0, FOP_WEW = gen_fabs_s },
+ { 0x30008000, 0xfc1fffe0, FOP_WEW = gen_helper_fsqrt_s },
+ { 0x3000a000, 0xfc1fffe0, FOP_WEW = gen_helper_frnd_s },
+ { 0x3000c000, 0xfc1fffe0, FOP_WEW = gen_fneg_s },
+ { 0x3000e000, 0xfc1fffe0, FOP_WEW = gen_fnegabs_s },
+
+ { 0x30004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
+ { 0x30006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
+ { 0x30008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
+ { 0x3000a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
+ { 0x3000c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
+ { 0x3000e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
+
+ /* floating point class three */
+ { 0x30000600, 0xfc00ffe0, FOP_WEWW = gen_helper_fadd_s },
+ { 0x30002600, 0xfc00ffe0, FOP_WEWW = gen_helper_fsub_s },
+ { 0x30004600, 0xfc00ffe0, FOP_WEWW = gen_helper_fmpy_s },
+ { 0x30006600, 0xfc00ffe0, FOP_WEWW = gen_helper_fdiv_s },
+
+ { 0x30000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
+ { 0x30002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
+ { 0x30004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
+ { 0x30006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
+
+ /* floating point class one */
+ /* float/float */
+ { 0x30000a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_s },
+ { 0x30002200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_d },
+ /* int/float */
+ { 0x30008200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_w_s },
+ { 0x30008a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_dw_s },
+ { 0x3000a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_w_d },
+ { 0x3000aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
+ /* float/int */
+ { 0x30010200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_w },
+ { 0x30010a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_w },
+ { 0x30012200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_dw },
+ { 0x30012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
+ /* float/int truncate */
+ { 0x30018200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_w },
+ { 0x30018a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_w },
+ { 0x3001a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_dw },
+ { 0x3001aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
+ /* uint/float */
+ { 0x30028200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_uw_s },
+ { 0x30028a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_udw_s },
+ { 0x3002a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_uw_d },
+ { 0x3002aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
+ /* float/uint */
+ { 0x30030200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_s_uw },
+ { 0x30030a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_d_uw },
+ { 0x30032200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_s_udw },
+ { 0x30032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
+ /* float/uint truncate */
+ { 0x30038200, 0xfc1fffe0, FOP_WEW = gen_helper_fcnv_t_s_uw },
+ { 0x30038a00, 0xfc1fffe0, FOP_WED = gen_helper_fcnv_t_d_uw },
+ { 0x3003a200, 0xfc1fffe0, FOP_DEW = gen_helper_fcnv_t_s_udw },
+ { 0x3003aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
+
+ /* floating point class two */
+ { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c },
+ { 0x30000c00, 0xfc001fe0, trans_fcmp_d },
+ { 0x30002420, 0xffffffe0, trans_ftest_q },
+ { 0x30000420, 0xffff1fff, trans_ftest_t },
+
+ /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
+ This is machine/revision == 0, which is reserved for simulator. */
+ { 0x30000000, 0xffffffff, FOP_WEW = gen_fcpy_s },
+};
+
+#undef FOP_WEW
+#undef FOP_DEW
+#undef FOP_WED
+#undef FOP_WEWW
+#define FOP_WEW trans_fop_wew_0e, .f_wew
+#define FOP_DEW trans_fop_dew_0e, .f_dew
+#define FOP_WED trans_fop_wed_0e, .f_wed
+#define FOP_WEWW trans_fop_weww_0e, .f_weww
+
+static const DisasInsn table_float_0e[] = {
+ /* floating point class zero */
+ { 0x38004000, 0xfc1fff20, FOP_WEW = gen_fcpy_s },
+ { 0x38006000, 0xfc1fff20, FOP_WEW = gen_fabs_s },
+ { 0x38008000, 0xfc1fff20, FOP_WEW = gen_helper_fsqrt_s },
+ { 0x3800a000, 0xfc1fff20, FOP_WEW = gen_helper_frnd_s },
+ { 0x3800c000, 0xfc1fff20, FOP_WEW = gen_fneg_s },
+ { 0x3800e000, 0xfc1fff20, FOP_WEW = gen_fnegabs_s },
+
+ { 0x38004800, 0xfc1fffe0, FOP_DED = gen_fcpy_d },
+ { 0x38006800, 0xfc1fffe0, FOP_DED = gen_fabs_d },
+ { 0x38008800, 0xfc1fffe0, FOP_DED = gen_helper_fsqrt_d },
+ { 0x3800a800, 0xfc1fffe0, FOP_DED = gen_helper_frnd_d },
+ { 0x3800c800, 0xfc1fffe0, FOP_DED = gen_fneg_d },
+ { 0x3800e800, 0xfc1fffe0, FOP_DED = gen_fnegabs_d },
+
+ /* floating point class three */
+ { 0x38000600, 0xfc00ef20, FOP_WEWW = gen_helper_fadd_s },
+ { 0x38002600, 0xfc00ef20, FOP_WEWW = gen_helper_fsub_s },
+ { 0x38004600, 0xfc00ef20, FOP_WEWW = gen_helper_fmpy_s },
+ { 0x38006600, 0xfc00ef20, FOP_WEWW = gen_helper_fdiv_s },
+
+ { 0x38000e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fadd_d },
+ { 0x38002e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fsub_d },
+ { 0x38004e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fmpy_d },
+ { 0x38006e00, 0xfc00ffe0, FOP_DEDD = gen_helper_fdiv_d },
+
+ { 0x38004700, 0xfc00ef60, trans_xmpyu },
+
+ /* floating point class one */
+ /* float/float */
+ { 0x38000a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_s },
+ { 0x38002200, 0xfc1fffc0, FOP_DEW = gen_helper_fcnv_s_d },
+ /* int/float */
+ { 0x38008200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_w_s },
+ { 0x38008a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_dw_s },
+ { 0x3800a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_w_d },
+ { 0x3800aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_dw_d },
+ /* float/int */
+ { 0x38010200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_w },
+ { 0x38010a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_w },
+ { 0x38012200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_dw },
+ { 0x38012a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_dw },
+ /* float/int truncate */
+ { 0x38018200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_w },
+ { 0x38018a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_w },
+ { 0x3801a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_dw },
+ { 0x3801aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_dw },
+ /* uint/float */
+ { 0x38028200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_uw_s },
+ { 0x38028a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_udw_s },
+ { 0x3802a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_uw_d },
+ { 0x3802aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_udw_d },
+ /* float/uint */
+ { 0x38030200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_s_uw },
+ { 0x38030a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_d_uw },
+ { 0x38032200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_s_udw },
+ { 0x38032a00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_d_udw },
+ /* float/uint truncate */
+ { 0x38038200, 0xfc1ffe60, FOP_WEW = gen_helper_fcnv_t_s_uw },
+ { 0x38038a00, 0xfc1fffa0, FOP_WED = gen_helper_fcnv_t_d_uw },
+ { 0x3803a200, 0xfc1fff60, FOP_DEW = gen_helper_fcnv_t_s_udw },
+ { 0x3803aa00, 0xfc1fffe0, FOP_DED = gen_helper_fcnv_t_d_udw },
+
+ /* floating point class two */
+ { 0x38000400, 0xfc000f60, trans_fcmp_s_0e },
+ { 0x38000c00, 0xfc001fe0, trans_fcmp_d },
+};
+
+#undef FOP_WEW
+#undef FOP_DEW
+#undef FOP_WED
+#undef FOP_WEWW
+#undef FOP_DED
+#undef FOP_DEDD
+
+/* Convert the fmpyadd single-precision register encodings to standard. */
+static inline int fmpyadd_s_reg(unsigned r)
+{
+ return (r & 16) * 2 + 16 + (r & 15);
+}
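
For reference, the remapping above is pure bit arithmetic: a 5-bit encoding r in 0..15 maps to 16..31, and 16..31 maps to 48..63 (what those indices denote is internal to the translator's register numbering). A minimal standalone sketch, with the demo_ names being hypothetical and not part of the patch:

    #include <assert.h>

    /* Hypothetical copy of the remapping formula above, for illustration only. */
    static int fmpyadd_s_reg_demo(unsigned r)
    {
        return (r & 16) * 2 + 16 + (r & 15);
    }

    int main(void)
    {
        assert(fmpyadd_s_reg_demo(0)  == 16);   /* 0..15  -> 16..31 */
        assert(fmpyadd_s_reg_demo(15) == 31);
        assert(fmpyadd_s_reg_demo(16) == 48);   /* 16..31 -> 48..63 */
        assert(fmpyadd_s_reg_demo(31) == 63);
        return 0;
    }
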
+
+static ExitStatus trans_fmpyadd(DisasContext *ctx, uint32_t insn, bool is_sub)
+{
+ unsigned tm = extract32(insn, 0, 5);
+ unsigned f = extract32(insn, 5, 1);
+ unsigned ra = extract32(insn, 6, 5);
+ unsigned ta = extract32(insn, 11, 5);
+ unsigned rm2 = extract32(insn, 16, 5);
+ unsigned rm1 = extract32(insn, 21, 5);
+
+ nullify_over(ctx);
+
+ /* Independent multiply & add/sub, with undefined behaviour
+ if outputs overlap inputs. */
+ if (f == 0) {
+ tm = fmpyadd_s_reg(tm);
+ ra = fmpyadd_s_reg(ra);
+ ta = fmpyadd_s_reg(ta);
+ rm2 = fmpyadd_s_reg(rm2);
+ rm1 = fmpyadd_s_reg(rm1);
+ do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
+ do_fop_weww(ctx, ta, ta, ra,
+ is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
+ } else {
+ do_fop_dedd(ctx, tm, rm1, rm2, gen_helper_fmpy_d);
+ do_fop_dedd(ctx, ta, ta, ra,
+ is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
+ }
+
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_fmpyfadd_s(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = assemble_rt64(insn);
+ unsigned neg = extract32(insn, 5, 1);
+ unsigned rm1 = assemble_ra64(insn);
+ unsigned rm2 = assemble_rb64(insn);
+ unsigned ra3 = assemble_rc64(insn);
+ TCGv_i32 a, b, c;
+
+ nullify_over(ctx);
+ a = load_frw0_i32(rm1);
+ b = load_frw0_i32(rm2);
+ c = load_frw0_i32(ra3);
+
+ if (neg) {
+ gen_helper_fmpynfadd_s(a, cpu_env, a, b, c);
+ } else {
+ gen_helper_fmpyfadd_s(a, cpu_env, a, b, c);
+ }
+
+ tcg_temp_free_i32(b);
+ tcg_temp_free_i32(c);
+ save_frw_i32(rt, a);
+ tcg_temp_free_i32(a);
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static ExitStatus trans_fmpyfadd_d(DisasContext *ctx, uint32_t insn,
+ const DisasInsn *di)
+{
+ unsigned rt = extract32(insn, 0, 5);
+ unsigned neg = extract32(insn, 5, 1);
+ unsigned rm1 = extract32(insn, 21, 5);
+ unsigned rm2 = extract32(insn, 16, 5);
+ unsigned ra3 = assemble_rc64(insn);
+ TCGv_i64 a, b, c;
+
+ nullify_over(ctx);
+ a = load_frd0(rm1);
+ b = load_frd0(rm2);
+ c = load_frd0(ra3);
+
+ if (neg) {
+ gen_helper_fmpynfadd_d(a, cpu_env, a, b, c);
+ } else {
+ gen_helper_fmpyfadd_d(a, cpu_env, a, b, c);
+ }
+
+ tcg_temp_free_i64(b);
+ tcg_temp_free_i64(c);
+ save_frd(rt, a);
+ tcg_temp_free_i64(a);
+ return nullify_end(ctx, NO_EXIT);
+}
+
+static const DisasInsn table_fp_fused[] = {
+ { 0xb8000000u, 0xfc000800u, trans_fmpyfadd_s },
+ { 0xb8000800u, 0xfc0019c0u, trans_fmpyfadd_d }
+};
+
+static ExitStatus translate_table_int(DisasContext *ctx, uint32_t insn,
+ const DisasInsn table[], size_t n)
+{
+ size_t i;
+ for (i = 0; i < n; ++i) {
+ if ((insn & table[i].mask) == table[i].insn) {
+ return table[i].trans(ctx, insn, &table[i]);
+ }
+ }
+ return gen_illegal(ctx);
+}
+
+#define translate_table(ctx, insn, table) \
+ translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
+
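
All of the decode tables above rely on the same {insn, mask} rule that translate_table_int applies below: an entry matches when the bits selected by the mask equal the fixed pattern, leaving the unmasked bits free for operand fields. A simplified sketch with a hypothetical two-entry table (the pattern/mask values are copied from table_float_0c above; the demo_ names are not part of the patch):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical, simplified mirror of the DisasInsn matching scheme. */
    typedef struct {
        uint32_t insn;   /* fixed opcode bits        */
        uint32_t mask;   /* which bits must be fixed */
        const char *name;
    } DemoInsn;

    static const DemoInsn demo_table[] = {
        { 0x30000400u, 0xfc001fe0u, "fcmp_s_0c" },
        { 0x30000c00u, 0xfc001fe0u, "fcmp_d" },
    };

    static const char *demo_decode(uint32_t insn)
    {
        for (size_t i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
            if ((insn & demo_table[i].mask) == demo_table[i].insn) {
                return demo_table[i].name;
            }
        }
        return "illegal";
    }

    int main(void)
    {
        /* Operand bits outside the mask do not affect the match. */
        assert(demo_decode(0x30000400u) == demo_table[0].name);
        assert(demo_decode(0x30200400u) == demo_table[0].name);
        assert(demo_decode(0x30000c00u) == demo_table[1].name);
        return 0;
    }
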
+static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
+{
+ uint32_t opc = extract32(insn, 26, 6);
+
+ switch (opc) {
+ case 0x00: /* system op */
+ return translate_table(ctx, insn, table_system);
+ case 0x01:
+ return translate_table(ctx, insn, table_mem_mgmt);
+ case 0x02:
+ return translate_table(ctx, insn, table_arith_log);
+ case 0x03:
+ return translate_table(ctx, insn, table_index_mem);
+ case 0x06:
+ return trans_fmpyadd(ctx, insn, false);
+ case 0x08:
+ return trans_ldil(ctx, insn);
+ case 0x09:
+ return trans_copr_w(ctx, insn);
+ case 0x0A:
+ return trans_addil(ctx, insn);
+ case 0x0B:
+ return trans_copr_dw(ctx, insn);
+ case 0x0C:
+ return translate_table(ctx, insn, table_float_0c);
+ case 0x0D:
+ return trans_ldo(ctx, insn);
+ case 0x0E:
+ return translate_table(ctx, insn, table_float_0e);
+
+ case 0x10:
+ return trans_load(ctx, insn, false, MO_UB);
+ case 0x11:
+ return trans_load(ctx, insn, false, MO_TEUW);
+ case 0x12:
+ return trans_load(ctx, insn, false, MO_TEUL);
+ case 0x13:
+ return trans_load(ctx, insn, true, MO_TEUL);
+ case 0x16:
+ return trans_fload_mod(ctx, insn);
+ case 0x17:
+ return trans_load_w(ctx, insn);
+ case 0x18:
+ return trans_store(ctx, insn, false, MO_UB);
+ case 0x19:
+ return trans_store(ctx, insn, false, MO_TEUW);
+ case 0x1A:
+ return trans_store(ctx, insn, false, MO_TEUL);
+ case 0x1B:
+ return trans_store(ctx, insn, true, MO_TEUL);
+ case 0x1E:
+ return trans_fstore_mod(ctx, insn);
+ case 0x1F:
+ return trans_store_w(ctx, insn);
+
+ case 0x20:
+ return trans_cmpb(ctx, insn, true, false, false);
+ case 0x21:
+ return trans_cmpb(ctx, insn, true, true, false);
+ case 0x22:
+ return trans_cmpb(ctx, insn, false, false, false);
+ case 0x23:
+ return trans_cmpb(ctx, insn, false, true, false);
+ case 0x24:
+ return trans_cmpiclr(ctx, insn);
+ case 0x25:
+ return trans_subi(ctx, insn);
+ case 0x26:
+ return trans_fmpyadd(ctx, insn, true);
+ case 0x27:
+ return trans_cmpb(ctx, insn, true, false, true);
+ case 0x28:
+ return trans_addb(ctx, insn, true, false);
+ case 0x29:
+ return trans_addb(ctx, insn, true, true);
+ case 0x2A:
+ return trans_addb(ctx, insn, false, false);
+ case 0x2B:
+ return trans_addb(ctx, insn, false, true);
+ case 0x2C:
+ case 0x2D:
+ return trans_addi(ctx, insn);
+ case 0x2E:
+ return translate_table(ctx, insn, table_fp_fused);
+ case 0x2F:
+ return trans_cmpb(ctx, insn, false, false, true);
+
+ case 0x30:
+ case 0x31:
+ return trans_bb(ctx, insn);
+ case 0x32:
+ return trans_movb(ctx, insn, false);
+ case 0x33:
+ return trans_movb(ctx, insn, true);
+ case 0x34:
+ return translate_table(ctx, insn, table_sh_ex);
+ case 0x35:
+ return translate_table(ctx, insn, table_depw);
+ case 0x38:
+ return trans_be(ctx, insn, false);
+ case 0x39:
+ return trans_be(ctx, insn, true);
+ case 0x3A:
+ return translate_table(ctx, insn, table_branch);
+
+ case 0x04: /* spopn */
+ case 0x05: /* diag */
+ case 0x0F: /* product specific */
+ break;
+
+ case 0x07: /* unassigned */
+ case 0x15: /* unassigned */
+ case 0x1D: /* unassigned */
+ case 0x37: /* unassigned */
+ case 0x3F: /* unassigned */
+ default:
+ break;
+ }
+ return gen_illegal(ctx);
+}
+
+void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb)
+{
+ HPPACPU *cpu = hppa_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext ctx;
+ ExitStatus ret;
+ int num_insns, max_insns, i;
+
+ ctx.tb = tb;
+ ctx.cs = cs;
+ ctx.iaoq_f = tb->pc;
+ ctx.iaoq_b = tb->cs_base;
+ ctx.singlestep_enabled = cs->singlestep_enabled;
+
+ ctx.ntemps = 0;
+ for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) {
+ TCGV_UNUSED(ctx.temps[i]);
+ }
+
+ /* Compute the maximum number of insns to execute, as bounded by
+ (1) icount, (2) single-stepping, (3) branch delay slots, or
+ (4) the number of insns remaining on the current page. */
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (ctx.singlestep_enabled || singlestep) {
+ max_insns = 1;
+ } else if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ num_insns = 0;
+ gen_tb_start(tb);
+
+ /* Seed the nullification status from PSW[N], as shown in TB->FLAGS. */
+ ctx.null_cond = cond_make_f();
+ ctx.psw_n_nonzero = false;
+ if (tb->flags & 1) {
+ ctx.null_cond.c = TCG_COND_ALWAYS;
+ ctx.psw_n_nonzero = true;
+ }
+ ctx.null_lab = NULL;
+
+ do {
+ tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) {
+ ret = gen_excp(&ctx, EXCP_DEBUG);
+ break;
+ }
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ if (ctx.iaoq_f < TARGET_PAGE_SIZE) {
+ ret = do_page_zero(&ctx);
+ assert(ret != NO_EXIT);
+ } else {
+ /* Always fetch the insn, even if nullified, so that we check
+ the page permissions for execute. */
+ uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f);
+
+ /* Set up the IA queue for the next insn.
+ This will be overwritten by a branch. */
+ if (ctx.iaoq_b == -1) {
+ ctx.iaoq_n = -1;
+ ctx.iaoq_n_var = get_temp(&ctx);
+ tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4);
+ } else {
+ ctx.iaoq_n = ctx.iaoq_b + 4;
+ TCGV_UNUSED(ctx.iaoq_n_var);
+ }
+
+ if (unlikely(ctx.null_cond.c == TCG_COND_ALWAYS)) {
+ ctx.null_cond.c = TCG_COND_NEVER;
+ ret = NO_EXIT;
+ } else {
+ ret = translate_one(&ctx, insn);
+ assert(ctx.null_lab == NULL);
+ }
+ }
+
+ for (i = 0; i < ctx.ntemps; ++i) {
+ tcg_temp_free(ctx.temps[i]);
+ TCGV_UNUSED(ctx.temps[i]);
+ }
+ ctx.ntemps = 0;
+
+        /* If we see non-linear instructions, exhaust the instruction count,
+           or run out of buffer space, stop generation.  */
+ /* ??? The non-linear instruction restriction is purely due to
+ the debugging dump. Otherwise we *could* follow unconditional
+ branches within the same page. */
+ if (ret == NO_EXIT
+ && (ctx.iaoq_b != ctx.iaoq_f + 4
+ || num_insns >= max_insns
+ || tcg_op_buf_full())) {
+ if (ctx.null_cond.c == TCG_COND_NEVER
+ || ctx.null_cond.c == TCG_COND_ALWAYS) {
+ nullify_set(&ctx, ctx.null_cond.c == TCG_COND_ALWAYS);
+ gen_goto_tb(&ctx, 0, ctx.iaoq_b, ctx.iaoq_n);
+ ret = EXIT_GOTO_TB;
+ } else {
+ ret = EXIT_IAQ_N_STALE;
+ }
+ }
+
+ ctx.iaoq_f = ctx.iaoq_b;
+ ctx.iaoq_b = ctx.iaoq_n;
+ if (ret == EXIT_NORETURN
+ || ret == EXIT_GOTO_TB
+ || ret == EXIT_IAQ_N_UPDATED) {
+ break;
+ }
+ if (ctx.iaoq_f == -1) {
+ tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
+ copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var);
+ nullify_save(&ctx);
+ ret = EXIT_IAQ_N_UPDATED;
+ break;
+ }
+ if (ctx.iaoq_b == -1) {
+ tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var);
+ }
+ } while (ret == NO_EXIT);
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+
+ switch (ret) {
+ case EXIT_GOTO_TB:
+ case EXIT_NORETURN:
+ break;
+ case EXIT_IAQ_N_STALE:
+ copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f);
+ copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b);
+ nullify_save(&ctx);
+ /* FALLTHRU */
+ case EXIT_IAQ_N_UPDATED:
+ if (ctx.singlestep_enabled) {
+ gen_excp_1(EXCP_DEBUG);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
+ break;
+ default:
+ abort();
+ }
+
+ gen_tb_end(tb, num_insns);
+
+ tb->size = num_insns * 4;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(tb->pc)) {
+ qemu_log_lock();
+ switch (tb->pc) {
+ case 0x00:
+ qemu_log("IN:\n0x00000000: (null)\n\n");
+ break;
+ case 0xb0:
+ qemu_log("IN:\n0x000000b0: light-weight-syscall\n\n");
+ break;
+ case 0xe0:
+ qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n\n");
+ break;
+ case 0x100:
+ qemu_log("IN:\n0x00000100: syscall\n\n");
+ break;
+ default:
+ qemu_log("IN: %s\n", lookup_symbol(tb->pc));
+ log_target_disas(cs, tb->pc, tb->size, 1);
+ qemu_log("\n");
+ break;
+ }
+ qemu_log_unlock();
+ }
+#endif
+}
+
+void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->iaoq_f = data[0];
+ if (data[1] != -1) {
+ env->iaoq_b = data[1];
+ }
+ /* Since we were executing the instruction at IAOQ_F, and took some
+ sort of action that provoked the cpu_restore_state, we can infer
+ that the instruction was not nullified. */
+ env->psw_n = 0;
+}
diff --git a/target/i386/Makefile.objs b/target/i386/Makefile.objs
index b223d7932b..4fcb7f3df0 100644
--- a/target/i386/Makefile.objs
+++ b/target/i386/Makefile.objs
@@ -5,3 +5,10 @@ obj-y += gdbstub.o
obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o monitor.o
obj-$(CONFIG_KVM) += kvm.o hyperv.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
+# HAX support
+ifdef CONFIG_WIN32
+obj-$(CONFIG_HAX) += hax-all.o hax-mem.o hax-windows.o
+endif
+ifdef CONFIG_DARWIN
+obj-$(CONFIG_HAX) += hax-all.o hax-mem.o hax-darwin.o
+endif
diff --git a/target/i386/cc_helper.c b/target/i386/cc_helper.c
index 83af223c9f..c9c90e10db 100644
--- a/target/i386/cc_helper.c
+++ b/target/i386/cc_helper.c
@@ -105,6 +105,8 @@ target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
return src1;
case CC_OP_CLR:
return CC_Z | CC_P;
+ case CC_OP_POPCNT:
+ return src1 ? 0 : CC_Z;
case CC_OP_MULB:
return compute_all_mulb(dst, src1);
@@ -232,6 +234,7 @@ target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
case CC_OP_LOGICL:
case CC_OP_LOGICQ:
case CC_OP_CLR:
+ case CC_OP_POPCNT:
return 0;
case CC_OP_EFLAGS:
diff --git a/target/i386/cpu-qom.h b/target/i386/cpu-qom.h
index 7c9a07ae65..8cd607e9a2 100644
--- a/target/i386/cpu-qom.h
+++ b/target/i386/cpu-qom.h
@@ -48,6 +48,7 @@ typedef struct X86CPUDefinition X86CPUDefinition;
* X86CPUClass:
* @cpu_def: CPU model definition
* @kvm_required: Whether CPU model requires KVM to be enabled.
+ * @migration_safe: See CpuDefinitionInfo::migration_safe
* @parent_realize: The parent class' realize handler.
* @parent_reset: The parent class' reset handler.
*
@@ -62,6 +63,7 @@ typedef struct X86CPUClass {
X86CPUDefinition *cpu_def;
bool kvm_required;
+ bool migration_safe;
/* Optional description of CPU model.
* If unavailable, cpu_def->model_id is used */
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index b0640f1e38..cff23e129d 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -435,7 +435,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
NULL, "avx512vbmi", "umip", "pku",
"ospke", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
+ NULL, NULL, "avx512-vpopcntdq", NULL,
"la57", NULL, NULL, NULL,
NULL, NULL, "rdpid", NULL,
NULL, NULL, NULL, NULL,
@@ -1339,12 +1339,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
.features[FEAT_1_ECX] =
CPUID_EXT_SSE3,
.features[FEAT_8000_0001_EDX] =
- CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
- CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
- CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
- CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
- CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
- CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
.xlevel = 0x80000008,
.model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
},
@@ -1365,13 +1360,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT_CX16 | CPUID_EXT_SSE3,
/* Missing: CPUID_EXT2_RDTSCP */
.features[FEAT_8000_0001_EDX] =
- CPUID_EXT2_LM | CPUID_EXT2_FXSR |
- CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
- CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
- CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
- CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
- CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
- CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
.xlevel = 0x80000008,
@@ -1395,13 +1384,7 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT_SSE3,
/* Missing: CPUID_EXT2_RDTSCP */
.features[FEAT_8000_0001_EDX] =
- CPUID_EXT2_LM | CPUID_EXT2_FXSR |
- CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
- CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
- CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
- CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
- CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
- CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
@@ -1428,13 +1411,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT_SSE3,
/* Missing: CPUID_EXT2_RDTSCP */
.features[FEAT_8000_0001_EDX] =
- CPUID_EXT2_LM |
- CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
- CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
- CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
- CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
- CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
- CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
@@ -1464,13 +1442,8 @@ static X86CPUDefinition builtin_x86_defs[] = {
CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
/* Missing: CPUID_EXT2_RDTSCP */
.features[FEAT_8000_0001_EDX] =
- CPUID_EXT2_LM |
- CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
- CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
- CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
- CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
- CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
- CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
.features[FEAT_8000_0001_ECX] =
CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
@@ -2235,6 +2208,9 @@ static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
info->name = x86_cpu_class_get_model_name(cc);
x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
info->has_unavailable_features = true;
+ info->q_typename = g_strdup(object_class_get_name(oc));
+ info->migration_safe = cc->migration_safe;
+ info->has_migration_safe = true;
entry = g_malloc0(sizeof(*entry));
entry->value = info;
@@ -2382,6 +2358,7 @@ static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
X86CPUClass *xcc = X86_CPU_CLASS(oc);
xcc->cpu_def = cpudef;
+ xcc->migration_safe = true;
}
static void x86_register_cpudef_type(X86CPUDefinition *def)
@@ -2394,6 +2371,11 @@ static void x86_register_cpudef_type(X86CPUDefinition *def)
.class_data = def,
};
+ /* AMD aliases are handled at runtime based on CPUID vendor, so
+ * they shouldn't be set on the CPU model table.
+ */
+ assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
+
type_register(&ti);
g_free(typename);
}
@@ -2819,8 +2801,6 @@ static void x86_cpu_reset(CPUState *s)
memset(env, 0, offsetof(CPUX86State, end_reset_fields));
- tlb_flush(s, 1);
-
env->old_exception = -1;
/* init to reset state */
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index a7f2f6099d..10c5a3538d 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -630,6 +630,7 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_ECX_UMIP (1U << 2)
#define CPUID_7_0_ECX_PKU (1U << 3)
#define CPUID_7_0_ECX_OSPKE (1U << 4)
+#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) /* POPCNT for vectors of DW/QW */
#define CPUID_7_0_ECX_LA57 (1U << 16)
#define CPUID_7_0_ECX_RDPID (1U << 22)
@@ -777,6 +778,7 @@ typedef enum {
CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
CC_OP_CLR, /* Z set, all other flags clear. */
+ CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */
CC_OP_NB,
} CCOp;
@@ -1122,10 +1124,12 @@ typedef struct CPUX86State {
uint8_t nmi_injected;
uint8_t nmi_pending;
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
- /* Fields from here on are preserved across CPU reset. */
- struct {} end_reset_fields;
+ /* Fields after CPU_COMMON are preserved across CPU reset. */
/* processor features (e.g. for CPUID insn) */
/* Minimum level/xlevel/xlevel2, based on CPU model + features */
diff --git a/target/i386/fpu_helper.c b/target/i386/fpu_helper.c
index 2049a8c01d..66474ad98e 100644
--- a/target/i386/fpu_helper.c
+++ b/target/i386/fpu_helper.c
@@ -1465,7 +1465,7 @@ void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
}
if (env->pkru != old_pkru) {
CPUState *cs = CPU(x86_env_get_cpu(env));
- tlb_flush(cs, 1);
+ tlb_flush(cs);
}
}
}
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
new file mode 100644
index 0000000000..ef13015215
--- /dev/null
+++ b/target/i386/hax-all.c
@@ -0,0 +1,1155 @@
+/*
+ * QEMU HAX support
+ *
+ * Copyright IBM, Corp. 2008
+ * Red Hat, Inc. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Glauber Costa <gcosta@redhat.com>
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * HAX common code for both Windows and Darwin
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/address-spaces.h"
+#include "exec/exec-all.h"
+#include "exec/ioport.h"
+
+#include "qemu-common.h"
+#include "strings.h"
+#include "hax-i386.h"
+#include "sysemu/accel.h"
+#include "sysemu/sysemu.h"
+#include "qemu/main-loop.h"
+#include "hw/boards.h"
+
+#define DEBUG_HAX 0
+
+#define DPRINTF(fmt, ...) \
+ do { \
+ if (DEBUG_HAX) { \
+ fprintf(stdout, fmt, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+/* Current version */
+const uint32_t hax_cur_version = 0x4; /* API v4: unmapping and MMIO moves */
+/* Minimum HAX kernel version */
+const uint32_t hax_min_version = 0x4; /* API v4: supports unmapping */
+
+static bool hax_allowed;
+
+struct hax_state hax_global;
+
+static void hax_vcpu_sync_state(CPUArchState *env, int modified);
+static int hax_arch_get_registers(CPUArchState *env);
+
+int hax_enabled(void)
+{
+ return hax_allowed;
+}
+
+int valid_hax_tunnel_size(uint16_t size)
+{
+ return size >= sizeof(struct hax_tunnel);
+}
+
+hax_fd hax_vcpu_get_fd(CPUArchState *env)
+{
+ struct hax_vcpu_state *vcpu = ENV_GET_CPU(env)->hax_vcpu;
+ if (!vcpu) {
+ return HAX_INVALID_FD;
+ }
+ return vcpu->fd;
+}
+
+static int hax_get_capability(struct hax_state *hax)
+{
+ int ret;
+ struct hax_capabilityinfo capinfo, *cap = &capinfo;
+
+ ret = hax_capability(hax, cap);
+ if (ret) {
+ return ret;
+ }
+
+ if ((cap->wstatus & HAX_CAP_WORKSTATUS_MASK) == HAX_CAP_STATUS_NOTWORKING) {
+ if (cap->winfo & HAX_CAP_FAILREASON_VT) {
+ DPRINTF
+ ("VTX feature is not enabled, HAX driver will not work.\n");
+ } else if (cap->winfo & HAX_CAP_FAILREASON_NX) {
+ DPRINTF
+ ("NX feature is not enabled, HAX driver will not work.\n");
+ }
+ return -ENXIO;
+
+ }
+
+ if (!(cap->winfo & HAX_CAP_UG)) {
+ fprintf(stderr, "UG mode is not supported by the hardware.\n");
+ return -ENOTSUP;
+ }
+
+ if (cap->wstatus & HAX_CAP_MEMQUOTA) {
+ if (cap->mem_quota < hax->mem_quota) {
+ fprintf(stderr, "The VM memory needed exceeds the driver limit.\n");
+ return -ENOSPC;
+ }
+ }
+ return 0;
+}
+
+static int hax_version_support(struct hax_state *hax)
+{
+ int ret;
+ struct hax_module_version version;
+
+ ret = hax_mod_version(hax, &version);
+ if (ret < 0) {
+ return 0;
+ }
+
+ if (hax_min_version > version.cur_version) {
+ fprintf(stderr, "Incompatible HAX module version %d,",
+ version.cur_version);
+ fprintf(stderr, "requires minimum version %d\n", hax_min_version);
+ return 0;
+ }
+ if (hax_cur_version < version.compat_version) {
+ fprintf(stderr, "Incompatible QEMU HAX API version %x,",
+ hax_cur_version);
+ fprintf(stderr, "requires minimum HAX API version %x\n",
+ version.compat_version);
+ return 0;
+ }
+
+ return 1;
+}
+
+int hax_vcpu_create(int id)
+{
+ struct hax_vcpu_state *vcpu = NULL;
+ int ret;
+
+ if (!hax_global.vm) {
+        fprintf(stderr, "vcpu %x creation failed, vm is null\n", id);
+ return -1;
+ }
+
+ if (hax_global.vm->vcpus[id]) {
+ fprintf(stderr, "vcpu %x allocated already\n", id);
+ return 0;
+ }
+
+ vcpu = g_malloc(sizeof(struct hax_vcpu_state));
+ if (!vcpu) {
+ fprintf(stderr, "Failed to alloc vcpu state\n");
+ return -ENOMEM;
+ }
+
+ memset(vcpu, 0, sizeof(struct hax_vcpu_state));
+
+ ret = hax_host_create_vcpu(hax_global.vm->fd, id);
+ if (ret) {
+ fprintf(stderr, "Failed to create vcpu %x\n", id);
+ goto error;
+ }
+
+ vcpu->vcpu_id = id;
+ vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id);
+ if (hax_invalid_fd(vcpu->fd)) {
+ fprintf(stderr, "Failed to open the vcpu\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ hax_global.vm->vcpus[id] = vcpu;
+
+ ret = hax_host_setup_vcpu_channel(vcpu);
+ if (ret) {
+ fprintf(stderr, "Invalid hax tunnel size\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ return 0;
+
+ error:
+ /* vcpu and tunnel will be closed automatically */
+ if (vcpu && !hax_invalid_fd(vcpu->fd)) {
+ hax_close_fd(vcpu->fd);
+ }
+
+ hax_global.vm->vcpus[id] = NULL;
+ g_free(vcpu);
+ return -1;
+}
+
+int hax_vcpu_destroy(CPUState *cpu)
+{
+    struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
+
+    if (!vcpu) {
+        return 0;
+    }
+
+    if (!hax_global.vm) {
+        fprintf(stderr, "vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id);
+        return -1;
+    }
+
+    /*
+     * 1. The hax_tunnel is also destroyed when the vcpu is destroyed
+     * 2. Closing the fd causes the hax module to clean up the vcpu state
+     */
+ hax_close_fd(vcpu->fd);
+ hax_global.vm->vcpus[vcpu->vcpu_id] = NULL;
+ g_free(vcpu);
+ return 0;
+}
+
+int hax_init_vcpu(CPUState *cpu)
+{
+ int ret;
+
+ ret = hax_vcpu_create(cpu->cpu_index);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to create HAX vcpu\n");
+ exit(-1);
+ }
+
+ cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
+ cpu->hax_vcpu_dirty = true;
+ qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));
+
+ return ret;
+}
+
+struct hax_vm *hax_vm_create(struct hax_state *hax)
+{
+ struct hax_vm *vm;
+ int vm_id = 0, ret;
+
+ if (hax_invalid_fd(hax->fd)) {
+ return NULL;
+ }
+
+ if (hax->vm) {
+ return hax->vm;
+ }
+
+ vm = g_malloc(sizeof(struct hax_vm));
+ if (!vm) {
+ return NULL;
+ }
+ memset(vm, 0, sizeof(struct hax_vm));
+ ret = hax_host_create_vm(hax, &vm_id);
+ if (ret) {
+ fprintf(stderr, "Failed to create vm %x\n", ret);
+ goto error;
+ }
+ vm->id = vm_id;
+ vm->fd = hax_host_open_vm(hax, vm_id);
+ if (hax_invalid_fd(vm->fd)) {
+ fprintf(stderr, "Failed to open vm %d\n", vm_id);
+ goto error;
+ }
+
+ hax->vm = vm;
+ return vm;
+
+ error:
+ g_free(vm);
+ hax->vm = NULL;
+ return NULL;
+}
+
+int hax_vm_destroy(struct hax_vm *vm)
+{
+ int i;
+
+    for (i = 0; i < HAX_MAX_VCPU; i++) {
+        if (vm->vcpus[i]) {
+            fprintf(stderr, "VCPUs should be cleaned up before the VM is destroyed\n");
+            return -1;
+        }
+    }
+ hax_close_fd(vm->fd);
+ g_free(vm);
+ hax_global.vm = NULL;
+ return 0;
+}
+
+static void hax_handle_interrupt(CPUState *cpu, int mask)
+{
+ cpu->interrupt_request |= mask;
+
+ if (!qemu_cpu_is_self(cpu)) {
+ qemu_cpu_kick(cpu);
+ }
+}
+
+static int hax_init(ram_addr_t ram_size)
+{
+ struct hax_state *hax = NULL;
+ struct hax_qemu_version qversion;
+ int ret;
+
+ hax = &hax_global;
+
+ memset(hax, 0, sizeof(struct hax_state));
+ hax->mem_quota = ram_size;
+
+ hax->fd = hax_mod_open();
+ if (hax_invalid_fd(hax->fd)) {
+ hax->fd = 0;
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ret = hax_get_capability(hax);
+
+ if (ret) {
+ if (ret != -ENOSPC) {
+ ret = -EINVAL;
+ }
+ goto error;
+ }
+
+ if (!hax_version_support(hax)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hax->vm = hax_vm_create(hax);
+ if (!hax->vm) {
+ fprintf(stderr, "Failed to create HAX VM\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hax_memory_init();
+
+ qversion.cur_version = hax_cur_version;
+ qversion.min_version = hax_min_version;
+ hax_notify_qemu_version(hax->vm->fd, &qversion);
+ cpu_interrupt_handler = hax_handle_interrupt;
+
+ return ret;
+ error:
+ if (hax->vm) {
+ hax_vm_destroy(hax->vm);
+ }
+ if (hax->fd) {
+ hax_mod_close(hax);
+ }
+
+ return ret;
+}
+
+static int hax_accel_init(MachineState *ms)
+{
+ int ret = hax_init(ms->ram_size);
+
+ if (ret && (ret != -ENOSPC)) {
+ fprintf(stderr, "No accelerator found.\n");
+ } else {
+ fprintf(stdout, "HAX is %s and emulator runs in %s mode.\n",
+ !ret ? "working" : "not working",
+ !ret ? "fast virt" : "emulation");
+ }
+ return ret;
+}
+
+static int hax_handle_fastmmio(CPUArchState *env, struct hax_fastmmio *hft)
+{
+ if (hft->direction < 2) {
+ cpu_physical_memory_rw(hft->gpa, (uint8_t *) &hft->value, hft->size,
+ hft->direction);
+ } else {
+ /*
+ * HAX API v4 supports transferring data between two MMIO addresses,
+ * hft->gpa and hft->gpa2 (instructions such as MOVS require this):
+ * hft->direction == 2: gpa ==> gpa2
+ */
+ uint64_t value;
+ cpu_physical_memory_rw(hft->gpa, (uint8_t *) &value, hft->size, 0);
+ cpu_physical_memory_rw(hft->gpa2, (uint8_t *) &value, hft->size, 1);
+ }
+
+ return 0;
+}
+
+static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port,
+ int direction, int size, int count, void *buffer)
+{
+ uint8_t *ptr;
+ int i;
+ MemTxAttrs attrs = { 0 };
+
+ if (!df) {
+ ptr = (uint8_t *) buffer;
+ } else {
+ ptr = buffer + size * count - size;
+ }
+ for (i = 0; i < count; i++) {
+ address_space_rw(&address_space_io, port, attrs,
+ ptr, size, direction == HAX_EXIT_IO_OUT);
+ if (!df) {
+ ptr += size;
+ } else {
+ ptr -= size;
+ }
+ }
+
+ return 0;
+}
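
The df argument above mirrors the x86 direction flag for string I/O (INS/OUTS): with DF clear the buffer is walked forward from its start, with DF set it is walked backward starting at the last element. A tiny standalone sketch of that walk (walk_offsets is a hypothetical helper, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* For 'count' elements of 'size' bytes, record the byte offset of each
       element in the order it would be accessed: forward for DF=0,
       backward from the last element for DF=1. */
    static void walk_offsets(uint32_t df, int size, int count, int *offsets)
    {
        int pos = df ? size * count - size : 0;
        for (int i = 0; i < count; i++) {
            offsets[i] = pos;
            pos += df ? -size : size;
        }
    }

    int main(void)
    {
        int fwd[3], back[3];

        walk_offsets(0, 4, 3, fwd);   /* visits offsets 0, 4, 8 */
        walk_offsets(1, 4, 3, back);  /* visits offsets 8, 4, 0 */
        assert(fwd[0] == 0 && fwd[2] == 8);
        assert(back[0] == 8 && back[2] == 0);
        return 0;
    }
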
+
+static int hax_vcpu_interrupt(CPUArchState *env)
+{
+ CPUState *cpu = ENV_GET_CPU(env);
+ struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
+ struct hax_tunnel *ht = vcpu->tunnel;
+
+ /*
+ * Try to inject an interrupt if the guest can accept it
+     * Unlike KVM, the HAX kernel checks the guest eflags itself, not QEMU
+ */
+ if (ht->ready_for_interrupt_injection &&
+ (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ int irq;
+
+ irq = cpu_get_pic_interrupt(env);
+ if (irq >= 0) {
+ hax_inject_interrupt(env, irq);
+ cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ }
+ }
+
+ /* If we have an interrupt but the guest is not ready to receive an
+ * interrupt, request an interrupt window exit. This will
+ * cause a return to userspace as soon as the guest is ready to
+ * receive interrupts. */
+ if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ ht->request_interrupt_window = 1;
+ } else {
+ ht->request_interrupt_window = 0;
+ }
+ return 0;
+}
+
+void hax_raise_event(CPUState *cpu)
+{
+ struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
+
+ if (!vcpu) {
+ return;
+ }
+ vcpu->tunnel->user_event_pending = 1;
+}
+
+/*
+ * Ask the hax kernel module to run the CPU for us until:
+ * 1. The guest crashes or shuts down
+ * 2. QEMU's emulation is needed, e.g. the guest executes an MMIO instruction
+ * 3. The guest executes HLT
+ * 4. QEMU has a signal/event pending
+ * 5. An unknown VMX exit happens
+ */
+static int hax_vcpu_hax_exec(CPUArchState *env)
+{
+ int ret = 0;
+ CPUState *cpu = ENV_GET_CPU(env);
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
+ struct hax_tunnel *ht = vcpu->tunnel;
+
+ if (!hax_enabled()) {
+ DPRINTF("Trying to vcpu execute at eip:" TARGET_FMT_lx "\n", env->eip);
+ return 0;
+ }
+
+ cpu->halted = 0;
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(x86_cpu->apic_state);
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+ DPRINTF("\nhax_vcpu_hax_exec: handling INIT for %d\n",
+ cpu->cpu_index);
+ do_cpu_init(x86_cpu);
+ hax_vcpu_sync_state(env, 1);
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+ DPRINTF("hax_vcpu_hax_exec: handling SIPI for %d\n",
+ cpu->cpu_index);
+ hax_vcpu_sync_state(env, 0);
+ do_cpu_sipi(x86_cpu);
+ hax_vcpu_sync_state(env, 1);
+ }
+
+ do {
+ int hax_ret;
+
+ if (cpu->exit_request) {
+ ret = 1;
+ break;
+ }
+
+ hax_vcpu_interrupt(env);
+
+ qemu_mutex_unlock_iothread();
+ hax_ret = hax_vcpu_run(vcpu);
+ qemu_mutex_lock_iothread();
+ current_cpu = cpu;
+
+        /* Simply continue vcpu_run if the system call was interrupted */
+ if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
+ DPRINTF("io window interrupted\n");
+ continue;
+ }
+
+ if (hax_ret < 0) {
+ fprintf(stderr, "vcpu run failed for vcpu %x\n", vcpu->vcpu_id);
+ abort();
+ }
+ switch (ht->_exit_status) {
+ case HAX_EXIT_IO:
+ ret = hax_handle_io(env, ht->pio._df, ht->pio._port,
+ ht->pio._direction,
+ ht->pio._size, ht->pio._count, vcpu->iobuf);
+ break;
+ case HAX_EXIT_FAST_MMIO:
+ ret = hax_handle_fastmmio(env, (struct hax_fastmmio *) vcpu->iobuf);
+ break;
+ /* Guest state changed, currently only for shutdown */
+ case HAX_EXIT_STATECHANGE:
+ fprintf(stdout, "VCPU shutdown request\n");
+ qemu_system_shutdown_request();
+ hax_vcpu_sync_state(env, 0);
+ ret = 1;
+ break;
+ case HAX_EXIT_UNKNOWN_VMEXIT:
+ fprintf(stderr, "Unknown VMX exit %x from guest\n",
+ ht->_exit_reason);
+ qemu_system_reset_request();
+ hax_vcpu_sync_state(env, 0);
+ cpu_dump_state(cpu, stderr, fprintf, 0);
+ ret = -1;
+ break;
+ case HAX_EXIT_HLT:
+ if (!(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ /* hlt instruction with interrupt disabled is shutdown */
+ env->eflags |= IF_MASK;
+ cpu->halted = 1;
+ cpu->exception_index = EXCP_HLT;
+ ret = 1;
+ }
+ break;
+        /* in these cases, simply continue running in the hax module */
+ case HAX_EXIT_INTERRUPT:
+ case HAX_EXIT_PAUSED:
+ break;
+ case HAX_EXIT_MMIO:
+ /* Should not happen on UG system */
+ fprintf(stderr, "HAX: unsupported MMIO emulation\n");
+ ret = -1;
+ break;
+ case HAX_EXIT_REAL:
+ /* Should not happen on UG system */
+ fprintf(stderr, "HAX: unimplemented real mode emulation\n");
+ ret = -1;
+ break;
+ default:
+ fprintf(stderr, "Unknown exit %x from HAX\n", ht->_exit_status);
+ qemu_system_reset_request();
+ hax_vcpu_sync_state(env, 0);
+ cpu_dump_state(cpu, stderr, fprintf, 0);
+ ret = 1;
+ break;
+ }
+ } while (!ret);
+
+ if (cpu->exit_request) {
+ cpu->exit_request = 0;
+ cpu->exception_index = EXCP_INTERRUPT;
+ }
+ return ret < 0;
+}
+
+static void do_hax_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
+{
+ CPUArchState *env = cpu->env_ptr;
+
+ hax_arch_get_registers(env);
+ cpu->hax_vcpu_dirty = true;
+}
+
+void hax_cpu_synchronize_state(CPUState *cpu)
+{
+ if (!cpu->hax_vcpu_dirty) {
+ run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
+ }
+}
+
+static void do_hax_cpu_synchronize_post_reset(CPUState *cpu,
+ run_on_cpu_data arg)
+{
+ CPUArchState *env = cpu->env_ptr;
+
+ hax_vcpu_sync_state(env, 1);
+ cpu->hax_vcpu_dirty = false;
+}
+
+void hax_cpu_synchronize_post_reset(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hax_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
+}
+
+static void do_hax_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
+{
+ CPUArchState *env = cpu->env_ptr;
+
+ hax_vcpu_sync_state(env, 1);
+ cpu->hax_vcpu_dirty = false;
+}
+
+void hax_cpu_synchronize_post_init(CPUState *cpu)
+{
+ run_on_cpu(cpu, do_hax_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+}
+
+int hax_smp_cpu_exec(CPUState *cpu)
+{
+ CPUArchState *env = (CPUArchState *) (cpu->env_ptr);
+ int fatal;
+ int ret;
+
+ while (1) {
+ if (cpu->exception_index >= EXCP_INTERRUPT) {
+ ret = cpu->exception_index;
+ cpu->exception_index = -1;
+ break;
+ }
+
+ fatal = hax_vcpu_hax_exec(env);
+
+ if (fatal) {
+ fprintf(stderr, "Unsupported HAX vcpu return\n");
+ abort();
+ }
+ }
+
+ return ret;
+}
+
+static void set_v8086_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
+{
+ memset(lhs, 0, sizeof(struct segment_desc_t));
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = 3;
+ lhs->present = 1;
+ lhs->dpl = 3;
+ lhs->operand_size = 0;
+ lhs->desc = 1;
+ lhs->long_mode = 0;
+ lhs->granularity = 0;
+ lhs->available = 0;
+}
+
+static void get_seg(SegmentCache *lhs, const struct segment_desc_t *rhs)
+{
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->flags = (rhs->type << DESC_TYPE_SHIFT)
+ | (rhs->present * DESC_P_MASK)
+ | (rhs->dpl << DESC_DPL_SHIFT)
+ | (rhs->operand_size << DESC_B_SHIFT)
+ | (rhs->desc * DESC_S_MASK)
+ | (rhs->long_mode << DESC_L_SHIFT)
+ | (rhs->granularity * DESC_G_MASK) | (rhs->available * DESC_AVL_MASK);
+}
+
+static void set_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
+{
+ unsigned flags = rhs->flags;
+
+ memset(lhs, 0, sizeof(struct segment_desc_t));
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
+ lhs->present = (flags & DESC_P_MASK) != 0;
+ lhs->dpl = rhs->selector & 3;
+ lhs->operand_size = (flags >> DESC_B_SHIFT) & 1;
+ lhs->desc = (flags & DESC_S_MASK) != 0;
+ lhs->long_mode = (flags >> DESC_L_SHIFT) & 1;
+ lhs->granularity = (flags & DESC_G_MASK) != 0;
+ lhs->available = (flags & DESC_AVL_MASK) != 0;
+}
+
+static void hax_getput_reg(uint64_t *hax_reg, target_ulong *qemu_reg, int set)
+{
+ target_ulong reg = *hax_reg;
+
+ if (set) {
+ *hax_reg = *qemu_reg;
+ } else {
+ *qemu_reg = reg;
+ }
+}
+
+/* The sregs have already been synced with the HAX kernel before this call */
+static int hax_get_segments(CPUArchState *env, struct vcpu_state_t *sregs)
+{
+ get_seg(&env->segs[R_CS], &sregs->_cs);
+ get_seg(&env->segs[R_DS], &sregs->_ds);
+ get_seg(&env->segs[R_ES], &sregs->_es);
+ get_seg(&env->segs[R_FS], &sregs->_fs);
+ get_seg(&env->segs[R_GS], &sregs->_gs);
+ get_seg(&env->segs[R_SS], &sregs->_ss);
+
+ get_seg(&env->tr, &sregs->_tr);
+ get_seg(&env->ldt, &sregs->_ldt);
+ env->idt.limit = sregs->_idt.limit;
+ env->idt.base = sregs->_idt.base;
+ env->gdt.limit = sregs->_gdt.limit;
+ env->gdt.base = sregs->_gdt.base;
+ return 0;
+}
+
+static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs)
+{
+ if ((env->eflags & VM_MASK)) {
+ set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
+ set_v8086_seg(&sregs->_ds, &env->segs[R_DS]);
+ set_v8086_seg(&sregs->_es, &env->segs[R_ES]);
+ set_v8086_seg(&sregs->_fs, &env->segs[R_FS]);
+ set_v8086_seg(&sregs->_gs, &env->segs[R_GS]);
+ set_v8086_seg(&sregs->_ss, &env->segs[R_SS]);
+ } else {
+ set_seg(&sregs->_cs, &env->segs[R_CS]);
+ set_seg(&sregs->_ds, &env->segs[R_DS]);
+ set_seg(&sregs->_es, &env->segs[R_ES]);
+ set_seg(&sregs->_fs, &env->segs[R_FS]);
+ set_seg(&sregs->_gs, &env->segs[R_GS]);
+ set_seg(&sregs->_ss, &env->segs[R_SS]);
+
+ if (env->cr[0] & CR0_PE_MASK) {
+ /* force ss cpl to cs cpl */
+ sregs->_ss.selector = (sregs->_ss.selector & ~3) |
+ (sregs->_cs.selector & 3);
+ sregs->_ss.dpl = sregs->_ss.selector & 3;
+ }
+ }
+
+ set_seg(&sregs->_tr, &env->tr);
+ set_seg(&sregs->_ldt, &env->ldt);
+ sregs->_idt.limit = env->idt.limit;
+ sregs->_idt.base = env->idt.base;
+ sregs->_gdt.limit = env->gdt.limit;
+ sregs->_gdt.base = env->gdt.base;
+ return 0;
+}
+
+/*
+ * After getting the state from the kernel module, some
+ * QEMU emulator state also needs to be updated
+ */
+static int hax_setup_qemu_emulator(CPUArchState *env)
+{
+
+#define HFLAG_COPY_MASK (~( \
+ HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
+ HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
+ HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
+ HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK))
+
+ uint32_t hflags;
+
+ hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+ hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
+ hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
+ (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
+ hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
+ hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
+ (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
+
+ if (env->efer & MSR_EFER_LMA) {
+ hflags |= HF_LMA_MASK;
+ }
+
+ if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
+ hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+ } else {
+ hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
+ (DESC_B_SHIFT - HF_CS32_SHIFT);
+ hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
+ (DESC_B_SHIFT - HF_SS32_SHIFT);
+ if (!(env->cr[0] & CR0_PE_MASK) ||
+ (env->eflags & VM_MASK) || !(hflags & HF_CS32_MASK)) {
+ hflags |= HF_ADDSEG_MASK;
+ } else {
+ hflags |= ((env->segs[R_DS].base |
+ env->segs[R_ES].base |
+ env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
+ }
+ }
+
+ hflags &= ~HF_SMM_MASK;
+
+ env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
+ return 0;
+}
+
+static int hax_sync_vcpu_register(CPUArchState *env, int set)
+{
+ struct vcpu_state_t regs;
+ int ret;
+ memset(&regs, 0, sizeof(struct vcpu_state_t));
+
+ if (!set) {
+ ret = hax_sync_vcpu_state(env, &regs, 0);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+
+    /* general purpose registers */
+ hax_getput_reg(&regs._rax, &env->regs[R_EAX], set);
+ hax_getput_reg(&regs._rbx, &env->regs[R_EBX], set);
+ hax_getput_reg(&regs._rcx, &env->regs[R_ECX], set);
+ hax_getput_reg(&regs._rdx, &env->regs[R_EDX], set);
+ hax_getput_reg(&regs._rsi, &env->regs[R_ESI], set);
+ hax_getput_reg(&regs._rdi, &env->regs[R_EDI], set);
+ hax_getput_reg(&regs._rsp, &env->regs[R_ESP], set);
+ hax_getput_reg(&regs._rbp, &env->regs[R_EBP], set);
+#ifdef TARGET_X86_64
+ hax_getput_reg(&regs._r8, &env->regs[8], set);
+ hax_getput_reg(&regs._r9, &env->regs[9], set);
+ hax_getput_reg(&regs._r10, &env->regs[10], set);
+ hax_getput_reg(&regs._r11, &env->regs[11], set);
+ hax_getput_reg(&regs._r12, &env->regs[12], set);
+ hax_getput_reg(&regs._r13, &env->regs[13], set);
+ hax_getput_reg(&regs._r14, &env->regs[14], set);
+ hax_getput_reg(&regs._r15, &env->regs[15], set);
+#endif
+ hax_getput_reg(&regs._rflags, &env->eflags, set);
+ hax_getput_reg(&regs._rip, &env->eip, set);
+
+ if (set) {
+ regs._cr0 = env->cr[0];
+ regs._cr2 = env->cr[2];
+ regs._cr3 = env->cr[3];
+ regs._cr4 = env->cr[4];
+ hax_set_segments(env, &regs);
+ } else {
+ env->cr[0] = regs._cr0;
+ env->cr[2] = regs._cr2;
+ env->cr[3] = regs._cr3;
+ env->cr[4] = regs._cr4;
+ hax_get_segments(env, &regs);
+ }
+
+ if (set) {
+ ret = hax_sync_vcpu_state(env, &regs, 1);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+ if (!set) {
+ hax_setup_qemu_emulator(env);
+ }
+ return 0;
+}
+
+static void hax_msr_entry_set(struct vmx_msr *item, uint32_t index,
+ uint64_t value)
+{
+ item->entry = index;
+ item->value = value;
+}
+
+static int hax_get_msrs(CPUArchState *env)
+{
+ struct hax_msr_data md;
+ struct vmx_msr *msrs = md.entries;
+ int ret, i, n;
+
+ n = 0;
+ msrs[n++].entry = MSR_IA32_SYSENTER_CS;
+ msrs[n++].entry = MSR_IA32_SYSENTER_ESP;
+ msrs[n++].entry = MSR_IA32_SYSENTER_EIP;
+ msrs[n++].entry = MSR_IA32_TSC;
+#ifdef TARGET_X86_64
+ msrs[n++].entry = MSR_EFER;
+ msrs[n++].entry = MSR_STAR;
+ msrs[n++].entry = MSR_LSTAR;
+ msrs[n++].entry = MSR_CSTAR;
+ msrs[n++].entry = MSR_FMASK;
+ msrs[n++].entry = MSR_KERNELGSBASE;
+#endif
+ md.nr_msr = n;
+ ret = hax_sync_msr(env, &md, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (i = 0; i < md.done; i++) {
+ switch (msrs[i].entry) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = msrs[i].value;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = msrs[i].value;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = msrs[i].value;
+ break;
+ case MSR_IA32_TSC:
+ env->tsc = msrs[i].value;
+ break;
+#ifdef TARGET_X86_64
+ case MSR_EFER:
+ env->efer = msrs[i].value;
+ break;
+ case MSR_STAR:
+ env->star = msrs[i].value;
+ break;
+ case MSR_LSTAR:
+ env->lstar = msrs[i].value;
+ break;
+ case MSR_CSTAR:
+ env->cstar = msrs[i].value;
+ break;
+ case MSR_FMASK:
+ env->fmask = msrs[i].value;
+ break;
+ case MSR_KERNELGSBASE:
+ env->kernelgsbase = msrs[i].value;
+ break;
+#endif
+ }
+ }
+
+ return 0;
+}
+
+static int hax_set_msrs(CPUArchState *env)
+{
+ struct hax_msr_data md;
+ struct vmx_msr *msrs;
+ msrs = md.entries;
+ int n = 0;
+
+ memset(&md, 0, sizeof(struct hax_msr_data));
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
+#ifdef TARGET_X86_64
+ hax_msr_entry_set(&msrs[n++], MSR_EFER, env->efer);
+ hax_msr_entry_set(&msrs[n++], MSR_STAR, env->star);
+ hax_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar);
+ hax_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
+ hax_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask);
+ hax_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
+#endif
+ md.nr_msr = n;
+ md.done = 0;
+
+ return hax_sync_msr(env, &md, 1);
+}
+
+static int hax_get_fpu(CPUArchState *env)
+{
+ struct fx_layout fpu;
+ int i, ret;
+
+ ret = hax_sync_fpu(env, &fpu, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ env->fpstt = (fpu.fsw >> 11) & 7;
+ env->fpus = fpu.fsw;
+ env->fpuc = fpu.fcw;
+ for (i = 0; i < 8; ++i) {
+ env->fptags[i] = !((fpu.ftw >> i) & 1);
+ }
+ memcpy(env->fpregs, fpu.st_mm, sizeof(env->fpregs));
+
+ for (i = 0; i < 8; i++) {
+ env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.mmx_1[i][0]);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.mmx_1[i][8]);
+ if (CPU_NB_REGS > 8) {
+ env->xmm_regs[i + 8].ZMM_Q(0) = ldq_p(&fpu.mmx_2[i][0]);
+ env->xmm_regs[i + 8].ZMM_Q(1) = ldq_p(&fpu.mmx_2[i][8]);
+ }
+ }
+ env->mxcsr = fpu.mxcsr;
+
+ return 0;
+}
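
The fsw handling above relies on the x87 status-word layout, where the top-of-stack index (TOP) occupies bits 11..13: hax_get_fpu extracts it with (fsw >> 11) & 7, and hax_set_fpu below re-inserts it after clearing those bits. A minimal worked example of just that bit manipulation:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t fsw = 0x3800;              /* TOP = 7, all other bits clear */
        unsigned fpstt = (fsw >> 11) & 7;   /* extract the TOP field */
        assert(fpstt == 7);

        /* Write a new TOP of 2 back into the status word. */
        fsw = (uint16_t)((fsw & ~(7 << 11)) | ((2 & 7) << 11));
        assert(((fsw >> 11) & 7) == 2);
        return 0;
    }
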
+
+static int hax_set_fpu(CPUArchState *env)
+{
+ struct fx_layout fpu;
+ int i;
+
+ memset(&fpu, 0, sizeof(fpu));
+ fpu.fsw = env->fpus & ~(7 << 11);
+ fpu.fsw |= (env->fpstt & 7) << 11;
+ fpu.fcw = env->fpuc;
+
+ for (i = 0; i < 8; ++i) {
+ fpu.ftw |= (!env->fptags[i]) << i;
+ }
+
+ memcpy(fpu.st_mm, env->fpregs, sizeof(env->fpregs));
+ for (i = 0; i < 8; i++) {
+ stq_p(&fpu.mmx_1[i][0], env->xmm_regs[i].ZMM_Q(0));
+ stq_p(&fpu.mmx_1[i][8], env->xmm_regs[i].ZMM_Q(1));
+ if (CPU_NB_REGS > 8) {
+ stq_p(&fpu.mmx_2[i][0], env->xmm_regs[i + 8].ZMM_Q(0));
+ stq_p(&fpu.mmx_2[i][8], env->xmm_regs[i + 8].ZMM_Q(1));
+ }
+ }
+
+ fpu.mxcsr = env->mxcsr;
+
+ return hax_sync_fpu(env, &fpu, 1);
+}
+
+static int hax_arch_get_registers(CPUArchState *env)
+{
+ int ret;
+
+ ret = hax_sync_vcpu_register(env, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = hax_get_fpu(env);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = hax_get_msrs(env);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hax_arch_set_registers(CPUArchState *env)
+{
+ int ret;
+ ret = hax_sync_vcpu_register(env, 1);
+
+ if (ret < 0) {
+ fprintf(stderr, "Failed to sync vcpu reg\n");
+ return ret;
+ }
+ ret = hax_set_fpu(env);
+ if (ret < 0) {
+ fprintf(stderr, "FPU failed\n");
+ return ret;
+ }
+ ret = hax_set_msrs(env);
+ if (ret < 0) {
+ fprintf(stderr, "MSR failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hax_vcpu_sync_state(CPUArchState *env, int modified)
+{
+ if (hax_enabled()) {
+ if (modified) {
+ hax_arch_set_registers(env);
+ } else {
+ hax_arch_get_registers(env);
+ }
+ }
+}
+
+/*
+ * Much simpler than KVM, at least in the first stage, because:
+ * we don't need to consider device pass-through or the framebuffer,
+ * and we may even be able to drop the BIOS entirely
+ */
+int hax_sync_vcpus(void)
+{
+ if (hax_enabled()) {
+ CPUState *cpu;
+
+ cpu = first_cpu;
+ if (!cpu) {
+ return 0;
+ }
+
+ for (; cpu != NULL; cpu = CPU_NEXT(cpu)) {
+ int ret;
+
+ ret = hax_arch_set_registers(cpu->env_ptr);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void hax_reset_vcpu_state(void *opaque)
+{
+ CPUState *cpu;
+ for (cpu = first_cpu; cpu != NULL; cpu = CPU_NEXT(cpu)) {
+ cpu->hax_vcpu->tunnel->user_event_pending = 0;
+ cpu->hax_vcpu->tunnel->ready_for_interrupt_injection = 0;
+ }
+}
+
+static void hax_accel_class_init(ObjectClass *oc, void *data)
+{
+ AccelClass *ac = ACCEL_CLASS(oc);
+ ac->name = "HAX";
+ ac->init_machine = hax_accel_init;
+ ac->allowed = &hax_allowed;
+}
+
+static const TypeInfo hax_accel_type = {
+ .name = ACCEL_CLASS_NAME("hax"),
+ .parent = TYPE_ACCEL,
+ .class_init = hax_accel_class_init,
+};
+
+static void hax_type_init(void)
+{
+ type_register_static(&hax_accel_type);
+}
+
+type_init(hax_type_init);
diff --git a/target/i386/hax-darwin.c b/target/i386/hax-darwin.c
new file mode 100644
index 0000000000..1c5bbd0a2d
--- /dev/null
+++ b/target/i386/hax-darwin.c
@@ -0,0 +1,316 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* HAX module interface - darwin version */
+#include "qemu/osdep.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+
+#include "target/i386/hax-i386.h"
+
+hax_fd hax_mod_open(void)
+{
+ int fd = open("/dev/HAX", O_RDWR);
+ if (fd == -1) {
+ fprintf(stderr, "Failed to open the hax module\n");
+ }
+
+ fcntl(fd, F_SETFD, FD_CLOEXEC);
+
+ return fd;
+}
+
+int hax_populate_ram(uint64_t va, uint32_t size)
+{
+ int ret;
+ struct hax_alloc_ram_info info;
+
+ if (!hax_global.vm || !hax_global.vm->fd) {
+ fprintf(stderr, "Allocate memory before vm create?\n");
+ return -EINVAL;
+ }
+
+ info.size = size;
+ info.va = va;
+ ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to allocate %x memory\n", size);
+ return ret;
+ }
+ return 0;
+}
+
+int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags)
+{
+ struct hax_set_ram_info info;
+ int ret;
+
+ info.pa_start = start_pa;
+ info.size = size;
+ info.va = host_va;
+ info.flags = (uint8_t) flags;
+
+ ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_SET_RAM, &info);
+ if (ret < 0) {
+ return -errno;
+ }
+ return 0;
+}
+
+int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
+{
+ int ret;
+
+ ret = ioctl(hax->fd, HAX_IOCTL_CAPABILITY, cap);
+ if (ret == -1) {
+ fprintf(stderr, "Failed to get HAX capability\n");
+ return -errno;
+ }
+
+ return 0;
+}
+
+int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
+{
+ int ret;
+
+ ret = ioctl(hax->fd, HAX_IOCTL_VERSION, version);
+ if (ret == -1) {
+ fprintf(stderr, "Failed to get HAX version\n");
+ return -errno;
+ }
+
+ return 0;
+}
+
+static char *hax_vm_devfs_string(int vm_id)
+{
+ char *name;
+
+ if (vm_id > MAX_VM_ID) {
+ fprintf(stderr, "Too big VM id\n");
+ return NULL;
+ }
+
+#define HAX_VM_DEVFS "/dev/hax_vm/vmxx"
+ name = g_strdup(HAX_VM_DEVFS);
+ if (!name) {
+ return NULL;
+ }
+
+ snprintf(name, sizeof HAX_VM_DEVFS, "/dev/hax_vm/vm%02d", vm_id);
+ return name;
+}
+
+static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
+{
+ char *name;
+
+ if (vm_id > MAX_VM_ID || vcpu_id > MAX_VCPU_ID) {
+ fprintf(stderr, "Too big vm id %x or vcpu id %x\n", vm_id, vcpu_id);
+ return NULL;
+ }
+
+#define HAX_VCPU_DEVFS "/dev/hax_vmxx/vcpuxx"
+ name = g_strdup(HAX_VCPU_DEVFS);
+ if (!name) {
+ return NULL;
+ }
+
+ snprintf(name, sizeof HAX_VCPU_DEVFS, "/dev/hax_vm%02d/vcpu%02d",
+ vm_id, vcpu_id);
+ return name;
+}
+
+int hax_host_create_vm(struct hax_state *hax, int *vmid)
+{
+ int ret;
+ int vm_id = 0;
+
+ if (hax_invalid_fd(hax->fd)) {
+ return -EINVAL;
+ }
+
+ if (hax->vm) {
+ return 0;
+ }
+
+ ret = ioctl(hax->fd, HAX_IOCTL_CREATE_VM, &vm_id);
+ *vmid = vm_id;
+ return ret;
+}
+
+hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
+{
+ hax_fd fd;
+ char *vm_name = NULL;
+
+ vm_name = hax_vm_devfs_string(vm_id);
+ if (!vm_name) {
+ return -1;
+ }
+
+ fd = open(vm_name, O_RDWR);
+ g_free(vm_name);
+
+ fcntl(fd, F_SETFD, FD_CLOEXEC);
+
+ return fd;
+}
+
+int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
+{
+ int ret;
+
+ if (hax_invalid_fd(vm_fd)) {
+ return -EINVAL;
+ }
+
+ ret = ioctl(vm_fd, HAX_VM_IOCTL_NOTIFY_QEMU_VERSION, qversion);
+
+ if (ret < 0) {
+ fprintf(stderr, "Failed to notify qemu API version\n");
+ return ret;
+ }
+ return 0;
+}
+
+/* Simply assume the size should be no smaller than hax_tunnel,
+ * since hax_tunnel can be extended later while preserving compatibility
+ */
+int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
+{
+ int ret;
+
+ ret = ioctl(vm_fd, HAX_VM_IOCTL_VCPU_CREATE, &vcpuid);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to create vcpu %x\n", vcpuid);
+ }
+
+ return ret;
+}
+
+hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
+{
+ char *devfs_path = NULL;
+ hax_fd fd;
+
+ devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
+ if (!devfs_path) {
+ fprintf(stderr, "Failed to get the devfs\n");
+ return -EINVAL;
+ }
+
+ fd = open(devfs_path, O_RDWR);
+ g_free(devfs_path);
+ if (fd < 0) {
+ fprintf(stderr, "Failed to open the vcpu devfs\n");
+ }
+ fcntl(fd, F_SETFD, FD_CLOEXEC);
+ return fd;
+}
+
+int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
+{
+ int ret;
+ struct hax_tunnel_info info;
+
+ ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_SETUP_TUNNEL, &info);
+ if (ret) {
+ fprintf(stderr, "Failed to setup the hax tunnel\n");
+ return ret;
+ }
+
+ if (!valid_hax_tunnel_size(info.size)) {
+ fprintf(stderr, "Invalid hax tunnel size %x\n", info.size);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ vcpu->tunnel = (struct hax_tunnel *) (intptr_t) (info.va);
+ vcpu->iobuf = (unsigned char *) (intptr_t) (info.io_va);
+ return 0;
+}
+
+int hax_vcpu_run(struct hax_vcpu_state *vcpu)
+{
+ int ret;
+
+ ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL);
+ return ret;
+}
+
+int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0) {
+ return -1;
+ }
+
+ if (set) {
+ ret = ioctl(fd, HAX_VCPU_IOCTL_SET_FPU, fl);
+ } else {
+ ret = ioctl(fd, HAX_VCPU_IOCTL_GET_FPU, fl);
+ }
+ return ret;
+}
+
+int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0) {
+ return -1;
+ }
+ if (set) {
+ ret = ioctl(fd, HAX_VCPU_IOCTL_SET_MSRS, msrs);
+ } else {
+ ret = ioctl(fd, HAX_VCPU_IOCTL_GET_MSRS, msrs);
+ }
+ return ret;
+}
+
+int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0) {
+ return -1;
+ }
+
+ if (set) {
+ ret = ioctl(fd, HAX_VCPU_SET_REGS, state);
+ } else {
+ ret = ioctl(fd, HAX_VCPU_GET_REGS, state);
+ }
+ return ret;
+}
+
+int hax_inject_interrupt(CPUArchState *env, int vector)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0) {
+ return -1;
+ }
+
+ ret = ioctl(fd, HAX_VCPU_IOCTL_INTERRUPT, &vector);
+ return ret;
+}
diff --git a/target/i386/hax-darwin.h b/target/i386/hax-darwin.h
new file mode 100644
index 0000000000..0c0968b77d
--- /dev/null
+++ b/target/i386/hax-darwin.h
@@ -0,0 +1,63 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef TARGET_I386_HAX_DARWIN_H
+#define TARGET_I386_HAX_DARWIN_H
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <stdarg.h>
+
+#define HAX_INVALID_FD (-1)
+static inline int hax_invalid_fd(hax_fd fd)
+{
+ return fd <= 0;
+}
+
+static inline void hax_mod_close(struct hax_state *hax)
+{
+ close(hax->fd);
+}
+
+static inline void hax_close_fd(hax_fd fd)
+{
+ close(fd);
+}
+
+/* HAX model level ioctl */
+#define HAX_IOCTL_VERSION _IOWR(0, 0x20, struct hax_module_version)
+#define HAX_IOCTL_CREATE_VM _IOWR(0, 0x21, uint32_t)
+#define HAX_IOCTL_DESTROY_VM _IOW(0, 0x22, uint32_t)
+#define HAX_IOCTL_CAPABILITY _IOR(0, 0x23, struct hax_capabilityinfo)
+
+#define HAX_VM_IOCTL_VCPU_CREATE _IOWR(0, 0x80, uint32_t)
+#define HAX_VM_IOCTL_ALLOC_RAM _IOWR(0, 0x81, struct hax_alloc_ram_info)
+#define HAX_VM_IOCTL_SET_RAM _IOWR(0, 0x82, struct hax_set_ram_info)
+#define HAX_VM_IOCTL_VCPU_DESTROY _IOW(0, 0x83, uint32_t)
+#define HAX_VM_IOCTL_NOTIFY_QEMU_VERSION _IOW(0, 0x84, struct hax_qemu_version)
+
+#define HAX_VCPU_IOCTL_RUN _IO(0, 0xc0)
+#define HAX_VCPU_IOCTL_SET_MSRS _IOWR(0, 0xc1, struct hax_msr_data)
+#define HAX_VCPU_IOCTL_GET_MSRS _IOWR(0, 0xc2, struct hax_msr_data)
+
+#define HAX_VCPU_IOCTL_SET_FPU _IOW(0, 0xc3, struct fx_layout)
+#define HAX_VCPU_IOCTL_GET_FPU _IOR(0, 0xc4, struct fx_layout)
+
+#define HAX_VCPU_IOCTL_SETUP_TUNNEL _IOWR(0, 0xc5, struct hax_tunnel_info)
+#define HAX_VCPU_IOCTL_INTERRUPT _IOWR(0, 0xc6, uint32_t)
+#define HAX_VCPU_SET_REGS _IOWR(0, 0xc7, struct vcpu_state_t)
+#define HAX_VCPU_GET_REGS _IOWR(0, 0xc8, struct vcpu_state_t)
+
+#endif /* TARGET_I386_HAX_DARWIN_H */
diff --git a/target/i386/hax-i386.h b/target/i386/hax-i386.h
new file mode 100644
index 0000000000..8ffe91fcb5
--- /dev/null
+++ b/target/i386/hax-i386.h
@@ -0,0 +1,94 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HAX_I386_H
+#define HAX_I386_H
+
+#include "cpu.h"
+#include "sysemu/hax.h"
+
+#ifdef CONFIG_DARWIN
+typedef int hax_fd;
+#endif
+
+#ifdef CONFIG_WIN32
+typedef HANDLE hax_fd;
+#endif
+
+extern struct hax_state hax_global;
+struct hax_vcpu_state {
+ hax_fd fd;
+ int vcpu_id;
+ struct hax_tunnel *tunnel;
+ unsigned char *iobuf;
+};
+
+struct hax_state {
+ hax_fd fd; /* the global hax device interface */
+ uint32_t version;
+ struct hax_vm *vm;
+ uint64_t mem_quota;
+};
+
+#define HAX_MAX_VCPU 0x10
+#define MAX_VM_ID 0x40
+#define MAX_VCPU_ID 0x40
+
+struct hax_vm {
+ hax_fd fd;
+ int id;
+ struct hax_vcpu_state *vcpus[HAX_MAX_VCPU];
+};
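+
+/*
+ * The structures above form a small hierarchy: hax_global (a struct
+ * hax_state) holds the handle to the HAX kernel module and points at a
+ * single struct hax_vm, which in turn owns up to HAX_MAX_VCPU
+ * struct hax_vcpu_state entries, each with its own device handle,
+ * communication tunnel and I/O buffer.
+ */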
+
+#ifdef NEED_CPU_H
+/* Functions exported to host-specific code */
+hax_fd hax_vcpu_get_fd(CPUArchState *env);
+int valid_hax_tunnel_size(uint16_t size);
+
+/* Host-specific functions */
+int hax_mod_version(struct hax_state *hax, struct hax_module_version *version);
+int hax_inject_interrupt(CPUArchState *env, int vector);
+struct hax_vm *hax_vm_create(struct hax_state *hax);
+int hax_vcpu_run(struct hax_vcpu_state *vcpu);
+int hax_vcpu_create(int id);
+int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state,
+ int set);
+int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set);
+int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set);
+#endif
+
+int hax_vm_destroy(struct hax_vm *vm);
+int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap);
+int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion);
+int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags);
+
+/* Common host functions */
+int hax_host_create_vm(struct hax_state *hax, int *vm_id);
+hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id);
+int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid);
+hax_fd hax_host_open_vcpu(int vmid, int vcpuid);
+int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu);
+hax_fd hax_mod_open(void);
+void hax_memory_init(void);
+
+#ifdef CONFIG_DARWIN
+#include "target/i386/hax-darwin.h"
+#endif
+
+#ifdef CONFIG_WIN32
+#include "target/i386/hax-windows.h"
+#endif
+
+#include "target/i386/hax-interface.h"
+
+#endif /* HAX_I386_H */
diff --git a/target/i386/hax-interface.h b/target/i386/hax-interface.h
new file mode 100644
index 0000000000..d141308831
--- /dev/null
+++ b/target/i386/hax-interface.h
@@ -0,0 +1,361 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* Interface with HAX kernel module */
+
+#ifndef HAX_INTERFACE_H
+#define HAX_INTERFACE_H
+
+/* fx_layout has 3 formats, see table 3-56; it is 512 bytes in size */
+struct fx_layout {
+ uint16_t fcw;
+ uint16_t fsw;
+ uint8_t ftw;
+ uint8_t res1;
+ uint16_t fop;
+ union {
+ struct {
+ uint32_t fip;
+ uint16_t fcs;
+ uint16_t res2;
+ };
+ uint64_t fpu_ip;
+ };
+ union {
+ struct {
+ uint32_t fdp;
+ uint16_t fds;
+ uint16_t res3;
+ };
+ uint64_t fpu_dp;
+ };
+ uint32_t mxcsr;
+ uint32_t mxcsr_mask;
+ uint8_t st_mm[8][16];
+ uint8_t mmx_1[8][16];
+ uint8_t mmx_2[8][16];
+ uint8_t pad[96];
+} __attribute__ ((aligned(8)));
+
+struct vmx_msr {
+ uint64_t entry;
+ uint64_t value;
+} __attribute__ ((__packed__));
+
+/*
+ * A fixed array is not ideal, but it makes Mac support a bit easier by
+ * avoiding memory mapping or copyin work.
+ */
+#define HAX_MAX_MSR_ARRAY 0x20
+struct hax_msr_data {
+ uint16_t nr_msr;
+ uint16_t done;
+ uint16_t pad[2];
+ struct vmx_msr entries[HAX_MAX_MSR_ARRAY];
+} __attribute__ ((__packed__));
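+
+/*
+ * Illustrative use (sketch): to write a single MSR through hax_sync_msr()
+ * a caller would fill the structure roughly like
+ *
+ *     struct hax_msr_data md = { .nr_msr = 1 };
+ *     md.entries[0].entry = 0x174;    // IA32_SYSENTER_CS, for instance
+ *     md.entries[0].value = value;    // placeholder for the value to load
+ *     hax_sync_msr(env, &md, 1);      // set != 0 selects SET_MSRS
+ */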
+
+union interruptibility_state_t {
+ uint32_t raw;
+ struct {
+ uint32_t sti_blocking:1;
+ uint32_t movss_blocking:1;
+ uint32_t smi_blocking:1;
+ uint32_t nmi_blocking:1;
+ uint32_t reserved:28;
+ };
+ uint64_t pad;
+};
+
+typedef union interruptibility_state_t interruptibility_state_t;
+
+/* Segment descriptor */
+struct segment_desc_t {
+ uint16_t selector;
+ uint16_t _dummy;
+ uint32_t limit;
+ uint64_t base;
+ union {
+ struct {
+ uint32_t type:4;
+ uint32_t desc:1;
+ uint32_t dpl:2;
+ uint32_t present:1;
+ uint32_t:4;
+ uint32_t available:1;
+ uint32_t long_mode:1;
+ uint32_t operand_size:1;
+ uint32_t granularity:1;
+ uint32_t null:1;
+ uint32_t:15;
+ };
+ uint32_t ar;
+ };
+ uint32_t ipad;
+};
+
+typedef struct segment_desc_t segment_desc_t;
+
+struct vcpu_state_t {
+ union {
+ uint64_t _regs[16];
+ struct {
+ union {
+ struct {
+ uint8_t _al, _ah;
+ };
+ uint16_t _ax;
+ uint32_t _eax;
+ uint64_t _rax;
+ };
+ union {
+ struct {
+ uint8_t _cl, _ch;
+ };
+ uint16_t _cx;
+ uint32_t _ecx;
+ uint64_t _rcx;
+ };
+ union {
+ struct {
+ uint8_t _dl, _dh;
+ };
+ uint16_t _dx;
+ uint32_t _edx;
+ uint64_t _rdx;
+ };
+ union {
+ struct {
+ uint8_t _bl, _bh;
+ };
+ uint16_t _bx;
+ uint32_t _ebx;
+ uint64_t _rbx;
+ };
+ union {
+ uint16_t _sp;
+ uint32_t _esp;
+ uint64_t _rsp;
+ };
+ union {
+ uint16_t _bp;
+ uint32_t _ebp;
+ uint64_t _rbp;
+ };
+ union {
+ uint16_t _si;
+ uint32_t _esi;
+ uint64_t _rsi;
+ };
+ union {
+ uint16_t _di;
+ uint32_t _edi;
+ uint64_t _rdi;
+ };
+
+ uint64_t _r8;
+ uint64_t _r9;
+ uint64_t _r10;
+ uint64_t _r11;
+ uint64_t _r12;
+ uint64_t _r13;
+ uint64_t _r14;
+ uint64_t _r15;
+ };
+ };
+
+ union {
+ uint32_t _eip;
+ uint64_t _rip;
+ };
+
+ union {
+ uint32_t _eflags;
+ uint64_t _rflags;
+ };
+
+ segment_desc_t _cs;
+ segment_desc_t _ss;
+ segment_desc_t _ds;
+ segment_desc_t _es;
+ segment_desc_t _fs;
+ segment_desc_t _gs;
+ segment_desc_t _ldt;
+ segment_desc_t _tr;
+
+ segment_desc_t _gdt;
+ segment_desc_t _idt;
+
+ uint64_t _cr0;
+ uint64_t _cr2;
+ uint64_t _cr3;
+ uint64_t _cr4;
+
+ uint64_t _dr0;
+ uint64_t _dr1;
+ uint64_t _dr2;
+ uint64_t _dr3;
+ uint64_t _dr6;
+ uint64_t _dr7;
+ uint64_t _pde;
+
+ uint32_t _efer;
+
+ uint32_t _sysenter_cs;
+ uint64_t _sysenter_eip;
+ uint64_t _sysenter_esp;
+
+ uint32_t _activity_state;
+ uint32_t pad;
+ interruptibility_state_t _interruptibility_state;
+};
+
+/* HAX exit status */
+enum exit_status {
+ /* IO port request */
+ HAX_EXIT_IO = 1,
+ /* MMIO instruction emulation */
+ HAX_EXIT_MMIO,
+ /* QEMU emulation mode request, currently means the guest has entered non-PG mode */
+ HAX_EXIT_REAL,
+ /*
+ * Interrupt window open: QEMU can inject an interrupt now.
+ * Also used when a signal is pending, since QEMU usually needs to
+ * check for interrupts at that point.
+ */
+ HAX_EXIT_INTERRUPT,
+ /* Unknown vmexit, mostly triggers a reboot */
+ HAX_EXIT_UNKNOWN_VMEXIT,
+ /* HALT from guest */
+ HAX_EXIT_HLT,
+ /* Reboot request, e.g. because of a triple fault in the guest */
+ HAX_EXIT_STATECHANGE,
+ /* the vcpu is only paused during destroy now, so simply return to HAX */
+ HAX_EXIT_PAUSED,
+ HAX_EXIT_FAST_MMIO,
+};
+
+/*
+ * The interface definition:
+ * 1. vcpu_run returns 0 on success, anything else means it failed
+ * 2. exit_status holds the exit reason, as listed in enum exit_status
+ * 3. exit_reason is the raw VMX exit reason
+ */
+struct hax_tunnel {
+ uint32_t _exit_reason;
+ uint32_t _exit_flag;
+ uint32_t _exit_status;
+ uint32_t user_event_pending;
+ int ready_for_interrupt_injection;
+ int request_interrupt_window;
+ union {
+ struct {
+ /* 0: read, 1: write */
+#define HAX_EXIT_IO_IN 1
+#define HAX_EXIT_IO_OUT 0
+ uint8_t _direction;
+ uint8_t _df;
+ uint16_t _size;
+ uint16_t _port;
+ uint16_t _count;
+ uint8_t _flags;
+ uint8_t _pad0;
+ uint16_t _pad1;
+ uint32_t _pad2;
+ uint64_t _vaddr;
+ } pio;
+ struct {
+ uint64_t gla;
+ } mmio;
+ struct {
+ } state;
+ };
+} __attribute__ ((__packed__));
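+
+/*
+ * Illustrative handling (sketch): after hax_vcpu_run() returns, the caller
+ * dispatches on _exit_status; for a port I/O exit this looks roughly like
+ *
+ *     struct hax_tunnel *ht = vcpu->tunnel;
+ *     if (ht->_exit_status == HAX_EXIT_IO) {
+ *         // ht->pio._port, ._size, ._count and ._direction describe the
+ *         // access; the bytes themselves travel through vcpu->iobuf
+ *     }
+ */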
+
+struct hax_module_version {
+ uint32_t compat_version;
+ uint32_t cur_version;
+} __attribute__ ((__packed__));
+
+/* This interface is supported only from API version 2 onwards */
+struct hax_qemu_version {
+ /* Current API version in QEMU */
+ uint32_t cur_version;
+ /* The minimum API version supported by QEMU */
+ uint32_t min_version;
+} __attribute__ ((__packed__));
+
+/* The Mac-specific interface to QEMU, mostly ioctl-related */
+struct hax_tunnel_info {
+ uint64_t va;
+ uint64_t io_va;
+ uint16_t size;
+ uint16_t pad[3];
+} __attribute__ ((__packed__));
+
+struct hax_alloc_ram_info {
+ uint32_t size;
+ uint32_t pad;
+ uint64_t va;
+} __attribute__ ((__packed__));
+#define HAX_RAM_INFO_ROM 0x01 /* Read-Only */
+#define HAX_RAM_INFO_INVALID 0x80 /* Unmapped, usually used for MMIO */
+struct hax_set_ram_info {
+ uint64_t pa_start;
+ uint32_t size;
+ uint8_t flags;
+ uint8_t pad[3];
+ uint64_t va;
+} __attribute__ ((__packed__));
+
+#define HAX_CAP_STATUS_WORKING 0x1
+#define HAX_CAP_STATUS_NOTWORKING 0x0
+#define HAX_CAP_WORKSTATUS_MASK 0x1
+
+#define HAX_CAP_FAILREASON_VT 0x1
+#define HAX_CAP_FAILREASON_NX 0x2
+
+#define HAX_CAP_MEMQUOTA 0x2
+#define HAX_CAP_UG 0x4
+
+struct hax_capabilityinfo {
+ /* bit 0: 1 - working
+ * 0 - not working, possibly because VT or NX is disabled
+ * bit 1: 1 - memory limitation working
+ * 0 - no memory limitation
+ */
+ uint16_t wstatus;
+ /* valid when not working
+ * bit 0: VT not enabled
+ * bit 1: NX not enabled
+ */
+ uint16_t winfo;
+ uint32_t pad;
+ uint64_t mem_quota;
+} __attribute__ ((__packed__));
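+
+/*
+ * Illustrative check (sketch): a caller inspecting the returned capability
+ * info would typically do something like
+ *
+ *     if (!(cap.wstatus & HAX_CAP_WORKSTATUS_MASK)) {
+ *         // not working; cap.winfo & HAX_CAP_FAILREASON_VT / _NX says why
+ *     } else if (cap.wstatus & HAX_CAP_MEMQUOTA) {
+ *         // cap.mem_quota holds the remaining memory quota
+ *     }
+ */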
+
+struct hax_fastmmio {
+ uint64_t gpa;
+ union {
+ uint64_t value;
+ uint64_t gpa2; /* since HAX API v4 */
+ };
+ uint8_t size;
+ uint8_t direction;
+ uint16_t reg_index;
+ uint32_t pad0;
+ uint64_t _cr0;
+ uint64_t _cr2;
+ uint64_t _cr3;
+ uint64_t _cr4;
+} __attribute__ ((__packed__));
+#endif /* HAX_INTERFACE_H */
diff --git a/target/i386/hax-mem.c b/target/i386/hax-mem.c
new file mode 100644
index 0000000000..2884040021
--- /dev/null
+++ b/target/i386/hax-mem.c
@@ -0,0 +1,289 @@
+/*
+ * HAX memory mapping operations
+ *
+ * Copyright (c) 2015-16 Intel Corporation
+ * Copyright 2016 Google, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/address-spaces.h"
+#include "exec/exec-all.h"
+
+#include "target/i386/hax-i386.h"
+#include "qemu/queue.h"
+
+#define DEBUG_HAX_MEM 0
+
+#define DPRINTF(fmt, ...) \
+ do { \
+ if (DEBUG_HAX_MEM) { \
+ fprintf(stdout, fmt, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+/**
+ * HAXMapping: describes a pending guest physical memory mapping
+ *
+ * @start_pa: a guest physical address marking the start of the region; must be
+ * page-aligned
+ * @size: the size of the region in bytes; must be a multiple of the page
+ * size
+ * @host_va: the host virtual address of the start of the mapping
+ * @flags: mapping parameters e.g. HAX_RAM_INFO_ROM or HAX_RAM_INFO_INVALID
+ * @entry: additional fields for linking #HAXMapping instances together
+ */
+typedef struct HAXMapping {
+ uint64_t start_pa;
+ uint32_t size;
+ uint64_t host_va;
+ int flags;
+ QTAILQ_ENTRY(HAXMapping) entry;
+} HAXMapping;
+
+/*
+ * A doubly-linked list (actually a tail queue) of the pending page mappings
+ * for the ongoing memory transaction.
+ *
+ * It is used to optimize the number of page mapping updates done through the
+ * kernel module. For example, it's effective when a driver is digging an MMIO
+ * hole inside an existing memory mapping. It will get a deletion of the whole
+ * region, then the addition of the 2 remaining RAM areas around the hole and
+ * finally the memory transaction commit. During the commit, it will effectively
+ * send to the kernel only the removal of the pages from the MMIO hole after
+ * having computed locally the result of the deletion and additions.
+ */
+static QTAILQ_HEAD(HAXMappingListHead, HAXMapping) mappings =
+ QTAILQ_HEAD_INITIALIZER(mappings);
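+
+/*
+ * Example of the effect described above (illustrative addresses): punching
+ * a 128 KB MMIO hole into a 1 MB RAM mapping at 0x000c0000 queues
+ *
+ *     - 0x000c0000->0x001c0000  (removal of the whole old mapping)
+ *     + 0x000c0000->0x000e0000  (re-add of the RAM below the hole)
+ *     + 0x00100000->0x001c0000  (re-add of the RAM above the hole)
+ *
+ * and hax_update_mapping() cancels the matching parts, so the commit only
+ * sends the invalidation of 0x000e0000->0x00100000 to the kernel module.
+ */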
+
+/**
+ * hax_mapping_dump_list: dumps @mappings to stdout (for debugging)
+ */
+static void hax_mapping_dump_list(void)
+{
+ HAXMapping *entry;
+
+ DPRINTF("%s updates:\n", __func__);
+ QTAILQ_FOREACH(entry, &mappings, entry) {
+ DPRINTF("\t%c 0x%016" PRIx64 "->0x%016" PRIx64 " VA 0x%016" PRIx64
+ "%s\n", entry->flags & HAX_RAM_INFO_INVALID ? '-' : '+',
+ entry->start_pa, entry->start_pa + entry->size, entry->host_va,
+ entry->flags & HAX_RAM_INFO_ROM ? " ROM" : "");
+ }
+}
+
+static void hax_insert_mapping_before(HAXMapping *next, uint64_t start_pa,
+ uint32_t size, uint64_t host_va,
+ uint8_t flags)
+{
+ HAXMapping *entry;
+
+ entry = g_malloc0(sizeof(*entry));
+ entry->start_pa = start_pa;
+ entry->size = size;
+ entry->host_va = host_va;
+ entry->flags = flags;
+ if (!next) {
+ QTAILQ_INSERT_TAIL(&mappings, entry, entry);
+ } else {
+ QTAILQ_INSERT_BEFORE(next, entry, entry);
+ }
+}
+
+static bool hax_mapping_is_opposite(HAXMapping *entry, uint64_t host_va,
+ uint8_t flags)
+{
+ /* removed then added without change for the read-only flag */
+ bool nop_flags = (entry->flags ^ flags) == HAX_RAM_INFO_INVALID;
+
+ return (entry->host_va == host_va) && nop_flags;
+}
+
+static void hax_update_mapping(uint64_t start_pa, uint32_t size,
+ uint64_t host_va, uint8_t flags)
+{
+ uint64_t end_pa = start_pa + size;
+ uint32_t chunk_sz;
+ HAXMapping *entry, *next;
+
+ QTAILQ_FOREACH_SAFE(entry, &mappings, entry, next) {
+ if (start_pa >= entry->start_pa + entry->size) {
+ continue;
+ }
+ if (start_pa < entry->start_pa) {
+ chunk_sz = end_pa <= entry->start_pa ? size
+ : entry->start_pa - start_pa;
+ hax_insert_mapping_before(entry, start_pa, chunk_sz,
+ host_va, flags);
+ start_pa += chunk_sz;
+ host_va += chunk_sz;
+ size -= chunk_sz;
+ }
+ chunk_sz = MIN(size, entry->size);
+ if (chunk_sz) {
+ bool nop = hax_mapping_is_opposite(entry, host_va, flags);
+ bool partial = chunk_sz < entry->size;
+ if (partial) {
+ /* remove the beginning of the existing chunk */
+ entry->start_pa += chunk_sz;
+ entry->host_va += chunk_sz;
+ entry->size -= chunk_sz;
+ if (!nop) {
+ hax_insert_mapping_before(entry, start_pa, chunk_sz,
+ host_va, flags);
+ }
+ } else { /* affects the full mapping entry */
+ if (nop) { /* no change to this mapping, remove it */
+ QTAILQ_REMOVE(&mappings, entry, entry);
+ g_free(entry);
+ } else { /* update mapping properties */
+ entry->host_va = host_va;
+ entry->flags = flags;
+ }
+ }
+ start_pa += chunk_sz;
+ host_va += chunk_sz;
+ size -= chunk_sz;
+ }
+ if (!size) { /* we are done */
+ break;
+ }
+ }
+ if (size) { /* add the leftover */
+ hax_insert_mapping_before(NULL, start_pa, size, host_va, flags);
+ }
+}
+
+static void hax_process_section(MemoryRegionSection *section, uint8_t flags)
+{
+ MemoryRegion *mr = section->mr;
+ hwaddr start_pa = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ unsigned int delta;
+ uint64_t host_va;
+
+ /* We only care about RAM pages */
+ if (!memory_region_is_ram(mr)) {
+ return;
+ }
+
+ /* Adjust start_pa and size so that they are page-aligned. (Cf
+ * kvm_set_phys_mem() in kvm-all.c).
+ */
+ delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
+ delta &= ~qemu_real_host_page_mask;
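+ /* For example, with 4 KiB pages a start_pa of 0x1234 gives
+ * delta = 0x1000 - 0x234 = 0xdcc, so the mapping is advanced to the
+ * next page boundary at 0x2000 and shrunk accordingly. */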
+ if (delta > size) {
+ return;
+ }
+ start_pa += delta;
+ size -= delta;
+ size &= qemu_real_host_page_mask;
+ if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+ return;
+ }
+
+ host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
+ + section->offset_within_region + delta;
+ if (memory_region_is_rom(section->mr)) {
+ flags |= HAX_RAM_INFO_ROM;
+ }
+
+ /* the kernel module interface uses 32-bit sizes (but we could split...) */
+ g_assert(size <= UINT32_MAX);
+
+ hax_update_mapping(start_pa, size, host_va, flags);
+}
+
+static void hax_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ memory_region_ref(section->mr);
+ hax_process_section(section, 0);
+}
+
+static void hax_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ hax_process_section(section, HAX_RAM_INFO_INVALID);
+ memory_region_unref(section->mr);
+}
+
+static void hax_transaction_begin(MemoryListener *listener)
+{
+ g_assert(QTAILQ_EMPTY(&mappings));
+}
+
+static void hax_transaction_commit(MemoryListener *listener)
+{
+ if (!QTAILQ_EMPTY(&mappings)) {
+ HAXMapping *entry, *next;
+
+ if (DEBUG_HAX_MEM) {
+ hax_mapping_dump_list();
+ }
+ QTAILQ_FOREACH_SAFE(entry, &mappings, entry, next) {
+ if (entry->flags & HAX_RAM_INFO_INVALID) {
+ /* for unmapping, put the values expected by the kernel */
+ entry->flags = HAX_RAM_INFO_INVALID;
+ entry->host_va = 0;
+ }
+ if (hax_set_ram(entry->start_pa, entry->size,
+ entry->host_va, entry->flags)) {
+ fprintf(stderr, "%s: Failed mapping @0x%016" PRIx64 "+0x%"
+ PRIx32 " flags %02x\n", __func__, entry->start_pa,
+ entry->size, entry->flags);
+ }
+ QTAILQ_REMOVE(&mappings, entry, entry);
+ g_free(entry);
+ }
+ }
+}
+
+/* currently we fake dirty bitmap sync by always reporting everything dirty */
+static void hax_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ MemoryRegion *mr = section->mr;
+
+ if (!memory_region_is_ram(mr)) {
+ /* Skip MMIO regions */
+ return;
+ }
+
+ memory_region_set_dirty(mr, 0, int128_get64(section->size));
+}
+
+static MemoryListener hax_memory_listener = {
+ .begin = hax_transaction_begin,
+ .commit = hax_transaction_commit,
+ .region_add = hax_region_add,
+ .region_del = hax_region_del,
+ .log_sync = hax_log_sync,
+ .priority = 10,
+};
+
+static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
+{
+ /*
+ * In HAX, QEMU allocates the virtual address, and the HAX kernel
+ * module populates it with physical memory. There is currently no
+ * paging, so the user should make sure there is enough free memory
+ * in advance.
+ */
+ if (hax_populate_ram((uint64_t)(uintptr_t)host, size) < 0) {
+ fprintf(stderr, "HAX failed to populate RAM");
+ abort();
+ }
+}
+
+static struct RAMBlockNotifier hax_ram_notifier = {
+ .ram_block_added = hax_ram_block_added,
+};
+
+void hax_memory_init(void)
+{
+ ram_block_notifier_add(&hax_ram_notifier);
+ memory_listener_register(&hax_memory_listener, &address_space_memory);
+}
diff --git a/target/i386/hax-windows.c b/target/i386/hax-windows.c
new file mode 100644
index 0000000000..15a180b646
--- /dev/null
+++ b/target/i386/hax-windows.c
@@ -0,0 +1,479 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "hax-i386.h"
+
+/*
+ * Return 0 on success, -1 when the driver is not loaded,
+ * and another negative value for any other failure.
+ */
+static int hax_open_device(hax_fd *fd)
+{
+ uint32_t errNum = 0;
+ HANDLE hDevice;
+
+ if (!fd) {
+ return -2;
+ }
+
+ hDevice = CreateFile("\\\\.\\HAX",
+ GENERIC_READ | GENERIC_WRITE,
+ 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+
+ if (hDevice == INVALID_HANDLE_VALUE) {
+ fprintf(stderr, "Failed to open the HAX device!\n");
+ errNum = GetLastError();
+ if (errNum == ERROR_FILE_NOT_FOUND) {
+ return -1;
+ }
+ return -2;
+ }
+ *fd = hDevice;
+ return 0;
+}
+
+/* Open the HAX module device and return its handle */
+hax_fd hax_mod_open(void)
+{
+ int ret;
+ hax_fd fd = NULL;
+
+ ret = hax_open_device(&fd);
+ if (ret != 0) {
+ fprintf(stderr, "Open HAX device failed\n");
+ }
+
+ return fd;
+}
+
+int hax_populate_ram(uint64_t va, uint32_t size)
+{
+ int ret;
+ struct hax_alloc_ram_info info;
+ HANDLE hDeviceVM;
+ DWORD dSize = 0;
+
+ if (!hax_global.vm || !hax_global.vm->fd) {
+ fprintf(stderr, "Allocate memory before vm create?\n");
+ return -EINVAL;
+ }
+
+ info.size = size;
+ info.va = va;
+
+ hDeviceVM = hax_global.vm->fd;
+
+ ret = DeviceIoControl(hDeviceVM,
+ HAX_VM_IOCTL_ALLOC_RAM,
+ &info, sizeof(info), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+
+ if (!ret) {
+ fprintf(stderr, "Failed to allocate %x memory\n", size);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags)
+{
+ struct hax_set_ram_info info;
+ HANDLE hDeviceVM = hax_global.vm->fd;
+ DWORD dSize = 0;
+ int ret;
+
+ info.pa_start = start_pa;
+ info.size = size;
+ info.va = host_va;
+ info.flags = (uint8_t) flags;
+
+ ret = DeviceIoControl(hDeviceVM, HAX_VM_IOCTL_SET_RAM,
+ &info, sizeof(info), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
+{
+ int ret;
+ HANDLE hDevice = hax->fd; /* handle to hax module */
+ DWORD dSize = 0;
+ DWORD err = 0;
+
+ if (hax_invalid_fd(hDevice)) {
+ fprintf(stderr, "Invalid fd for hax device!\n");
+ return -ENODEV;
+ }
+
+ ret = DeviceIoControl(hDevice, HAX_IOCTL_CAPABILITY, NULL, 0, cap,
+ sizeof(*cap), &dSize, (LPOVERLAPPED) NULL);
+
+ if (!ret) {
+ err = GetLastError();
+ if (err == ERROR_INSUFFICIENT_BUFFER || err == ERROR_MORE_DATA) {
+ fprintf(stderr, "hax capability is too long to hold.\n");
+ }
+ fprintf(stderr, "Failed to get Hax capability:%luu\n", err);
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
+{
+ int ret;
+ HANDLE hDevice = hax->fd; /* handle to hax module */
+ DWORD dSize = 0;
+ DWORD err = 0;
+
+ if (hax_invalid_fd(hDevice)) {
+ fprintf(stderr, "Invalid fd for hax device!\n");
+ return -ENODEV;
+ }
+
+ ret = DeviceIoControl(hDevice,
+ HAX_IOCTL_VERSION,
+ NULL, 0,
+ version, sizeof(*version), &dSize,
+ (LPOVERLAPPED) NULL);
+
+ if (!ret) {
+ err = GetLastError();
+ if (err == ERROR_INSUFFICIENT_BUFFER || err == ERROR_MORE_DATA) {
+ fprintf(stderr, "hax module verion is too long to hold.\n");
+ }
+ fprintf(stderr, "Failed to get Hax module version:%lu\n", err);
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+static char *hax_vm_devfs_string(int vm_id)
+{
+ char *name;
+
+ if (vm_id > MAX_VM_ID) {
+ fprintf(stderr, "Too big VM id\n");
+ return NULL;
+ }
+
+#define HAX_VM_DEVFS "\\\\.\\hax_vmxx"
+ name = g_strdup(HAX_VM_DEVFS);
+ if (!name) {
+ return NULL;
+ }
+
+ snprintf(name, sizeof HAX_VM_DEVFS, "\\\\.\\hax_vm%02d", vm_id);
+ return name;
+}
+
+static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
+{
+ char *name;
+
+ if (vm_id > MAX_VM_ID || vcpu_id > MAX_VCPU_ID) {
+ fprintf(stderr, "Too big vm id %x or vcpu id %x\n", vm_id, vcpu_id);
+ return NULL;
+ }
+
+#define HAX_VCPU_DEVFS "\\\\.\\hax_vmxx_vcpuxx"
+ name = g_strdup(HAX_VCPU_DEVFS);
+ if (!name) {
+ return NULL;
+ }
+
+ snprintf(name, sizeof HAX_VCPU_DEVFS, "\\\\.\\hax_vm%02d_vcpu%02d",
+ vm_id, vcpu_id);
+ return name;
+}
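+
+/*
+ * For example, vm_id 1 and vcpu_id 2 map to the device paths
+ * "\\.\hax_vm01" and "\\.\hax_vm01_vcpu02" respectively.
+ */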
+
+int hax_host_create_vm(struct hax_state *hax, int *vmid)
+{
+ int ret;
+ int vm_id = 0;
+ DWORD dSize = 0;
+
+ if (hax_invalid_fd(hax->fd)) {
+ return -EINVAL;
+ }
+
+ if (hax->vm) {
+ return 0;
+ }
+
+ ret = DeviceIoControl(hax->fd,
+ HAX_IOCTL_CREATE_VM,
+ NULL, 0, &vm_id, sizeof(vm_id), &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret) {
+ fprintf(stderr, "Failed to create VM. Error code: %lu\n",
+ GetLastError());
+ return -1;
+ }
+ *vmid = vm_id;
+ return 0;
+}
+
+hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
+{
+ char *vm_name = NULL;
+ hax_fd hDeviceVM;
+
+ vm_name = hax_vm_devfs_string(vm_id);
+ if (!vm_name) {
+ fprintf(stderr, "Failed to open VM. VM name is null\n");
+ return INVALID_HANDLE_VALUE;
+ }
+
+ hDeviceVM = CreateFile(vm_name,
+ GENERIC_READ | GENERIC_WRITE,
+ 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (hDeviceVM == INVALID_HANDLE_VALUE) {
+ fprintf(stderr, "Open the vm device error:%s, ec:%lu\n",
+ vm_name, GetLastError());
+ }
+
+ g_free(vm_name);
+ return hDeviceVM;
+}
+
+int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
+{
+ int ret;
+ DWORD dSize = 0;
+ if (hax_invalid_fd(vm_fd)) {
+ return -EINVAL;
+ }
+ ret = DeviceIoControl(vm_fd,
+ HAX_VM_IOCTL_NOTIFY_QEMU_VERSION,
+ qversion, sizeof(struct hax_qemu_version),
+ NULL, 0, &dSize, (LPOVERLAPPED) NULL);
+ if (!ret) {
+ fprintf(stderr, "Failed to notify qemu API version\n");
+ return -1;
+ }
+ return 0;
+}
+
+int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
+{
+ int ret;
+ DWORD dSize = 0;
+
+ ret = DeviceIoControl(vm_fd,
+ HAX_VM_IOCTL_VCPU_CREATE,
+ &vcpuid, sizeof(vcpuid), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret) {
+ fprintf(stderr, "Failed to create vcpu %x\n", vcpuid);
+ return -1;
+ }
+
+ return 0;
+}
+
+hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
+{
+ char *devfs_path = NULL;
+ hax_fd hDeviceVCPU;
+
+ devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
+ if (!devfs_path) {
+ fprintf(stderr, "Failed to get the devfs\n");
+ return INVALID_HANDLE_VALUE;
+ }
+
+ hDeviceVCPU = CreateFile(devfs_path,
+ GENERIC_READ | GENERIC_WRITE,
+ 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,
+ NULL);
+
+ if (hDeviceVCPU == INVALID_HANDLE_VALUE) {
+ fprintf(stderr, "Failed to open the vcpu devfs\n");
+ }
+ g_free(devfs_path);
+ return hDeviceVCPU;
+}
+
+int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
+{
+ hax_fd hDeviceVCPU = vcpu->fd;
+ int ret;
+ struct hax_tunnel_info info;
+ DWORD dSize = 0;
+
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_SETUP_TUNNEL,
+ NULL, 0, &info, sizeof(info), &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret) {
+ fprintf(stderr, "Failed to setup the hax tunnel\n");
+ return -1;
+ }
+
+ if (!valid_hax_tunnel_size(info.size)) {
+ fprintf(stderr, "Invalid hax tunnel size %x\n", info.size);
+ ret = -EINVAL;
+ return ret;
+ }
+ vcpu->tunnel = (struct hax_tunnel *) (intptr_t) (info.va);
+ vcpu->iobuf = (unsigned char *) (intptr_t) (info.io_va);
+ return 0;
+}
+
+int hax_vcpu_run(struct hax_vcpu_state *vcpu)
+{
+ int ret;
+ HANDLE hDeviceVCPU = vcpu->fd;
+ DWORD dSize = 0;
+
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_RUN,
+ NULL, 0, NULL, 0, &dSize, (LPOVERLAPPED) NULL);
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize = 0;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd)) {
+ return -1;
+ }
+
+ hDeviceVCPU = fd;
+
+ if (set) {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_SET_FPU,
+ fl, sizeof(*fl), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+ } else {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_GET_FPU,
+ NULL, 0, fl, sizeof(*fl), &dSize,
+ (LPOVERLAPPED) NULL);
+ }
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize = 0;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd)) {
+ return -1;
+ }
+ hDeviceVCPU = fd;
+
+ if (set) {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_SET_MSRS,
+ msrs, sizeof(*msrs),
+ msrs, sizeof(*msrs), &dSize, (LPOVERLAPPED) NULL);
+ } else {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_GET_MSRS,
+ msrs, sizeof(*msrs),
+ msrs, sizeof(*msrs), &dSize, (LPOVERLAPPED) NULL);
+ }
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd)) {
+ return -1;
+ }
+
+ hDeviceVCPU = fd;
+
+ if (set) {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_SET_REGS,
+ state, sizeof(*state),
+ NULL, 0, &dSize, (LPOVERLAPPED) NULL);
+ } else {
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_GET_REGS,
+ NULL, 0,
+ state, sizeof(*state), &dSize,
+ (LPOVERLAPPED) NULL);
+ }
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+int hax_inject_interrupt(CPUArchState *env, int vector)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd)) {
+ return -1;
+ }
+
+ hDeviceVCPU = fd;
+
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_INTERRUPT,
+ &vector, sizeof(vector), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret) {
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
diff --git a/target/i386/hax-windows.h b/target/i386/hax-windows.h
new file mode 100644
index 0000000000..1d8f68de91
--- /dev/null
+++ b/target/i386/hax-windows.h
@@ -0,0 +1,89 @@
+/*
+ * QEMU HAXM support
+ *
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef TARGET_I386_HAX_WINDOWS_H
+#define TARGET_I386_HAX_WINDOWS_H
+
+#include <windows.h>
+#include <memory.h>
+#include <malloc.h>
+#include <winioctl.h>
+#include <string.h>
+#include <stdio.h>
+#include <windef.h>
+
+#define HAX_INVALID_FD INVALID_HANDLE_VALUE
+
+static inline void hax_mod_close(struct hax_state *hax)
+{
+ CloseHandle(hax->fd);
+}
+
+static inline void hax_close_fd(hax_fd fd)
+{
+ CloseHandle(fd);
+}
+
+static inline int hax_invalid_fd(hax_fd fd)
+{
+ return (fd == INVALID_HANDLE_VALUE);
+}
+
+#define HAX_DEVICE_TYPE 0x4000
+
+#define HAX_IOCTL_VERSION CTL_CODE(HAX_DEVICE_TYPE, 0x900, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_IOCTL_CREATE_VM CTL_CODE(HAX_DEVICE_TYPE, 0x901, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_IOCTL_CAPABILITY CTL_CODE(HAX_DEVICE_TYPE, 0x910, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VM_IOCTL_VCPU_CREATE CTL_CODE(HAX_DEVICE_TYPE, 0x902, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VM_IOCTL_ALLOC_RAM CTL_CODE(HAX_DEVICE_TYPE, 0x903, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VM_IOCTL_SET_RAM CTL_CODE(HAX_DEVICE_TYPE, 0x904, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VM_IOCTL_VCPU_DESTROY CTL_CODE(HAX_DEVICE_TYPE, 0x905, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VCPU_IOCTL_RUN CTL_CODE(HAX_DEVICE_TYPE, 0x906, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_SET_MSRS CTL_CODE(HAX_DEVICE_TYPE, 0x907, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_GET_MSRS CTL_CODE(HAX_DEVICE_TYPE, 0x908, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_SET_FPU CTL_CODE(HAX_DEVICE_TYPE, 0x909, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_GET_FPU CTL_CODE(HAX_DEVICE_TYPE, 0x90a, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VCPU_IOCTL_SETUP_TUNNEL CTL_CODE(HAX_DEVICE_TYPE, 0x90b, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_INTERRUPT CTL_CODE(HAX_DEVICE_TYPE, 0x90c, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_SET_REGS CTL_CODE(HAX_DEVICE_TYPE, 0x90d, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_GET_REGS CTL_CODE(HAX_DEVICE_TYPE, 0x90e, \
+ METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VM_IOCTL_NOTIFY_QEMU_VERSION CTL_CODE(HAX_DEVICE_TYPE, 0x910, \
+ METHOD_BUFFERED, \
+ FILE_ANY_ACCESS)
+#endif /* TARGET_I386_HAX_WINDOWS_H */
diff --git a/target/i386/helper.c b/target/i386/helper.c
index 43e87ddba0..e2af3404f2 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -24,6 +24,7 @@
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
+#include "sysemu/hw_accel.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif
@@ -586,7 +587,7 @@ void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
/* when a20 is changed, all the MMU mappings are invalid, so
we must flush everything */
- tlb_flush(cs, 1);
+ tlb_flush(cs);
env->a20_mask = ~(1 << 20) | (a20_state << 20);
}
}
@@ -599,7 +600,7 @@ void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
(env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
#ifdef TARGET_X86_64
@@ -641,7 +642,7 @@ void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
if (env->cr[0] & CR0_PG_MASK) {
qemu_log_mask(CPU_LOG_MMU,
"CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
- tlb_flush(CPU(cpu), 0);
+ tlb_flush(CPU(cpu));
}
}
@@ -656,7 +657,7 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
if ((new_cr4 ^ env->cr[4]) &
(CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
/* Clear bits we're going to recompute. */
diff --git a/target/i386/helper.h b/target/i386/helper.h
index bd9b2cf677..4c1aafffd6 100644
--- a/target/i386/helper.h
+++ b/target/i386/helper.h
@@ -202,8 +202,6 @@ DEF_HELPER_FLAGS_3(xsetbv, TCG_CALL_NO_WG, void, env, i32, i64)
DEF_HELPER_FLAGS_2(rdpkru, TCG_CALL_NO_WG, i64, env, i32)
DEF_HELPER_FLAGS_3(wrpkru, TCG_CALL_NO_WG, void, env, i32, i64)
-DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, tl, tl)
-DEF_HELPER_FLAGS_1(ctz, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(pdep, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl)
diff --git a/target/i386/hyperv.c b/target/i386/hyperv.c
index 39a230f119..8545574568 100644
--- a/target/i386/hyperv.c
+++ b/target/i386/hyperv.c
@@ -12,6 +12,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
#include "hyperv.h"
#include "standard-headers/asm-x86/hyperv.h"
@@ -88,7 +89,7 @@ HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
goto err_sint_set_notifier;
}
- event_notifier_set_handler(&sint_route->sint_ack_notifier, false,
+ event_notifier_set_handler(&sint_route->sint_ack_notifier,
kvm_hv_sint_ack_handler);
gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vcpu_id, sint);
@@ -112,7 +113,7 @@ HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
err_irqfd:
kvm_irqchip_release_virq(kvm_state, gsi);
err_gsi:
- event_notifier_set_handler(&sint_route->sint_ack_notifier, false, NULL);
+ event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
event_notifier_cleanup(&sint_route->sint_ack_notifier);
err_sint_set_notifier:
event_notifier_cleanup(&sint_route->sint_set_notifier);
@@ -128,7 +129,7 @@ void kvm_hv_sint_route_destroy(HvSintRoute *sint_route)
&sint_route->sint_set_notifier,
sint_route->gsi);
kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
- event_notifier_set_handler(&sint_route->sint_ack_notifier, false, NULL);
+ event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
event_notifier_cleanup(&sint_route->sint_ack_notifier);
event_notifier_cleanup(&sint_route->sint_set_notifier);
g_free(sint_route);
diff --git a/target/i386/int_helper.c b/target/i386/int_helper.c
index 9e873ac150..4dc5c65991 100644
--- a/target/i386/int_helper.c
+++ b/target/i386/int_helper.c
@@ -417,17 +417,6 @@ void helper_idivq_EAX(CPUX86State *env, target_ulong t0)
# define clztl clz64
#endif
-/* bit operations */
-target_ulong helper_ctz(target_ulong t0)
-{
- return ctztl(t0);
-}
-
-target_ulong helper_clz(target_ulong t0)
-{
- return clztl(t0);
-}
-
target_ulong helper_pdep(target_ulong src, target_ulong mask)
{
target_ulong dest = 0;
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 10a9cd8f7f..8e130ccf9c 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -23,6 +23,7 @@
#include "qemu-common.h"
#include "cpu.h"
#include "sysemu/sysemu.h"
+#include "sysemu/hw_accel.h"
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "hyperv.h"
@@ -709,6 +710,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
uint32_t signature[3];
int kvm_base = KVM_CPUID_SIGNATURE;
int r;
+ Error *local_err = NULL;
memset(&cpuid_data, 0, sizeof(cpuid_data));
@@ -962,26 +964,33 @@ int kvm_arch_init_vcpu(CPUState *cs)
has_msr_mcg_ext_ctl = has_msr_feature_control = true;
}
- c = cpuid_find_entry(&cpuid_data.cpuid, 0x80000007, 0);
- if (c && (c->edx & 1<<8) && invtsc_mig_blocker == NULL) {
- /* for migration */
- error_setg(&invtsc_mig_blocker,
- "State blocked by non-migratable CPU device"
- " (invtsc flag)");
- migrate_add_blocker(invtsc_mig_blocker);
- /* for savevm */
- vmstate_x86_cpu.unmigratable = 1;
+ if (!env->user_tsc_khz) {
+ if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
+ invtsc_mig_blocker == NULL) {
+ /* for migration */
+ error_setg(&invtsc_mig_blocker,
+ "State blocked by non-migratable CPU device"
+ " (invtsc flag)");
+ r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ error_free(invtsc_mig_blocker);
+ goto fail;
+ }
+ /* for savevm */
+ vmstate_x86_cpu.unmigratable = 1;
+ }
}
cpuid_data.cpuid.padding = 0;
r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
if (r) {
- return r;
+ goto fail;
}
r = kvm_arch_set_tsc_khz(cs);
if (r < 0) {
- return r;
+ goto fail;
}
/* vcpu's TSC frequency is either specified by user, or following
@@ -1008,6 +1017,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
return 0;
+
+ fail:
+ migrate_del_blocker(invtsc_mig_blocker);
+ return r;
}
void kvm_arch_reset_vcpu(X86CPU *cpu)
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 760f82b6c7..78ae2f986b 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -136,10 +136,12 @@ static const VMStateDescription vmstate_mtrr_var = {
#define VMSTATE_MTRR_VARS(_field, _state, _n, _v) \
VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)
-static void put_fpreg_error(QEMUFile *f, void *opaque, size_t size)
+static int put_fpreg_error(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
fprintf(stderr, "call put_fpreg() with invalid arguments\n");
exit(0);
+ return 0;
}
/* XXX: add that in a FPU generic layer */
@@ -164,7 +166,8 @@ static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp)
p->exp = e;
}
-static int get_fpreg(QEMUFile *f, void *opaque, size_t size)
+static int get_fpreg(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field)
{
FPReg *fp_reg = opaque;
uint64_t mant;
@@ -176,7 +179,8 @@ static int get_fpreg(QEMUFile *f, void *opaque, size_t size)
return 0;
}
-static void put_fpreg(QEMUFile *f, void *opaque, size_t size)
+static int put_fpreg(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
FPReg *fp_reg = opaque;
uint64_t mant;
@@ -186,6 +190,8 @@ static void put_fpreg(QEMUFile *f, void *opaque, size_t size)
cpu_get_fp80(&mant, &exp, fp_reg->d);
qemu_put_be64s(f, &mant);
qemu_put_be16s(f, &exp);
+
+ return 0;
}
static const VMStateInfo vmstate_fpreg = {
@@ -194,7 +200,8 @@ static const VMStateInfo vmstate_fpreg = {
.put = put_fpreg,
};
-static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size)
+static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field)
{
union x86_longdouble *p = opaque;
uint64_t mant;
@@ -211,7 +218,8 @@ static const VMStateInfo vmstate_fpreg_1_mmx = {
.put = put_fpreg_error,
};
-static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size)
+static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size,
+ VMStateField *field)
{
union x86_longdouble *p = opaque;
uint64_t mant;
@@ -273,17 +281,21 @@ static bool less_than_7(void *opaque, int version_id)
return version_id < 7;
}
-static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
+static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field)
{
uint64_t *v = pv;
*v = qemu_get_be32(f);
return 0;
}
-static void put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
+static int put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size,
+ VMStateField *field, QJSON *vmdesc)
{
uint64_t *v = pv;
qemu_put_be32(f, *v);
+
+ return 0;
}
static const VMStateInfo vmstate_hack_uint64_as_uint32 = {
@@ -387,7 +399,7 @@ static int cpu_post_load(void *opaque, int version_id)
env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
cpu_x86_update_dr7(env, dr7);
}
- tlb_flush(cs, 1);
+ tlb_flush(cs);
if (tcg_enabled()) {
cpu_smm_update(cpu);
diff --git a/target/i386/misc_helper.c b/target/i386/misc_helper.c
index 3f666b4b87..5029efef47 100644
--- a/target/i386/misc_helper.c
+++ b/target/i386/misc_helper.c
@@ -635,5 +635,5 @@ void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val)
}
env->pkru = val;
- tlb_flush(cs, 1);
+ tlb_flush(cs);
}
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
index 7a98f53864..16509d0a74 100644
--- a/target/i386/ops_sse.h
+++ b/target/i386/ops_sse.h
@@ -2157,32 +2157,6 @@ target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len)
return crc;
}
-#define POPMASK(i) ((target_ulong) -1 / ((1LL << (1 << i)) + 1))
-#define POPCOUNT(n, i) ((n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i)))
-target_ulong helper_popcnt(CPUX86State *env, target_ulong n, uint32_t type)
-{
- CC_SRC = n ? 0 : CC_Z;
-
- n = POPCOUNT(n, 0);
- n = POPCOUNT(n, 1);
- n = POPCOUNT(n, 2);
- n = POPCOUNT(n, 3);
- if (type == 1) {
- return n & 0xff;
- }
-
- n = POPCOUNT(n, 4);
-#ifndef TARGET_X86_64
- return n;
-#else
- if (type == 2) {
- return n & 0xff;
- }
-
- return POPCOUNT(n, 5);
-#endif
-}
-
void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
uint32_t ctrl)
{
diff --git a/target/i386/ops_sse_header.h b/target/i386/ops_sse_header.h
index 64c5857cf4..094aafc573 100644
--- a/target/i386/ops_sse_header.h
+++ b/target/i386/ops_sse_header.h
@@ -333,7 +333,6 @@ DEF_HELPER_4(glue(pcmpestrm, SUFFIX), void, env, Reg, Reg, i32)
DEF_HELPER_4(glue(pcmpistri, SUFFIX), void, env, Reg, Reg, i32)
DEF_HELPER_4(glue(pcmpistrm, SUFFIX), void, env, Reg, Reg, i32)
DEF_HELPER_3(crc32, tl, i32, tl, i32)
-DEF_HELPER_3(popcnt, tl, env, tl, i32)
#endif
/* AES-NI op helpers */
diff --git a/target/i386/svm_helper.c b/target/i386/svm_helper.c
index 782b3f12f0..210f6aa7b5 100644
--- a/target/i386/svm_helper.c
+++ b/target/i386/svm_helper.c
@@ -289,7 +289,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
break;
case TLB_CONTROL_FLUSH_ALL_ASID:
/* FIXME: this is not 100% correct but should work for now */
- tlb_flush(cs, 1);
+ tlb_flush(cs);
break;
}
diff --git a/target/i386/translate.c b/target/i386/translate.c
index 59e11fcd1f..72c1b03a2a 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -222,6 +222,7 @@ static const uint8_t cc_op_live[CC_OP_NB] = {
[CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
[CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
[CC_OP_CLR] = 0,
+ [CC_OP_POPCNT] = USES_CC_SRC,
};
static void set_cc_op(DisasContext *s, CCOp op)
@@ -383,8 +384,7 @@ static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
if (ot == MO_8 && byte_reg_is_xH(reg)) {
- tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
- tcg_gen_ext8u_tl(t0, t0);
+ tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
} else {
tcg_gen_mov_tl(t0, cpu_regs[reg]);
}
@@ -758,6 +758,7 @@ static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
case CC_OP_LOGICB ... CC_OP_LOGICQ:
case CC_OP_CLR:
+ case CC_OP_POPCNT:
return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
case CC_OP_INCB ... CC_OP_INCQ:
@@ -825,6 +826,7 @@ static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
.mask = CC_S };
case CC_OP_CLR:
+ case CC_OP_POPCNT:
return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
default:
{
@@ -844,6 +846,7 @@ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
.mask = -1, .no_setcond = true };
case CC_OP_CLR:
+ case CC_OP_POPCNT:
return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
default:
gen_compute_eflags(s);
@@ -867,6 +870,9 @@ static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
.mask = CC_Z };
case CC_OP_CLR:
return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
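+ /* For CC_OP_POPCNT, cpu_cc_src holds the zero-extended POPCNT input,
+ * so ZF is computed as "input == 0". */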
+ case CC_OP_POPCNT:
+ return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
+ .mask = -1 };
default:
{
TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
@@ -3768,8 +3774,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
/* Extract the LEN into a mask. Lengths larger than
operand size get all ones. */
- tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
- tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
+ tcg_gen_extract_tl(cpu_A0, cpu_regs[s->vex_v], 8, 8);
tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
cpu_A0, bound);
tcg_temp_free(bound);
@@ -3920,9 +3925,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_compute_eflags(s);
}
carry_in = cpu_tmp0;
- tcg_gen_shri_tl(carry_in, cpu_cc_src,
- ctz32(b == 0x1f6 ? CC_C : CC_O));
- tcg_gen_andi_tl(carry_in, carry_in, 1);
+ tcg_gen_extract_tl(carry_in, cpu_cc_src,
+ ctz32(b == 0x1f6 ? CC_C : CC_O), 1);
}
switch (ot) {
@@ -5447,21 +5451,25 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
- gen_op_mov_v_reg(ot, cpu_T0, rm);
- switch (s_ot) {
- case MO_UB:
- tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
- break;
- case MO_SB:
- tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
- break;
- case MO_UW:
- tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
- break;
- default:
- case MO_SW:
- tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
- break;
+ if (s_ot == MO_SB && byte_reg_is_xH(rm)) {
+ tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
+ switch (s_ot) {
+ case MO_UB:
+ tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+ break;
+ case MO_SB:
+ tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+ break;
+ case MO_UW:
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+ break;
+ default:
+ case MO_SW:
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ break;
+ }
}
gen_op_mov_reg_v(d_ot, reg, cpu_T0);
} else {
@@ -6435,10 +6443,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tcg_const_i32(s->pc - s->cs_base));
set_cc_op(s, CC_OP_EFLAGS);
}
- /* TF handling for the syscall insn is different. The TF bit is checked
- after the syscall insn completes. This allows #DB to not be
- generated after one has entered CPL0 if TF is set in FMASK. */
- gen_eob_worker(s, false, true);
+ gen_eob(s);
break;
case 0xe8: /* call im */
{
@@ -6806,21 +6811,18 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
? s->cpuid_ext3_features & CPUID_EXT3_ABM
: s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
int size = 8 << ot;
+ /* For lzcnt/tzcnt, the C bit is defined in terms of the input. */
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
if (b & 1) {
/* For lzcnt, reduce the target_ulong result by the
number of zeros that we expect to find at the top. */
- gen_helper_clz(cpu_T0, cpu_T0);
+ tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS);
tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
} else {
- /* For tzcnt, a zero input must return the operand size:
- force all bits outside the operand size to 1. */
- target_ulong mask = (target_ulong)-2 << (size - 1);
- tcg_gen_ori_tl(cpu_T0, cpu_T0, mask);
- gen_helper_ctz(cpu_T0, cpu_T0);
- }
- /* For lzcnt/tzcnt, C and Z bits are defined and are
- related to the result. */
+ /* For tzcnt, a zero input must return the operand size. */
+ tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size);
+ }
+ /* For lzcnt/tzcnt, the Z bit is defined in terms of the result. */
gen_op_update1_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
} else {
@@ -6828,20 +6830,20 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
to the input and not the result. */
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_LOGICB + ot);
+
+ /* ??? The manual says that the output is undefined when the
+ input is zero, but real hardware leaves it unchanged, and
+ real programs appear to depend on that. Accomplish this
+ by passing the output as the value to return upon zero. */
if (b & 1) {
/* For bsr, return the bit index of the first 1 bit,
not the count of leading zeros. */
- gen_helper_clz(cpu_T0, cpu_T0);
+ tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
+ tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1);
tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
} else {
- gen_helper_ctz(cpu_T0, cpu_T0);
+ tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]);
}
- /* ??? The manual says that the output is undefined when the
- input is zero, but real hardware leaves it unchanged, and
- real programs appear to depend on that. */
- tcg_gen_movi_tl(cpu_tmp0, 0);
- tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T0, cpu_cc_dst, cpu_tmp0,
- cpu_regs[reg], cpu_T0);
}
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
@@ -7119,7 +7121,10 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
- gen_eob(s);
+ /* TF handling for the syscall insn is different. The TF bit is checked
+ after the syscall insn completes. This allows #DB to not be
+ generated after one has entered CPL0 if TF is set in FMASK. */
+ gen_eob_worker(s, false, true);
break;
case 0x107: /* sysret */
if (!s->pe) {
@@ -8207,10 +8212,12 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
- gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
+ gen_extu(ot, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+ tcg_gen_ctpop_tl(cpu_T0, cpu_T0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
- set_cc_op(s, CC_OP_EFLAGS);
+ set_cc_op(s, CC_OP_POPCNT);
break;
case 0x10e ... 0x10f:
/* 3DNow! instructions, ignore prefixes */
diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c
index 8d939a7779..2b8c36b6d0 100644
--- a/target/lm32/cpu.c
+++ b/target/lm32/cpu.c
@@ -128,10 +128,9 @@ static void lm32_cpu_reset(CPUState *s)
lcc->parent_reset(s);
/* reset cpu state */
- memset(env, 0, offsetof(CPULM32State, eba));
+ memset(env, 0, offsetof(CPULM32State, end_reset_fields));
lm32_cpu_init_cfg_reg(cpu);
- tlb_flush(s, 1);
}
static void lm32_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
diff --git a/target/lm32/cpu.h b/target/lm32/cpu.h
index d8a3515244..1d972cb26b 100644
--- a/target/lm32/cpu.h
+++ b/target/lm32/cpu.h
@@ -165,6 +165,9 @@ struct CPULM32State {
struct CPUBreakpoint *cpu_breakpoint[4];
struct CPUWatchpoint *cpu_watchpoint[4];
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index ba17480098..fa10b6e4cd 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -52,7 +52,7 @@ static void m68k_cpu_reset(CPUState *s)
mcc->parent_reset(s);
- memset(env, 0, offsetof(CPUM68KState, features));
+ memset(env, 0, offsetof(CPUM68KState, end_reset_fields));
#if !defined(CONFIG_USER_ONLY)
env->sr = 0x2700;
#endif
@@ -61,7 +61,6 @@ static void m68k_cpu_reset(CPUState *s)
cpu_m68k_set_ccr(env, 0);
/* TODO: We should set PC from the interrupt vector. */
env->pc = 0;
- tlb_flush(s, 1);
}
static void m68k_cpu_disas_set_info(CPUState *s, disassemble_info *info)
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index 6dfb54eb70..809582212d 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -37,6 +37,7 @@
#define OS_DOUBLE 4
#define OS_EXTENDED 5
#define OS_PACKED 6
+#define OS_UNSIZED 7
#define MAX_QREGS 32
@@ -95,10 +96,6 @@ typedef struct CPUM68KState {
uint32_t macsr;
uint32_t mac_mask;
- /* Temporary storage for DIV helpers. */
- uint32_t div1;
- uint32_t div2;
-
/* MMU status. */
struct {
uint32_t ar;
@@ -115,6 +112,9 @@ typedef struct CPUM68KState {
uint32_t qregs[MAX_QREGS];
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
index 7aed9ffd2f..f750d3dbaa 100644
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -284,58 +284,6 @@ void HELPER(set_sr)(CPUM68KState *env, uint32_t val)
m68k_switch_sp(env);
}
-uint32_t HELPER(shl_cc)(CPUM68KState *env, uint32_t val, uint32_t shift)
-{
- uint64_t result;
-
- shift &= 63;
- result = (uint64_t)val << shift;
-
- env->cc_c = (result >> 32) & 1;
- env->cc_n = result;
- env->cc_z = result;
- env->cc_v = 0;
- env->cc_x = shift ? env->cc_c : env->cc_x;
-
- return result;
-}
-
-uint32_t HELPER(shr_cc)(CPUM68KState *env, uint32_t val, uint32_t shift)
-{
- uint64_t temp;
- uint32_t result;
-
- shift &= 63;
- temp = (uint64_t)val << 32 >> shift;
- result = temp >> 32;
-
- env->cc_c = (temp >> 31) & 1;
- env->cc_n = result;
- env->cc_z = result;
- env->cc_v = 0;
- env->cc_x = shift ? env->cc_c : env->cc_x;
-
- return result;
-}
-
-uint32_t HELPER(sar_cc)(CPUM68KState *env, uint32_t val, uint32_t shift)
-{
- uint64_t temp;
- uint32_t result;
-
- shift &= 63;
- temp = (int64_t)val << 32 >> shift;
- result = temp >> 32;
-
- env->cc_c = (temp >> 31) & 1;
- env->cc_n = result;
- env->cc_z = result;
- env->cc_v = result ^ val;
- env->cc_x = shift ? env->cc_c : env->cc_x;
-
- return result;
-}
-
/* FPU helpers. */
uint32_t HELPER(f64_to_i32)(CPUM68KState *env, float64 val)
{
diff --git a/target/m68k/helper.h b/target/m68k/helper.h
index 2697e32d0b..d7a4bf1db5 100644
--- a/target/m68k/helper.h
+++ b/target/m68k/helper.h
@@ -1,13 +1,16 @@
DEF_HELPER_1(bitrev, i32, i32)
DEF_HELPER_1(ff1, i32, i32)
DEF_HELPER_FLAGS_2(sats, TCG_CALL_NO_RWG_SE, i32, i32, i32)
-DEF_HELPER_2(divu, void, env, i32)
-DEF_HELPER_2(divs, void, env, i32)
-DEF_HELPER_3(shl_cc, i32, env, i32, i32)
-DEF_HELPER_3(shr_cc, i32, env, i32, i32)
-DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_3(divuw, void, env, int, i32)
+DEF_HELPER_3(divsw, void, env, int, s32)
+DEF_HELPER_4(divul, void, env, int, int, i32)
+DEF_HELPER_4(divsl, void, env, int, int, s32)
+DEF_HELPER_4(divull, void, env, int, int, i32)
+DEF_HELPER_4(divsll, void, env, int, int, s32)
DEF_HELPER_2(set_sr, void, env, i32)
DEF_HELPER_3(movec, void, env, i32, i32)
+DEF_HELPER_4(cas2w, void, env, i32, i32, i32)
+DEF_HELPER_4(cas2l, void, env, i32, i32, i32)
DEF_HELPER_2(f64_to_i32, f32, env, f64)
DEF_HELPER_2(f64_to_f32, f32, env, f64)
@@ -47,3 +50,13 @@ DEF_HELPER_2(flush_flags, void, env, i32)
DEF_HELPER_2(set_ccr, void, env, i32)
DEF_HELPER_FLAGS_1(get_ccr, TCG_CALL_NO_WG_SE, i32, env)
DEF_HELPER_2(raise_exception, void, env, i32)
+
+DEF_HELPER_FLAGS_3(bfffo_reg, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+
+DEF_HELPER_FLAGS_4(bfexts_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfextu_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32)
+DEF_HELPER_FLAGS_5(bfins_mem, TCG_CALL_NO_WG, i32, env, i32, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfchg_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfclr_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfset_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
+DEF_HELPER_FLAGS_4(bfffo_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32)
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index 48e02e4062..7b5126c88d 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -166,12 +166,17 @@ bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}
-static void raise_exception(CPUM68KState *env, int tt)
+static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr)
{
CPUState *cs = CPU(m68k_env_get_cpu(env));
cs->exception_index = tt;
- cpu_loop_exit(cs);
+ cpu_loop_exit_restore(cs, raddr);
+}
+
+static void raise_exception(CPUM68KState *env, int tt)
+{
+ raise_exception_ra(env, tt, 0);
}
void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)
@@ -179,51 +184,494 @@ void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)
raise_exception(env, tt);
}
-void HELPER(divu)(CPUM68KState *env, uint32_t word)
+void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den)
{
- uint32_t num;
- uint32_t den;
- uint32_t quot;
- uint32_t rem;
+ uint32_t num = env->dregs[destr];
+ uint32_t quot, rem;
+
+ if (den == 0) {
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_c = 0; /* always cleared, even if overflow */
+ if (quot > 0xffff) {
+ env->cc_v = -1;
+ /* the real 68040 keeps N and clears Z on overflow,
+ * whereas the documentation says "undefined"
+ */
+ env->cc_z = 1;
+ return;
+ }
+ env->dregs[destr] = deposit32(quot, 16, 16, rem);
+ env->cc_z = (int16_t)quot;
+ env->cc_n = (int16_t)quot;
+ env->cc_v = 0;
+}
+
+void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den)
+{
+ int32_t num = env->dregs[destr];
+ uint32_t quot, rem;
- num = env->div1;
- den = env->div2;
- /* ??? This needs to make sure the throwing location is accurate. */
if (den == 0) {
- raise_exception(env, EXCP_DIV0);
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
}
quot = num / den;
rem = num % den;
- env->cc_v = (word && quot > 0xffff ? -1 : 0);
+ env->cc_c = 0; /* always cleared, even if overflow */
+ if (quot != (int16_t)quot) {
+ env->cc_v = -1;
+ /* nothing else is modified */
+ /* the real 68040 keeps N and clears Z on overflow,
+ * whereas the documentation says "undefined"
+ */
+ env->cc_z = 1;
+ return;
+ }
+ env->dregs[destr] = deposit32(quot, 16, 16, rem);
+ env->cc_z = (int16_t)quot;
+ env->cc_n = (int16_t)quot;
+ env->cc_v = 0;
+}
+
+void HELPER(divul)(CPUM68KState *env, int numr, int regr, uint32_t den)
+{
+ uint32_t num = env->dregs[numr];
+ uint32_t quot, rem;
+
+ if (den == 0) {
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_c = 0;
env->cc_z = quot;
env->cc_n = quot;
+ env->cc_v = 0;
+
+ if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
+ if (numr == regr) {
+ env->dregs[numr] = quot;
+ } else {
+ env->dregs[regr] = rem;
+ }
+ } else {
+ env->dregs[regr] = rem;
+ env->dregs[numr] = quot;
+ }
+}
+
+void HELPER(divsl)(CPUM68KState *env, int numr, int regr, int32_t den)
+{
+ int32_t num = env->dregs[numr];
+ int32_t quot, rem;
+
+ if (den == 0) {
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
+ }
+ quot = num / den;
+ rem = num % den;
+
env->cc_c = 0;
+ env->cc_z = quot;
+ env->cc_n = quot;
+ env->cc_v = 0;
- env->div1 = quot;
- env->div2 = rem;
+ if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
+ if (numr == regr) {
+ env->dregs[numr] = quot;
+ } else {
+ env->dregs[regr] = rem;
+ }
+ } else {
+ env->dregs[regr] = rem;
+ env->dregs[numr] = quot;
+ }
}
-void HELPER(divs)(CPUM68KState *env, uint32_t word)
+void HELPER(divull)(CPUM68KState *env, int numr, int regr, uint32_t den)
{
- int32_t num;
- int32_t den;
- int32_t quot;
+ uint64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
+ uint64_t quot;
+ uint32_t rem;
+
+ if (den == 0) {
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_c = 0; /* always cleared, even if overflow */
+ if (quot > 0xffffffffULL) {
+ env->cc_v = -1;
+ /* the real 68040 keeps N and clears Z on overflow,
+ * whereas the documentation says "undefined"
+ */
+ env->cc_z = 1;
+ return;
+ }
+ env->cc_z = quot;
+ env->cc_n = quot;
+ env->cc_v = 0;
+
+ /*
+ * If Dq and Dr are the same, the quotient is returned;
+ * therefore we set Dq last.
+ */
+
+ env->dregs[regr] = rem;
+ env->dregs[numr] = quot;
+}
+
+void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den)
+{
+ int64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
+ int64_t quot;
int32_t rem;
- num = env->div1;
- den = env->div2;
if (den == 0) {
- raise_exception(env, EXCP_DIV0);
+ raise_exception_ra(env, EXCP_DIV0, GETPC());
}
quot = num / den;
rem = num % den;
- env->cc_v = (word && quot != (int16_t)quot ? -1 : 0);
+ env->cc_c = 0; /* always cleared, even if overflow */
+ if (quot != (int32_t)quot) {
+ env->cc_v = -1;
+ /* the real 68040 keeps N and clears Z on overflow,
+ * whereas the documentation says "undefined"
+ */
+ env->cc_z = 1;
+ return;
+ }
env->cc_z = quot;
env->cc_n = quot;
- env->cc_c = 0;
+ env->cc_v = 0;
+
+ /*
+ * If Dq and Dr are the same, the quotient is returned;
+ * therefore we set Dq last.
+ */
+
+ env->dregs[regr] = rem;
+ env->dregs[numr] = quot;
+}
+
+void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
+{
+ uint32_t Dc1 = extract32(regs, 9, 3);
+ uint32_t Dc2 = extract32(regs, 6, 3);
+ uint32_t Du1 = extract32(regs, 3, 3);
+ uint32_t Du2 = extract32(regs, 0, 3);
+ int16_t c1 = env->dregs[Dc1];
+ int16_t c2 = env->dregs[Dc2];
+ int16_t u1 = env->dregs[Du1];
+ int16_t u2 = env->dregs[Du2];
+ int16_t l1, l2;
+ uintptr_t ra = GETPC();
+
+ if (parallel_cpus) {
+ /* Tell the main loop we need to serialize this insn. */
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+ } else {
+ /* We're executing in a serial context -- no need to be atomic. */
+ l1 = cpu_lduw_data_ra(env, a1, ra);
+ l2 = cpu_lduw_data_ra(env, a2, ra);
+ if (l1 == c1 && l2 == c2) {
+ cpu_stw_data_ra(env, a1, u1, ra);
+ cpu_stw_data_ra(env, a2, u2, ra);
+ }
+ }
+
+ if (c1 != l1) {
+ env->cc_n = l1;
+ env->cc_v = c1;
+ } else {
+ env->cc_n = l2;
+ env->cc_v = c2;
+ }
+ env->cc_op = CC_OP_CMPW;
+ env->dregs[Dc1] = deposit32(env->dregs[Dc1], 0, 16, l1);
+ env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2);
+}
- env->div1 = quot;
- env->div2 = rem;
+void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
+{
+ uint32_t Dc1 = extract32(regs, 9, 3);
+ uint32_t Dc2 = extract32(regs, 6, 3);
+ uint32_t Du1 = extract32(regs, 3, 3);
+ uint32_t Du2 = extract32(regs, 0, 3);
+ uint32_t c1 = env->dregs[Dc1];
+ uint32_t c2 = env->dregs[Dc2];
+ uint32_t u1 = env->dregs[Du1];
+ uint32_t u2 = env->dregs[Du2];
+ uint32_t l1, l2;
+ uintptr_t ra = GETPC();
+#if defined(CONFIG_ATOMIC64) && !defined(CONFIG_USER_ONLY)
+ int mmu_idx = cpu_mmu_index(env, 0);
+ TCGMemOpIdx oi;
+#endif
+
+ if (parallel_cpus) {
+ /* We're executing in a parallel context -- must be atomic. */
+#ifdef CONFIG_ATOMIC64
+ uint64_t c, u, l;
+ if ((a1 & 7) == 0 && a2 == a1 + 4) {
+ c = deposit64(c2, 32, 32, c1);
+ u = deposit64(u2, 32, 32, u1);
+#ifdef CONFIG_USER_ONLY
+ l = helper_atomic_cmpxchgq_be(env, a1, c, u);
+#else
+ oi = make_memop_idx(MO_BEQ, mmu_idx);
+ l = helper_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
+#endif
+ l1 = l >> 32;
+ l2 = l;
+ } else if ((a2 & 7) == 0 && a1 == a2 + 4) {
+ c = deposit64(c1, 32, 32, c2);
+ u = deposit64(u1, 32, 32, u2);
+#ifdef CONFIG_USER_ONLY
+ l = helper_atomic_cmpxchgq_be(env, a2, c, u);
+#else
+ oi = make_memop_idx(MO_BEQ, mmu_idx);
+ l = helper_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
+#endif
+ l2 = l >> 32;
+ l1 = l;
+ } else
+#endif
+ {
+ /* Tell the main loop we need to serialize this insn. */
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+ }
+ } else {
+ /* We're executing in a serial context -- no need to be atomic. */
+ l1 = cpu_ldl_data_ra(env, a1, ra);
+ l2 = cpu_ldl_data_ra(env, a2, ra);
+ if (l1 == c1 && l2 == c2) {
+ cpu_stl_data_ra(env, a1, u1, ra);
+ cpu_stl_data_ra(env, a2, u2, ra);
+ }
+ }
+
+ if (c1 != l1) {
+ env->cc_n = l1;
+ env->cc_v = c1;
+ } else {
+ env->cc_n = l2;
+ env->cc_v = c2;
+ }
+ env->cc_op = CC_OP_CMPL;
+ env->dregs[Dc1] = l1;
+ env->dregs[Dc2] = l2;
+}
+
+struct bf_data {
+ uint32_t addr;
+ uint32_t bofs;
+ uint32_t blen;
+ uint32_t len;
+};
+
+static struct bf_data bf_prep(uint32_t addr, int32_t ofs, uint32_t len)
+{
+ int bofs, blen;
+
+ /* Bound length; map 0 to 32. */
+ len = ((len - 1) & 31) + 1;
+
+ /* Note that ofs is signed. */
+ addr += ofs / 8;
+ bofs = ofs % 8;
+ if (bofs < 0) {
+ bofs += 8;
+ addr -= 1;
+ }
+
+ /* Compute the number of bytes required (minus one) to
+ satisfy the bitfield. */
+ blen = (bofs + len - 1) / 8;
+
+ /* Canonicalize the bit offset for data loaded into a 64-bit big-endian
+ word. For the cases where BLEN is not a power of 2, adjust ADDR so
+ that we can use the next power of two sized load without crossing a
+ page boundary, unless the field itself crosses the boundary. */
+ switch (blen) {
+ case 0:
+ bofs += 56;
+ break;
+ case 1:
+ bofs += 48;
+ break;
+ case 2:
+ if (addr & 1) {
+ bofs += 8;
+ addr -= 1;
+ }
+ /* fallthru */
+ case 3:
+ bofs += 32;
+ break;
+ case 4:
+ if (addr & 3) {
+ bofs += 8 * (addr & 3);
+ addr &= -4;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ return (struct bf_data){
+ .addr = addr,
+ .bofs = bofs,
+ .blen = blen,
+ .len = len,
+ };
+}
+
+static uint64_t bf_load(CPUM68KState *env, uint32_t addr, int blen,
+ uintptr_t ra)
+{
+ switch (blen) {
+ case 0:
+ return cpu_ldub_data_ra(env, addr, ra);
+ case 1:
+ return cpu_lduw_data_ra(env, addr, ra);
+ case 2:
+ case 3:
+ return cpu_ldl_data_ra(env, addr, ra);
+ case 4:
+ return cpu_ldq_data_ra(env, addr, ra);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void bf_store(CPUM68KState *env, uint32_t addr, int blen,
+ uint64_t data, uintptr_t ra)
+{
+ switch (blen) {
+ case 0:
+ cpu_stb_data_ra(env, addr, data, ra);
+ break;
+ case 1:
+ cpu_stw_data_ra(env, addr, data, ra);
+ break;
+ case 2:
+ case 3:
+ cpu_stl_data_ra(env, addr, data, ra);
+ break;
+ case 4:
+ cpu_stq_data_ra(env, addr, data, ra);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+uint32_t HELPER(bfexts_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+
+ return (int64_t)(data << d.bofs) >> (64 - d.len);
+}
+
+uint64_t HELPER(bfextu_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+
+ /* Put CC_N at the top of the high word; put the zero-extended value
+ at the bottom of the low word. */
+ data <<= d.bofs;
+ data >>= 64 - d.len;
+ data |= data << (64 - d.len);
+
+ return data;
+}
+
+uint32_t HELPER(bfins_mem)(CPUM68KState *env, uint32_t addr, uint32_t val,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+ uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+
+ data = (data & ~mask) | (((uint64_t)val << (64 - d.len)) >> d.bofs);
+
+ bf_store(env, d.addr, d.blen, data, ra);
+
+ /* The field at the top of the word is also CC_N for CC_OP_LOGIC. */
+ return val << (32 - d.len);
+}
+
+uint32_t HELPER(bfchg_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+ uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+
+ bf_store(env, d.addr, d.blen, data ^ mask, ra);
+
+ return ((data & mask) << d.bofs) >> 32;
+}
+
+uint32_t HELPER(bfclr_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+ uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+
+ bf_store(env, d.addr, d.blen, data & ~mask, ra);
+
+ return ((data & mask) << d.bofs) >> 32;
+}
+
+uint32_t HELPER(bfset_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+ uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+
+ bf_store(env, d.addr, d.blen, data | mask, ra);
+
+ return ((data & mask) << d.bofs) >> 32;
+}
+
+uint32_t HELPER(bfffo_reg)(uint32_t n, uint32_t ofs, uint32_t len)
+{
+ return (n ? clz32(n) : len) + ofs;
+}
+
+uint64_t HELPER(bfffo_mem)(CPUM68KState *env, uint32_t addr,
+ int32_t ofs, uint32_t len)
+{
+ uintptr_t ra = GETPC();
+ struct bf_data d = bf_prep(addr, ofs, len);
+ uint64_t data = bf_load(env, d.addr, d.blen, ra);
+ uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
+ uint64_t n = (data & mask) << d.bofs;
+ uint32_t ffo = helper_bfffo_reg(n >> 32, ofs, d.len);
+
+ /* Return FFO in the low word and N in the high word.
+ Note that because of MASK and the shift, the low word
+ is already zero. */
+ return n | ffo;
}
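All of the bf*_mem helpers above share one convention established by bf_prep(): after the byte address is adjusted, the field is described by a bit offset BOFS from the top of a big-endian container and a length LEN of 1..32 bits, and every operation reduces to a left-aligned run of LEN ones shifted down by BOFS. A small standalone C sketch of just that mask/extract/insert arithmetic, under the same BOFS/LEN convention (not QEMU code; the function names are hypothetical):

#include <stdint.h>

/* Mask of a LEN-bit field sitting BOFS bits below the top of a 64-bit word. */
static uint64_t bf_mask(unsigned bofs, unsigned len)
{
    return (~UINT64_C(0) << (64 - len)) >> bofs;
}

/* Unsigned extract, as in bfextu_mem: align the field to the top of the
 * word, then shift it down to bit 0. */
static uint32_t bf_extract_u(uint64_t data, unsigned bofs, unsigned len)
{
    return (data << bofs) >> (64 - len);
}

/* Insert, as in bfins_mem: clear the field, then deposit VAL left-aligned
 * and shifted into place. */
static uint64_t bf_insert(uint64_t data, uint32_t val, unsigned bofs, unsigned len)
{
    uint64_t mask = bf_mask(bofs, len);
    return (data & ~mask) | (((uint64_t)val << (64 - len)) >> bofs);
}

The signed extract in bfexts_mem is the same alignment step with an arithmetic right shift, and bfchg/bfclr/bfset simply XOR, AND-NOT or OR the mask into the loaded word before storing it back.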
diff --git a/target/m68k/qregs.def b/target/m68k/qregs.def
index 156c0f558f..51ff43bf33 100644
--- a/target/m68k/qregs.def
+++ b/target/m68k/qregs.def
@@ -7,7 +7,5 @@ DEFO32(CC_C, cc_c)
DEFO32(CC_N, cc_n)
DEFO32(CC_V, cc_v)
DEFO32(CC_Z, cc_z)
-DEFO32(DIV1, div1)
-DEFO32(DIV2, div2)
DEFO32(MACSR, macsr)
DEFO32(MAC_MASK, mac_mask)
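The translate.c hunk below open-codes packed-BCD arithmetic for ABCD/SBCD/NBCD using the bias-by-6 trick that the bcd_add() comment walks through: add 0x66, detect which digits produced no decimal carry, and subtract the excess 6 from those digits only. A byte-wide standalone C sketch of the same steps (not QEMU code; bcd_add_sketch is a hypothetical name, and the C/X/Z handling done by bcd_flags() is omitted):

#include <stdint.h>

/* Add two packed-BCD bytes plus an extend bit X (0 or 1).  The decimal
 * carry out would be bit 8 of t1; this sketch discards it. */
static uint8_t bcd_add_sketch(uint8_t dest, uint8_t src, unsigned x)
{
    uint32_t t0 = src + 0x66u;               /* bias both digits by 6 */
    uint32_t t1 = t0 + dest + (x & 1);       /* binary sum, bias included */
    uint32_t carries = (t0 ^ dest) ^ t1;     /* nibble carries appear at bits 4 and 8 */
    uint32_t corr = ~(carries >> 3) & 0x22;  /* 0x2 per digit with no decimal carry */

    corr = corr + corr + corr;               /* triple it: the 0x6 to take back out */
    return (uint8_t)(t1 - corr);
}

For example, bcd_add_sketch(0x19, 0x26, 0) yields 0x45. Subtraction reuses the same routine via the complement identity that bcd_sub() notes: dest - src - X == bcd_add(dest + 1 - X, 0x199 - src), which becomes 0x1ff - src once the 0x66 bias is folded in.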
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index d6ed883882..9f60fbc0db 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -59,12 +59,12 @@ static TCGv cpu_aregs[8];
static TCGv_i64 cpu_fregs[8];
static TCGv_i64 cpu_macc[4];
-#define REG(insn, pos) (((insn) >> (pos)) & 7)
+#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
-#define AREG(insn, pos) cpu_aregs[REG(insn, pos)]
+#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define FREG(insn, pos) cpu_fregs[REG(insn, pos)]
-#define MACREG(acc) cpu_macc[acc]
-#define QREG_SP cpu_aregs[7]
+#define MACREG(acc) cpu_macc[acc]
+#define QREG_SP get_areg(s, 7)
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
@@ -141,8 +141,55 @@ typedef struct DisasContext {
int singlestep_enabled;
TCGv_i64 mactmp;
int done_mac;
+ int writeback_mask;
+ TCGv writeback[8];
} DisasContext;
+static TCGv get_areg(DisasContext *s, unsigned regno)
+{
+ if (s->writeback_mask & (1 << regno)) {
+ return s->writeback[regno];
+ } else {
+ return cpu_aregs[regno];
+ }
+}
+
+static void delay_set_areg(DisasContext *s, unsigned regno,
+ TCGv val, bool give_temp)
+{
+ if (s->writeback_mask & (1 << regno)) {
+ if (give_temp) {
+ tcg_temp_free(s->writeback[regno]);
+ s->writeback[regno] = val;
+ } else {
+ tcg_gen_mov_i32(s->writeback[regno], val);
+ }
+ } else {
+ s->writeback_mask |= 1 << regno;
+ if (give_temp) {
+ s->writeback[regno] = val;
+ } else {
+ TCGv tmp = tcg_temp_new();
+ s->writeback[regno] = tmp;
+ tcg_gen_mov_i32(tmp, val);
+ }
+ }
+}
+
+static void do_writebacks(DisasContext *s)
+{
+ unsigned mask = s->writeback_mask;
+ if (mask) {
+ s->writeback_mask = 0;
+ do {
+ unsigned regno = ctz32(mask);
+ tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
+ tcg_temp_free(s->writeback[regno]);
+ mask &= mask - 1;
+ } while (mask);
+ }
+}
+
#define DISAS_JUMP_NEXT 4
#if defined(CONFIG_USER_ONLY)
@@ -331,7 +378,7 @@ static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
}
/* Calculate an address index. */
-static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
+static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
{
TCGv add;
int scale;
@@ -388,7 +435,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
tmp = tcg_temp_new();
if ((ext & 0x44) == 0) {
/* pre-index */
- add = gen_addr_index(ext, tmp);
+ add = gen_addr_index(s, ext, tmp);
} else {
add = NULL_QREG;
}
@@ -417,7 +464,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
/* memory indirect */
base = gen_load(s, OS_LONG, add, 0);
if ((ext & 0x44) == 4) {
- add = gen_addr_index(ext, tmp);
+ add = gen_addr_index(s, ext, tmp);
tcg_gen_add_i32(tmp, add, base);
add = tmp;
} else {
@@ -441,7 +488,7 @@ static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
} else {
/* brief extension word format */
tmp = tcg_temp_new();
- add = gen_addr_index(ext, tmp);
+ add = gen_addr_index(s, ext, tmp);
if (!IS_NULL_QREG(base)) {
tcg_gen_add_i32(tmp, add, base);
if ((int8_t)ext)
@@ -548,18 +595,19 @@ static void gen_flush_flags(DisasContext *s)
case CC_OP_DYNAMIC:
gen_helper_flush_flags(cpu_env, QREG_CC_OP);
+ s->cc_op_synced = 1;
break;
default:
t0 = tcg_const_i32(s->cc_op);
gen_helper_flush_flags(cpu_env, t0);
tcg_temp_free(t0);
+ s->cc_op_synced = 1;
break;
}
/* Note that flush_flags has also assigned env->cc_op. */
s->cc_op = CC_OP_FLAGS;
- s->cc_op_synced = 1;
}
static inline TCGv gen_extend(TCGv val, int opsize, int sign)
@@ -632,12 +680,14 @@ static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
tmp = tcg_temp_new();
tcg_gen_ext8u_i32(tmp, val);
tcg_gen_or_i32(reg, reg, tmp);
+ tcg_temp_free(tmp);
break;
case OS_WORD:
tcg_gen_andi_i32(reg, reg, 0xffff0000);
tmp = tcg_temp_new();
tcg_gen_ext16u_i32(tmp, val);
tcg_gen_or_i32(reg, reg, tmp);
+ tcg_temp_free(tmp);
break;
case OS_LONG:
case OS_SINGLE:
@@ -650,37 +700,49 @@ static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
/* Generate code for an "effective address". Does not adjust the base
register for autoincrement addressing modes. */
-static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
- int opsize)
+static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
+ int mode, int reg0, int opsize)
{
TCGv reg;
TCGv tmp;
uint16_t ext;
uint32_t offset;
- switch ((insn >> 3) & 7) {
+ switch (mode) {
case 0: /* Data register direct. */
case 1: /* Address register direct. */
return NULL_QREG;
- case 2: /* Indirect register */
case 3: /* Indirect postincrement. */
- return AREG(insn, 0);
+ if (opsize == OS_UNSIZED) {
+ return NULL_QREG;
+ }
+ /* fallthru */
+ case 2: /* Indirect register */
+ return get_areg(s, reg0);
case 4: /* Indirect predecrement. */
- reg = AREG(insn, 0);
+ if (opsize == OS_UNSIZED) {
+ return NULL_QREG;
+ }
+ reg = get_areg(s, reg0);
tmp = tcg_temp_new();
- tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
+ if (reg0 == 7 && opsize == OS_BYTE &&
+ m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ tcg_gen_subi_i32(tmp, reg, 2);
+ } else {
+ tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
+ }
return tmp;
case 5: /* Indirect displacement. */
- reg = AREG(insn, 0);
+ reg = get_areg(s, reg0);
tmp = tcg_temp_new();
ext = read_im16(env, s);
tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
return tmp;
case 6: /* Indirect index + displacement. */
- reg = AREG(insn, 0);
+ reg = get_areg(s, reg0);
return gen_lea_indexed(env, s, reg);
case 7: /* Other */
- switch (insn & 7) {
+ switch (reg0) {
case 0: /* Absolute short. */
offset = (int16_t)read_im16(env, s);
return tcg_const_i32(offset);
@@ -702,39 +764,26 @@ static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
return NULL_QREG;
}
-/* Helper function for gen_ea. Reuse the computed address between the
- for read/write operands. */
-static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
- uint16_t insn, int opsize, TCGv val,
- TCGv *addrp, ea_what what)
+static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int opsize)
{
- TCGv tmp;
-
- if (addrp && what == EA_STORE) {
- tmp = *addrp;
- } else {
- tmp = gen_lea(env, s, insn, opsize);
- if (IS_NULL_QREG(tmp))
- return tmp;
- if (addrp)
- *addrp = tmp;
- }
- return gen_ldst(s, opsize, tmp, val, what);
+ int mode = extract32(insn, 3, 3);
+ int reg0 = REG(insn, 0);
+ return gen_lea_mode(env, s, mode, reg0, opsize);
}
-/* Generate code to load/store a value from/into an EA. If VAL > 0 this is
+/* Generate code to load/store a value from/into an EA. If WHAT > 0 this is
a write otherwise it is a read (0 == sign extend, -1 == zero extend).
ADDRP is non-null for readwrite operands. */
-static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
- int opsize, TCGv val, TCGv *addrp, ea_what what)
+static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
+ int opsize, TCGv val, TCGv *addrp, ea_what what)
{
- TCGv reg;
- TCGv result;
- uint32_t offset;
+ TCGv reg, tmp, result;
+ int32_t offset;
- switch ((insn >> 3) & 7) {
+ switch (mode) {
case 0: /* Data register direct. */
- reg = DREG(insn, 0);
+ reg = cpu_dregs[reg0];
if (what == EA_STORE) {
gen_partset_reg(opsize, reg, val);
return store_dummy;
@@ -742,7 +791,7 @@ static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
return gen_extend(reg, opsize, what == EA_LOADS);
}
case 1: /* Address register direct. */
- reg = AREG(insn, 0);
+ reg = get_areg(s, reg0);
if (what == EA_STORE) {
tcg_gen_mov_i32(reg, val);
return store_dummy;
@@ -750,47 +799,61 @@ static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
return gen_extend(reg, opsize, what == EA_LOADS);
}
case 2: /* Indirect register */
- reg = AREG(insn, 0);
+ reg = get_areg(s, reg0);
return gen_ldst(s, opsize, reg, val, what);
case 3: /* Indirect postincrement. */
- reg = AREG(insn, 0);
+ reg = get_areg(s, reg0);
result = gen_ldst(s, opsize, reg, val, what);
- /* ??? This is not exception safe. The instruction may still
- fault after this point. */
- if (what == EA_STORE || !addrp)
- tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
+ if (what == EA_STORE || !addrp) {
+ TCGv tmp = tcg_temp_new();
+ if (reg0 == 7 && opsize == OS_BYTE &&
+ m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ tcg_gen_addi_i32(tmp, reg, 2);
+ } else {
+ tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
+ }
+ delay_set_areg(s, reg0, tmp, true);
+ }
return result;
case 4: /* Indirect predecrement. */
- {
- TCGv tmp;
- if (addrp && what == EA_STORE) {
- tmp = *addrp;
- } else {
- tmp = gen_lea(env, s, insn, opsize);
- if (IS_NULL_QREG(tmp))
- return tmp;
- if (addrp)
- *addrp = tmp;
+ if (addrp && what == EA_STORE) {
+ tmp = *addrp;
+ } else {
+ tmp = gen_lea_mode(env, s, mode, reg0, opsize);
+ if (IS_NULL_QREG(tmp)) {
+ return tmp;
}
- result = gen_ldst(s, opsize, tmp, val, what);
- /* ??? This is not exception safe. The instruction may still
- fault after this point. */
- if (what == EA_STORE || !addrp) {
- reg = AREG(insn, 0);
- tcg_gen_mov_i32(reg, tmp);
+ if (addrp) {
+ *addrp = tmp;
}
}
+ result = gen_ldst(s, opsize, tmp, val, what);
+ if (what == EA_STORE || !addrp) {
+ delay_set_areg(s, reg0, tmp, false);
+ }
return result;
case 5: /* Indirect displacement. */
case 6: /* Indirect index + displacement. */
- return gen_ea_once(env, s, insn, opsize, val, addrp, what);
+ do_indirect:
+ if (addrp && what == EA_STORE) {
+ tmp = *addrp;
+ } else {
+ tmp = gen_lea_mode(env, s, mode, reg0, opsize);
+ if (IS_NULL_QREG(tmp)) {
+ return tmp;
+ }
+ if (addrp) {
+ *addrp = tmp;
+ }
+ }
+ return gen_ldst(s, opsize, tmp, val, what);
case 7: /* Other */
- switch (insn & 7) {
+ switch (reg0) {
case 0: /* Absolute short. */
case 1: /* Absolute long. */
case 2: /* pc displacement */
case 3: /* pc index+displacement. */
- return gen_ea_once(env, s, insn, opsize, val, addrp, what);
+ goto do_indirect;
case 4: /* Immediate. */
/* Sign extend values for consistency. */
switch (opsize) {
@@ -823,6 +886,14 @@ static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
return NULL_QREG;
}
+static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int opsize, TCGv val, TCGv *addrp, ea_what what)
+{
+ int mode = extract32(insn, 3, 3);
+ int reg0 = REG(insn, 0);
+ return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what);
+}
+
typedef struct {
TCGCond tcond;
bool g1;
@@ -1054,11 +1125,19 @@ static void gen_jmp(DisasContext *s, TCGv dest)
s->is_jmp = DISAS_JUMP;
}
+static void gen_raise_exception(int nr)
+{
+ TCGv_i32 tmp = tcg_const_i32(nr);
+
+ gen_helper_raise_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
static void gen_exception(DisasContext *s, uint32_t where, int nr)
{
update_cc_op(s);
gen_jmp_im(s, where);
- gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
+ gen_raise_exception(nr);
}
static inline void gen_addr_fault(DisasContext *s)
@@ -1163,10 +1242,12 @@ DISAS_INSN(undef_fpu)
DISAS_INSN(undef)
{
- M68kCPU *cpu = m68k_env_get_cpu(env);
-
+ /* ??? This covers both instructions that are as yet unimplemented
+ for the 680x0 series and those that are implemented
+ but actually illegal for CPU32 or pre-68020. */
+ qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x",
+ insn, s->pc - 2);
gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
- cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
}
DISAS_INSN(mulw)
@@ -1187,71 +1268,297 @@ DISAS_INSN(mulw)
tcg_gen_mul_i32(tmp, tmp, src);
tcg_gen_mov_i32(reg, tmp);
gen_logic_cc(s, tmp, OS_LONG);
+ tcg_temp_free(tmp);
}
DISAS_INSN(divw)
{
- TCGv reg;
- TCGv tmp;
- TCGv src;
int sign;
+ TCGv src;
+ TCGv destr;
+
+ /* divX.w <EA>,Dn 32/16 -> 16r:16q */
sign = (insn & 0x100) != 0;
- reg = DREG(insn, 9);
- if (sign) {
- tcg_gen_ext16s_i32(QREG_DIV1, reg);
- } else {
- tcg_gen_ext16u_i32(QREG_DIV1, reg);
- }
+
+ /* dest.l / src.w */
+
SRC_EA(env, src, OS_WORD, sign, NULL);
- tcg_gen_mov_i32(QREG_DIV2, src);
+ destr = tcg_const_i32(REG(insn, 9));
if (sign) {
- gen_helper_divs(cpu_env, tcg_const_i32(1));
+ gen_helper_divsw(cpu_env, destr, src);
} else {
- gen_helper_divu(cpu_env, tcg_const_i32(1));
+ gen_helper_divuw(cpu_env, destr, src);
}
-
- tmp = tcg_temp_new();
- src = tcg_temp_new();
- tcg_gen_ext16u_i32(tmp, QREG_DIV1);
- tcg_gen_shli_i32(src, QREG_DIV2, 16);
- tcg_gen_or_i32(reg, tmp, src);
+ tcg_temp_free(destr);
set_cc_op(s, CC_OP_FLAGS);
}
DISAS_INSN(divl)
{
- TCGv num;
- TCGv den;
- TCGv reg;
+ TCGv num, reg, den;
+ int sign;
uint16_t ext;
ext = read_im16(env, s);
- if (ext & 0x87f8) {
- gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
+
+ sign = (ext & 0x0800) != 0;
+
+ if (ext & 0x400) {
+ if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
+ gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
+ return;
+ }
+
+ /* divX.l <EA>, Dr:Dq 64/32 -> 32r:32q */
+
+ SRC_EA(env, den, OS_LONG, 0, NULL);
+ num = tcg_const_i32(REG(ext, 12));
+ reg = tcg_const_i32(REG(ext, 0));
+ if (sign) {
+ gen_helper_divsll(cpu_env, num, reg, den);
+ } else {
+ gen_helper_divull(cpu_env, num, reg, den);
+ }
+ tcg_temp_free(reg);
+ tcg_temp_free(num);
+ set_cc_op(s, CC_OP_FLAGS);
return;
}
- num = DREG(ext, 12);
- reg = DREG(ext, 0);
- tcg_gen_mov_i32(QREG_DIV1, num);
+
+ /* divX.l <EA>, Dq 32/32 -> 32q */
+ /* divXl.l <EA>, Dr:Dq 32/32 -> 32r:32q */
+
SRC_EA(env, den, OS_LONG, 0, NULL);
- tcg_gen_mov_i32(QREG_DIV2, den);
- if (ext & 0x0800) {
- gen_helper_divs(cpu_env, tcg_const_i32(0));
- } else {
- gen_helper_divu(cpu_env, tcg_const_i32(0));
- }
- if ((ext & 7) == ((ext >> 12) & 7)) {
- /* div */
- tcg_gen_mov_i32 (reg, QREG_DIV1);
+ num = tcg_const_i32(REG(ext, 12));
+ reg = tcg_const_i32(REG(ext, 0));
+ if (sign) {
+ gen_helper_divsl(cpu_env, num, reg, den);
} else {
- /* rem */
- tcg_gen_mov_i32 (reg, QREG_DIV2);
+ gen_helper_divul(cpu_env, num, reg, den);
}
+ tcg_temp_free(reg);
+ tcg_temp_free(num);
+
set_cc_op(s, CC_OP_FLAGS);
}
+static void bcd_add(TCGv dest, TCGv src)
+{
+ TCGv t0, t1;
+
+ /* dest10 = dest10 + src10 + X
+ *
+ * t1 = src
+ * t2 = t1 + 0x066
+ * t3 = t2 + dest + X
+ * t4 = t2 ^ dest
+ * t5 = t3 ^ t4
+ * t6 = ~t5 & 0x110
+ * t7 = (t6 >> 2) | (t6 >> 3)
+ * return t3 - t7
+ */
+
+ /* t1 = (src + 0x066) + dest + X
+ * = result with a possible excess 0x6 in each digit
+ */
+
+ t0 = tcg_const_i32(0x066);
+ tcg_gen_add_i32(t0, t0, src);
+
+ t1 = tcg_temp_new();
+ tcg_gen_add_i32(t1, t0, dest);
+ tcg_gen_add_i32(t1, t1, QREG_CC_X);
+
+ /* we will remove the excess 0x6 where there is no carry */
+
+ /* t0 = (src + 0x0066) ^ dest
+ * = t1 without carries
+ */
+
+ tcg_gen_xor_i32(t0, t0, dest);
+
+ /* extract the carries
+ * t0 = t0 ^ t1
+ * = only the carries
+ */
+
+ tcg_gen_xor_i32(t0, t0, t1);
+
+ /* generate a 0x2 for each digit that produced no carry,
+ * then triple it to get the 0x6 to remove
+ */
+
+ tcg_gen_shri_i32(t0, t0, 3);
+ tcg_gen_not_i32(t0, t0);
+ tcg_gen_andi_i32(t0, t0, 0x22);
+ tcg_gen_add_i32(dest, t0, t0);
+ tcg_gen_add_i32(dest, dest, t0);
+ tcg_temp_free(t0);
+
+ /* remove the excess 0x6
+ * for digits that have not generated a carry
+ */
+
+ tcg_gen_sub_i32(dest, t1, dest);
+ tcg_temp_free(t1);
+}
+
+static void bcd_sub(TCGv dest, TCGv src)
+{
+ TCGv t0, t1, t2;
+
+ /* dest10 = dest10 - src10 - X
+ * = bcd_add(dest + 1 - X, 0x199 - src)
+ */
+
+ /* t0 = 0x066 + (0x199 - src) */
+
+ t0 = tcg_temp_new();
+ tcg_gen_subfi_i32(t0, 0x1ff, src);
+
+ /* t1 = t0 + dest + 1 - X*/
+
+ t1 = tcg_temp_new();
+ tcg_gen_add_i32(t1, t0, dest);
+ tcg_gen_addi_i32(t1, t1, 1);
+ tcg_gen_sub_i32(t1, t1, QREG_CC_X);
+
+ /* t2 = t0 ^ dest */
+
+ t2 = tcg_temp_new();
+ tcg_gen_xor_i32(t2, t0, dest);
+
+ /* t0 = t1 ^ t2 */
+
+ tcg_gen_xor_i32(t0, t1, t2);
+
+ /* t2 = ~t0 & 0x110
+ * t0 = (t2 >> 2) | (t2 >> 3)
+ *
+ * to fit in 8-bit operands, this is rewritten as:
+ *
+ * t2 = ~(t0 >> 3) & 0x22
+ * t0 = t2 + t2
+ * t0 = t0 + t2
+ */
+
+ tcg_gen_shri_i32(t2, t0, 3);
+ tcg_gen_not_i32(t2, t2);
+ tcg_gen_andi_i32(t2, t2, 0x22);
+ tcg_gen_add_i32(t0, t2, t2);
+ tcg_gen_add_i32(t0, t0, t2);
+ tcg_temp_free(t2);
+
+ /* return t1 - t0 */
+
+ tcg_gen_sub_i32(dest, t1, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void bcd_flags(TCGv val)
+{
+ tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
+ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);
+
+ tcg_gen_shri_i32(QREG_CC_C, val, 8);
+ tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
+
+ tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
+}
+
+DISAS_INSN(abcd_reg)
+{
+ TCGv src;
+ TCGv dest;
+
+ gen_flush_flags(s); /* !Z is sticky */
+
+ src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
+ dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
+ bcd_add(dest, src);
+ gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
+
+ bcd_flags(dest);
+}
+
+DISAS_INSN(abcd_mem)
+{
+ TCGv src, dest, addr;
+
+ gen_flush_flags(s); /* !Z is sticky */
+
+ /* Indirect pre-decrement load (mode 4) */
+
+ src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
+ NULL_QREG, NULL, EA_LOADU);
+ dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
+ NULL_QREG, &addr, EA_LOADU);
+
+ bcd_add(dest, src);
+
+ gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
+
+ bcd_flags(dest);
+}
+
+DISAS_INSN(sbcd_reg)
+{
+ TCGv src, dest;
+
+ gen_flush_flags(s); /* !Z is sticky */
+
+ src = gen_extend(DREG(insn, 0), OS_BYTE, 0);
+ dest = gen_extend(DREG(insn, 9), OS_BYTE, 0);
+
+ bcd_sub(dest, src);
+
+ gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
+
+ bcd_flags(dest);
+}
+
+DISAS_INSN(sbcd_mem)
+{
+ TCGv src, dest, addr;
+
+ gen_flush_flags(s); /* !Z is sticky */
+
+ /* Indirect pre-decrement load (mode 4) */
+
+ src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
+ NULL_QREG, NULL, EA_LOADU);
+ dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
+ NULL_QREG, &addr, EA_LOADU);
+
+ bcd_sub(dest, src);
+
+ gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr, EA_STORE);
+
+ bcd_flags(dest);
+}
+
+DISAS_INSN(nbcd)
+{
+ TCGv src, dest;
+ TCGv addr;
+
+ gen_flush_flags(s); /* !Z is sticky */
+
+ SRC_EA(env, src, OS_BYTE, 0, &addr);
+
+ dest = tcg_const_i32(0);
+ bcd_sub(dest, src);
+
+ DEST_EA(env, insn, OS_BYTE, dest, &addr);
+
+ bcd_flags(dest);
+
+ tcg_temp_free(dest);
+}
+
DISAS_INSN(addsub)
{
TCGv reg;
@@ -1367,42 +1674,125 @@ static void gen_push(DisasContext *s, TCGv val)
tcg_gen_subi_i32(tmp, QREG_SP, 4);
gen_store(s, OS_LONG, tmp, val);
tcg_gen_mov_i32(QREG_SP, tmp);
+ tcg_temp_free(tmp);
+}
+
+static TCGv mreg(int reg)
+{
+ if (reg < 8) {
+ /* Dx */
+ return cpu_dregs[reg];
+ }
+ /* Ax */
+ return cpu_aregs[reg & 7];
}
DISAS_INSN(movem)
{
- TCGv addr;
+ TCGv addr, incr, tmp, r[16];
+ int is_load = (insn & 0x0400) != 0;
+ int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
+ uint16_t mask = read_im16(env, s);
+ int mode = extract32(insn, 3, 3);
+ int reg0 = REG(insn, 0);
int i;
- uint16_t mask;
- TCGv reg;
- TCGv tmp;
- int is_load;
- mask = read_im16(env, s);
- tmp = gen_lea(env, s, insn, OS_LONG);
- if (IS_NULL_QREG(tmp)) {
+ tmp = cpu_aregs[reg0];
+
+ switch (mode) {
+ case 0: /* data register direct */
+ case 1: /* addr register direct */
+ do_addr_fault:
gen_addr_fault(s);
return;
+
+ case 2: /* indirect */
+ break;
+
+ case 3: /* indirect post-increment */
+ if (!is_load) {
+ /* post-increment is not allowed */
+ goto do_addr_fault;
+ }
+ break;
+
+ case 4: /* indirect pre-decrement */
+ if (is_load) {
+ /* pre-decrement is not allowed */
+ goto do_addr_fault;
+ }
+ /* We want a bare copy of the address reg, without the pre-decrement
+ adjustment that gen_lea would provide. */
+ break;
+
+ default:
+ tmp = gen_lea_mode(env, s, mode, reg0, opsize);
+ if (IS_NULL_QREG(tmp)) {
+ goto do_addr_fault;
+ }
+ break;
}
+
addr = tcg_temp_new();
tcg_gen_mov_i32(addr, tmp);
- is_load = ((insn & 0x0400) != 0);
- for (i = 0; i < 16; i++, mask >>= 1) {
- if (mask & 1) {
- if (i < 8)
- reg = DREG(i, 0);
- else
- reg = AREG(i, 0);
- if (is_load) {
- tmp = gen_load(s, OS_LONG, addr, 0);
- tcg_gen_mov_i32(reg, tmp);
- } else {
- gen_store(s, OS_LONG, addr, reg);
+ incr = tcg_const_i32(opsize_bytes(opsize));
+
+ if (is_load) {
+ /* memory to register */
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i)) {
+ r[i] = gen_load(s, opsize, addr, 1);
+ tcg_gen_add_i32(addr, addr, incr);
+ }
+ }
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i)) {
+ tcg_gen_mov_i32(mreg(i), r[i]);
+ tcg_temp_free(r[i]);
+ }
+ }
+ if (mode == 3) {
+ /* post-increment: movem (An)+,X */
+ tcg_gen_mov_i32(cpu_aregs[reg0], addr);
+ }
+ } else {
+ /* register to memory */
+ if (mode == 4) {
+ /* pre-decrement: movem X,-(An) */
+ for (i = 15; i >= 0; i--) {
+ if ((mask << i) & 0x8000) {
+ tcg_gen_sub_i32(addr, addr, incr);
+ if (reg0 + 8 == i &&
+ m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
+ /* M68020+: if the addressing register is the
+ * register moved to memory, the value written
+ * is the initial value decremented by the size of
+ * the operation, regardless of how many actual
+ * stores have been performed until this point.
+ * M68000/M68010: the value is the initial value.
+ */
+ tmp = tcg_temp_new();
+ tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
+ gen_store(s, opsize, addr, tmp);
+ tcg_temp_free(tmp);
+ } else {
+ gen_store(s, opsize, addr, mreg(i));
+ }
+ }
+ }
+ tcg_gen_mov_i32(cpu_aregs[reg0], addr);
+ } else {
+ for (i = 0; i < 16; i++) {
+ if (mask & (1 << i)) {
+ gen_store(s, opsize, addr, mreg(i));
+ tcg_gen_add_i32(addr, addr, incr);
+ }
}
- if (mask != 1)
- tcg_gen_addi_i32(addr, addr, 4);
}
}
+
+ tcg_temp_free(incr);
+ tcg_temp_free(addr);
}
DISAS_INSN(bitop_im)
@@ -1422,9 +1812,16 @@ DISAS_INSN(bitop_im)
op = (insn >> 6) & 3;
bitnum = read_im16(env, s);
- if (bitnum & 0xff00) {
- disas_undef(env, s, insn);
- return;
+ if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ if (bitnum & 0xfe00) {
+ disas_undef(env, s, insn);
+ return;
+ }
+ } else {
+ if (bitnum & 0xff00) {
+ disas_undef(env, s, insn);
+ return;
+ }
}
SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
@@ -1522,6 +1919,163 @@ DISAS_INSN(arith_im)
tcg_temp_free(dest);
}
+DISAS_INSN(cas)
+{
+ int opsize;
+ TCGv addr;
+ uint16_t ext;
+ TCGv load;
+ TCGv cmp;
+ TCGMemOp opc;
+
+ switch ((insn >> 9) & 3) {
+ case 1:
+ opsize = OS_BYTE;
+ opc = MO_SB;
+ break;
+ case 2:
+ opsize = OS_WORD;
+ opc = MO_TESW;
+ break;
+ case 3:
+ opsize = OS_LONG;
+ opc = MO_TESL;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ ext = read_im16(env, s);
+
+ /* cas Dc,Du,<EA> */
+
+ addr = gen_lea(env, s, insn, opsize);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ cmp = gen_extend(DREG(ext, 0), opsize, 1);
+
+ /* if <EA> == Dc then
+ * <EA> = Du
+ * Dc = <EA> (because <EA> == Dc)
+ * else
+ * Dc = <EA>
+ */
+
+ load = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
+ IS_USER(s), opc);
+ /* update flags before setting cmp to load */
+ gen_update_cc_cmp(s, load, cmp, opsize);
+ gen_partset_reg(opsize, DREG(ext, 0), load);
+
+ tcg_temp_free(load);
+
+ switch (extract32(insn, 3, 3)) {
+ case 3: /* Indirect postincrement. */
+ tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
+ break;
+ case 4: /* Indirect predecrement. */
+ tcg_gen_mov_i32(AREG(insn, 0), addr);
+ break;
+ }
+}
+
+DISAS_INSN(cas2w)
+{
+ uint16_t ext1, ext2;
+ TCGv addr1, addr2;
+ TCGv regs;
+
+ /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
+
+ ext1 = read_im16(env, s);
+
+ if (ext1 & 0x8000) {
+ /* Address Register */
+ addr1 = AREG(ext1, 12);
+ } else {
+ /* Data Register */
+ addr1 = DREG(ext1, 12);
+ }
+
+ ext2 = read_im16(env, s);
+ if (ext2 & 0x8000) {
+ /* Address Register */
+ addr2 = AREG(ext2, 12);
+ } else {
+ /* Data Register */
+ addr2 = DREG(ext2, 12);
+ }
+
+ /* if (R1) == Dc1 && (R2) == Dc2 then
+ * (R1) = Du1
+ * (R2) = Du2
+ * else
+ * Dc1 = (R1)
+ * Dc2 = (R2)
+ */
+
+ regs = tcg_const_i32(REG(ext2, 6) |
+ (REG(ext1, 6) << 3) |
+ (REG(ext2, 0) << 6) |
+ (REG(ext1, 0) << 9));
+ gen_helper_cas2w(cpu_env, regs, addr1, addr2);
+ tcg_temp_free(regs);
+
+ /* Note that cas2w has also assigned env->cc_op. */
+ s->cc_op = CC_OP_CMPW;
+ s->cc_op_synced = 1;
+}
+
+DISAS_INSN(cas2l)
+{
+ uint16_t ext1, ext2;
+ TCGv addr1, addr2, regs;
+
+ /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
+
+ ext1 = read_im16(env, s);
+
+ if (ext1 & 0x8000) {
+ /* Address Register */
+ addr1 = AREG(ext1, 12);
+ } else {
+ /* Data Register */
+ addr1 = DREG(ext1, 12);
+ }
+
+ ext2 = read_im16(env, s);
+ if (ext2 & 0x8000) {
+ /* Address Register */
+ addr2 = AREG(ext2, 12);
+ } else {
+ /* Data Register */
+ addr2 = DREG(ext2, 12);
+ }
+
+ /* if (R1) == Dc1 && (R2) == Dc2 then
+ * (R1) = Du1
+ * (R2) = Du2
+ * else
+ * Dc1 = (R1)
+ * Dc2 = (R2)
+ */
+
+ regs = tcg_const_i32(REG(ext2, 6) |
+ (REG(ext1, 6) << 3) |
+ (REG(ext2, 0) << 6) |
+ (REG(ext1, 0) << 9));
+ gen_helper_cas2l(cpu_env, regs, addr1, addr2);
+ tcg_temp_free(regs);
+
+ /* Note that cas2l has also assigned env->cc_op. */
+ s->cc_op = CC_OP_CMPL;
+ s->cc_op_synced = 1;
+}
+
DISAS_INSN(byterev)
{
TCGv reg;
@@ -1626,10 +2180,14 @@ DISAS_INSN(lea)
DISAS_INSN(clr)
{
int opsize;
+ TCGv zero;
+
+ zero = tcg_const_i32(0);
opsize = insn_opsize(insn);
- DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
- gen_logic_cc(s, tcg_const_i32(0), opsize);
+ DEST_EA(env, insn, opsize, zero, NULL);
+ gen_logic_cc(s, zero, opsize);
+ tcg_temp_free(zero);
}
static TCGv gen_get_ccr(DisasContext *s)
@@ -1735,6 +2293,8 @@ DISAS_INSN(swap)
tcg_gen_shli_i32(src1, reg, 16);
tcg_gen_shri_i32(src2, reg, 16);
tcg_gen_or_i32(reg, src1, src2);
+ tcg_temp_free(src2);
+ tcg_temp_free(src1);
gen_logic_cc(s, reg, OS_LONG);
}
@@ -1773,6 +2333,7 @@ DISAS_INSN(ext)
else
tcg_gen_mov_i32(reg, tmp);
gen_logic_cc(s, tmp, OS_LONG);
+ tcg_temp_free(tmp);
}
DISAS_INSN(tst)
@@ -1807,29 +2368,68 @@ DISAS_INSN(tas)
gen_logic_cc(s, src1, OS_BYTE);
tcg_gen_ori_i32(dest, src1, 0x80);
DEST_EA(env, insn, OS_BYTE, dest, &addr);
+ tcg_temp_free(dest);
}
DISAS_INSN(mull)
{
uint16_t ext;
- TCGv reg;
TCGv src1;
- TCGv dest;
+ int sign;
- /* The upper 32 bits of the product are discarded, so
- muls.l and mulu.l are functionally equivalent. */
ext = read_im16(env, s);
- if (ext & 0x87ff) {
- gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
+
+ sign = ext & 0x800;
+
+ if (ext & 0x400) {
+ if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
+ gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
+ return;
+ }
+
+ SRC_EA(env, src1, OS_LONG, 0, NULL);
+
+ if (sign) {
+ tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
+ } else {
+ tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
+ }
+ /* if Dl == Dh, 68040 returns low word */
+ tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
+ tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
+ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);
+
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+ tcg_gen_movi_i32(QREG_CC_C, 0);
+
+ set_cc_op(s, CC_OP_FLAGS);
return;
}
- reg = DREG(ext, 12);
SRC_EA(env, src1, OS_LONG, 0, NULL);
- dest = tcg_temp_new();
- tcg_gen_mul_i32(dest, src1, reg);
- tcg_gen_mov_i32(reg, dest);
- /* Unlike m68k, coldfire always clears the overflow bit. */
- gen_logic_cc(s, dest, OS_LONG);
+ if (m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ tcg_gen_movi_i32(QREG_CC_C, 0);
+ if (sign) {
+ tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
+ /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
+ tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
+ tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
+ } else {
+ tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
+ /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
+ tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
+ }
+ tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
+ tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
+
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+
+ set_cc_op(s, CC_OP_FLAGS);
+ } else {
+ /* The upper 32 bits of the product are discarded, so
+ muls.l and mulu.l are functionally equivalent. */
+ tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
+ gen_logic_cc(s, DREG(ext, 12), OS_LONG);
+ }
}
static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
@@ -1876,6 +2476,7 @@ DISAS_INSN(unlk)
tmp = gen_load(s, OS_LONG, src, 0);
tcg_gen_mov_i32(reg, tmp);
tcg_gen_addi_i32(QREG_SP, src, 4);
+ tcg_temp_free(src);
}
DISAS_INSN(nop)
@@ -1952,7 +2553,9 @@ DISAS_INSN(addsubq)
}
gen_update_cc_add(dest, val, opsize);
}
+ tcg_temp_free(val);
DEST_EA(env, insn, opsize, dest, &addr);
+ tcg_temp_free(dest);
}
DISAS_INSN(tpf)
@@ -2005,11 +2608,8 @@ DISAS_INSN(branch)
DISAS_INSN(moveq)
{
- uint32_t val;
-
- val = (int8_t)insn;
- tcg_gen_movi_i32(DREG(insn, 9), val);
- gen_logic_cc(s, tcg_const_i32(val), OS_LONG);
+ tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
+ gen_logic_cc(s, DREG(insn, 9), OS_LONG);
}
DISAS_INSN(mvzs)
@@ -2049,6 +2649,7 @@ DISAS_INSN(or)
gen_partset_reg(opsize, DREG(insn, 9), dest);
}
gen_logic_cc(s, dest, opsize);
+ tcg_temp_free(dest);
}
DISAS_INSN(suba)
@@ -2143,6 +2744,7 @@ DISAS_INSN(mov3q)
src = tcg_const_i32(val);
gen_logic_cc(s, src, OS_LONG);
DEST_EA(env, insn, OS_LONG, src, NULL);
+ tcg_temp_free(src);
}
DISAS_INSN(cmp)
@@ -2173,6 +2775,21 @@ DISAS_INSN(cmpa)
gen_update_cc_cmp(s, reg, src, OS_LONG);
}
+DISAS_INSN(cmpm)
+{
+ int opsize = insn_opsize(insn);
+ TCGv src, dst;
+
+ /* Post-increment load (mode 3) from Ay. */
+ src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
+ NULL_QREG, NULL, EA_LOADS);
+ /* Post-increment load (mode 3) from Ax. */
+ dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
+ NULL_QREG, NULL, EA_LOADS);
+
+ gen_update_cc_cmp(s, dst, src, opsize);
+}
+
DISAS_INSN(eor)
{
TCGv src;
@@ -2187,6 +2804,7 @@ DISAS_INSN(eor)
tcg_gen_xor_i32(dest, src, DREG(insn, 9));
gen_logic_cc(s, dest, opsize);
DEST_EA(env, insn, opsize, dest, &addr);
+ tcg_temp_free(dest);
}
static void do_exg(TCGv reg1, TCGv reg2)
@@ -2237,8 +2855,8 @@ DISAS_INSN(and)
tcg_gen_and_i32(dest, src, reg);
gen_partset_reg(opsize, reg, dest);
}
- tcg_temp_free(dest);
gen_logic_cc(s, dest, opsize);
+ tcg_temp_free(dest);
}
DISAS_INSN(adda)
@@ -2321,49 +2939,966 @@ DISAS_INSN(addx_mem)
gen_store(s, opsize, addr_dest, QREG_CC_N);
}
-/* TODO: This could be implemented without helper functions. */
+static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
+{
+ int count = (insn >> 9) & 7;
+ int logical = insn & 8;
+ int left = insn & 0x100;
+ int bits = opsize_bytes(opsize) * 8;
+ TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
+
+ if (count == 0) {
+ count = 8;
+ }
+
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+ if (left) {
+ tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
+ tcg_gen_shli_i32(QREG_CC_N, reg, count);
+
+ /* Note that ColdFire always clears V (done above),
+ while M68000 sets V if the most significant bit is changed at
+ any time during the shift operation */
+ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ /* if shift count >= bits, V is (reg != 0) */
+ if (count >= bits) {
+ tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
+ } else {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
+ tcg_gen_sari_i32(t0, reg, bits - count - 1);
+ tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
+ tcg_temp_free(t0);
+ }
+ tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
+ }
+ } else {
+ tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
+ if (logical) {
+ tcg_gen_shri_i32(QREG_CC_N, reg, count);
+ } else {
+ tcg_gen_sari_i32(QREG_CC_N, reg, count);
+ }
+ }
+
+ gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
+ tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+ tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
+
+ gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
+{
+ int logical = insn & 8;
+ int left = insn & 0x100;
+ int bits = opsize_bytes(opsize) * 8;
+ TCGv reg = gen_extend(DREG(insn, 0), opsize, !logical);
+ TCGv s32;
+ TCGv_i64 t64, s64;
+
+ t64 = tcg_temp_new_i64();
+ s64 = tcg_temp_new_i64();
+ s32 = tcg_temp_new();
+
+ /* Note that m68k truncates the shift count modulo 64, not 32.
+ In addition, a 64-bit shift makes it easy to find "the last
+ bit shifted out", for the carry flag. */
+ tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
+ tcg_gen_extu_i32_i64(s64, s32);
+ tcg_gen_extu_i32_i64(t64, reg);
+
+ /* Optimistically set V=0. Also used as a zero source below. */
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+ if (left) {
+ tcg_gen_shl_i64(t64, t64, s64);
+
+ if (opsize == OS_LONG) {
+ tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
+ /* Note that C=0 if shift count is 0, and we get that for free. */
+ } else {
+ TCGv zero = tcg_const_i32(0);
+ tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
+ tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
+ tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
+ s32, zero, zero, QREG_CC_C);
+ tcg_temp_free(zero);
+ }
+ tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
+
+ /* X = C, but only if the shift count was non-zero. */
+ tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
+ QREG_CC_C, QREG_CC_X);
+
+ /* M68000 sets V if the most significant bit is changed at
+ * any time during the shift operation. Do this via creating
+ * an extension of the sign bit, comparing, and discarding
+ * the bits below the sign bit. I.e.
+ * int64_t s = (intN_t)reg;
+ * int64_t t = (int64_t)(intN_t)reg << count;
+ * V = ((s ^ t) & (-1 << (bits - 1))) != 0
+ */
+ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ TCGv_i64 tt = tcg_const_i64(32);
+ /* if shift is greater than 32, use 32 */
+ tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
+ tcg_temp_free_i64(tt);
+ /* Sign extend the input to 64 bits; re-do the shift. */
+ tcg_gen_ext_i32_i64(t64, reg);
+ tcg_gen_shl_i64(s64, t64, s64);
+ /* Clear all bits that are unchanged. */
+ tcg_gen_xor_i64(t64, t64, s64);
+ /* Ignore the bits below the sign bit. */
+ tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
+ /* If any bits remain set, we have overflow. */
+ tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
+ tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
+ tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
+ }
+ } else {
+ tcg_gen_shli_i64(t64, t64, 32);
+ if (logical) {
+ tcg_gen_shr_i64(t64, t64, s64);
+ } else {
+ tcg_gen_sar_i64(t64, t64, s64);
+ }
+ tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
+
+ /* Note that C=0 if shift count is 0, and we get that for free. */
+ tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
+
+ /* X = C, but only if the shift count was non-zero. */
+ tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
+ QREG_CC_C, QREG_CC_X);
+ }
+ gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+
+ tcg_temp_free(s32);
+ tcg_temp_free_i64(s64);
+ tcg_temp_free_i64(t64);
+
+ /* Write back the result. */
+ gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(shift8_im)
+{
+ shift_im(s, insn, OS_BYTE);
+}
+
+DISAS_INSN(shift16_im)
+{
+ shift_im(s, insn, OS_WORD);
+}
+
DISAS_INSN(shift_im)
{
- TCGv reg;
- int tmp;
+ shift_im(s, insn, OS_LONG);
+}
+
+DISAS_INSN(shift8_reg)
+{
+ shift_reg(s, insn, OS_BYTE);
+}
+
+DISAS_INSN(shift16_reg)
+{
+ shift_reg(s, insn, OS_WORD);
+}
+
+DISAS_INSN(shift_reg)
+{
+ shift_reg(s, insn, OS_LONG);
+}
+
+DISAS_INSN(shift_mem)
+{
+ int logical = insn & 8;
+ int left = insn & 0x100;
+ TCGv src;
+ TCGv addr;
+
+ SRC_EA(env, src, OS_WORD, !logical, &addr);
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+ if (left) {
+ tcg_gen_shri_i32(QREG_CC_C, src, 15);
+ tcg_gen_shli_i32(QREG_CC_N, src, 1);
+
+ /* Note that ColdFire always clears V,
+ while M68000 sets V if the most significant bit is changed at
+ any time during the shift operation */
+ if (!logical && m68k_feature(s->env, M68K_FEATURE_M68000)) {
+ src = gen_extend(src, OS_WORD, 1);
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
+ }
+ } else {
+ tcg_gen_mov_i32(QREG_CC_C, src);
+ if (logical) {
+ tcg_gen_shri_i32(QREG_CC_N, src, 1);
+ } else {
+ tcg_gen_sari_i32(QREG_CC_N, src, 1);
+ }
+ }
+
+ gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
+ tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+ tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
+
+ DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+static void rotate(TCGv reg, TCGv shift, int left, int size)
+{
+ switch (size) {
+ case 8:
+ /* Replicate the 8-bit input so that a 32-bit rotate works. */
+ tcg_gen_ext8u_i32(reg, reg);
+ tcg_gen_muli_i32(reg, reg, 0x01010101);
+ goto do_long;
+ case 16:
+ /* Replicate the 16-bit input so that a 32-bit rotate works. */
+ tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
+ goto do_long;
+ do_long:
+ default:
+ if (left) {
+ tcg_gen_rotl_i32(reg, reg, shift);
+ } else {
+ tcg_gen_rotr_i32(reg, reg, shift);
+ }
+ }
+
+ /* compute flags */
+
+ switch (size) {
+ case 8:
+ tcg_gen_ext8s_i32(reg, reg);
+ break;
+ case 16:
+ tcg_gen_ext16s_i32(reg, reg);
+ break;
+ default:
+ break;
+ }
+
+ /* QREG_CC_X is not affected */
+
+ tcg_gen_mov_i32(QREG_CC_N, reg);
+ tcg_gen_mov_i32(QREG_CC_Z, reg);
+
+ if (left) {
+ tcg_gen_andi_i32(QREG_CC_C, reg, 1);
+ } else {
+ tcg_gen_shri_i32(QREG_CC_C, reg, 31);
+ }
+
+ tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
+}
+
+static void rotate_x_flags(TCGv reg, TCGv X, int size)
+{
+ switch (size) {
+ case 8:
+ tcg_gen_ext8s_i32(reg, reg);
+ break;
+ case 16:
+ tcg_gen_ext16s_i32(reg, reg);
+ break;
+ default:
+ break;
+ }
+ tcg_gen_mov_i32(QREG_CC_N, reg);
+ tcg_gen_mov_i32(QREG_CC_Z, reg);
+ tcg_gen_mov_i32(QREG_CC_X, X);
+ tcg_gen_mov_i32(QREG_CC_C, X);
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+}
+
+/* Result of rotate_x() is valid if 0 <= shift <= size */
+static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
+{
+ TCGv X, shl, shr, shx, sz, zero;
+
+ sz = tcg_const_i32(size);
+
+ shr = tcg_temp_new();
+ shl = tcg_temp_new();
+ shx = tcg_temp_new();
+ if (left) {
+ tcg_gen_mov_i32(shl, shift); /* shl = shift */
+ tcg_gen_movi_i32(shr, size + 1);
+ tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
+ tcg_gen_subi_i32(shx, shift, 1); /* shx = shift - 1 */
+ /* shx = shx < 0 ? size : shx; */
+ zero = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
+ tcg_temp_free(zero);
+ } else {
+ tcg_gen_mov_i32(shr, shift); /* shr = shift */
+ tcg_gen_movi_i32(shl, size + 1);
+ tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
+ tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
+ }
+
+ /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
+
+ tcg_gen_shl_i32(shl, reg, shl);
+ tcg_gen_shr_i32(shr, reg, shr);
+ tcg_gen_or_i32(reg, shl, shr);
+ tcg_temp_free(shl);
+ tcg_temp_free(shr);
+ tcg_gen_shl_i32(shx, QREG_CC_X, shx);
+ tcg_gen_or_i32(reg, reg, shx);
+ tcg_temp_free(shx);
+
+ /* X = (reg >> size) & 1 */
+
+ X = tcg_temp_new();
+ tcg_gen_shr_i32(X, reg, sz);
+ tcg_gen_andi_i32(X, X, 1);
+ tcg_temp_free(sz);
+
+ return X;
+}
+
+/* Result of rotate32_x() is valid if 0 <= shift < 33 */
+static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
+{
+ TCGv_i64 t0, shift64;
+ TCGv X, lo, hi, zero;
+
+ shift64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(shift64, shift);
+
+ t0 = tcg_temp_new_i64();
+
+ X = tcg_temp_new();
+ lo = tcg_temp_new();
+ hi = tcg_temp_new();
+
+ if (left) {
+ /* create [reg:X:..] */
+
+ tcg_gen_shli_i32(lo, QREG_CC_X, 31);
+ tcg_gen_concat_i32_i64(t0, lo, reg);
+
+ /* rotate */
+
+ tcg_gen_rotl_i64(t0, t0, shift64);
+ tcg_temp_free_i64(shift64);
+
+ /* result is [reg:..:reg:X] */
+
+ tcg_gen_extr_i64_i32(lo, hi, t0);
+ tcg_gen_andi_i32(X, lo, 1);
+
+ tcg_gen_shri_i32(lo, lo, 1);
+ } else {
+ /* create [..:X:reg] */
+
+ tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);
+
+ tcg_gen_rotr_i64(t0, t0, shift64);
+ tcg_temp_free_i64(shift64);
+
+ /* result is value: [X:reg:..:reg] */
+
+ tcg_gen_extr_i64_i32(lo, hi, t0);
+
+ /* extract X */
+
+ tcg_gen_shri_i32(X, hi, 31);
+
+ /* extract result */
+
+ tcg_gen_shli_i32(hi, hi, 1);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_gen_or_i32(lo, lo, hi);
+ tcg_temp_free(hi);
+
+ /* if shift == 0, register and X are not affected */
+
+ zero = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
+ tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);
+ tcg_temp_free(zero);
+ tcg_temp_free(lo);
+
+ return X;
+}
+
+DISAS_INSN(rotate_im)
+{
TCGv shift;
+ int tmp;
+ int left = (insn & 0x100);
+
+ tmp = (insn >> 9) & 7;
+ if (tmp == 0) {
+ tmp = 8;
+ }
+
+ shift = tcg_const_i32(tmp);
+ if (insn & 8) {
+ rotate(DREG(insn, 0), shift, left, 32);
+ } else {
+ TCGv X = rotate32_x(DREG(insn, 0), shift, left);
+ rotate_x_flags(DREG(insn, 0), X, 32);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(shift);
set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate8_im)
+{
+ int left = (insn & 0x100);
+ TCGv reg;
+ TCGv shift;
+ int tmp;
+
+ reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
- reg = DREG(insn, 0);
tmp = (insn >> 9) & 7;
- if (tmp == 0)
+ if (tmp == 0) {
tmp = 8;
+ }
+
shift = tcg_const_i32(tmp);
- /* No need to flush flags becuse we know we will set C flag. */
- if (insn & 0x100) {
- gen_helper_shl_cc(reg, cpu_env, reg, shift);
+ if (insn & 8) {
+ rotate(reg, shift, left, 8);
} else {
- if (insn & 8) {
- gen_helper_shr_cc(reg, cpu_env, reg, shift);
- } else {
- gen_helper_sar_cc(reg, cpu_env, reg, shift);
- }
+ TCGv X = rotate_x(reg, shift, left, 8);
+ rotate_x_flags(reg, X, 8);
+ tcg_temp_free(X);
}
+ tcg_temp_free(shift);
+ gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
+ set_cc_op(s, CC_OP_FLAGS);
}
-DISAS_INSN(shift_reg)
+DISAS_INSN(rotate16_im)
{
+ int left = (insn & 0x100);
TCGv reg;
TCGv shift;
+ int tmp;
+
+ reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
+ tmp = (insn >> 9) & 7;
+ if (tmp == 0) {
+ tmp = 8;
+ }
+
+ shift = tcg_const_i32(tmp);
+ if (insn & 8) {
+ rotate(reg, shift, left, 16);
+ } else {
+ TCGv X = rotate_x(reg, shift, left, 16);
+ rotate_x_flags(reg, X, 16);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(shift);
+ gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate_reg)
+{
+ TCGv reg;
+ TCGv src;
+ TCGv t0, t1;
+ int left = (insn & 0x100);
reg = DREG(insn, 0);
- shift = DREG(insn, 9);
- if (insn & 0x100) {
- gen_helper_shl_cc(reg, cpu_env, reg, shift);
+ src = DREG(insn, 9);
+ /* shift in [0..63] */
+ t0 = tcg_temp_new();
+ tcg_gen_andi_i32(t0, src, 63);
+ t1 = tcg_temp_new_i32();
+ if (insn & 8) {
+ tcg_gen_andi_i32(t1, src, 31);
+ rotate(reg, t1, left, 32);
+ /* if shift == 0, clear C */
+ tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
+ t0, QREG_CC_V /* 0 */,
+ QREG_CC_V /* 0 */, QREG_CC_C);
+ } else {
+ TCGv X;
+ /* modulo 33 */
+ tcg_gen_movi_i32(t1, 33);
+ tcg_gen_remu_i32(t1, t0, t1);
+ X = rotate32_x(DREG(insn, 0), t1, left);
+ rotate_x_flags(DREG(insn, 0), X, 32);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate8_reg)
+{
+ TCGv reg;
+ TCGv src;
+ TCGv t0, t1;
+ int left = (insn & 0x100);
+
+ reg = gen_extend(DREG(insn, 0), OS_BYTE, 0);
+ src = DREG(insn, 9);
+ /* shift in [0..63] */
+ t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, src, 63);
+ t1 = tcg_temp_new_i32();
+ if (insn & 8) {
+ tcg_gen_andi_i32(t1, src, 7);
+ rotate(reg, t1, left, 8);
+ /* if shift == 0, clear C */
+ tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
+ t0, QREG_CC_V /* 0 */,
+ QREG_CC_V /* 0 */, QREG_CC_C);
+ } else {
+ TCGv X;
+ /* modulo 9 */
+ tcg_gen_movi_i32(t1, 9);
+ tcg_gen_remu_i32(t1, t0, t1);
+ X = rotate_x(reg, t1, left, 8);
+ rotate_x_flags(reg, X, 8);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate16_reg)
+{
+ TCGv reg;
+ TCGv src;
+ TCGv t0, t1;
+ int left = (insn & 0x100);
+
+ reg = gen_extend(DREG(insn, 0), OS_WORD, 0);
+ src = DREG(insn, 9);
+ /* shift in [0..63] */
+ t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, src, 63);
+ t1 = tcg_temp_new_i32();
+ if (insn & 8) {
+ tcg_gen_andi_i32(t1, src, 15);
+ rotate(reg, t1, left, 16);
+ /* if shift == 0, clear C */
+ tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
+ t0, QREG_CC_V /* 0 */,
+ QREG_CC_V /* 0 */, QREG_CC_C);
+ } else {
+ TCGv X;
+ /* modulo 17 */
+ tcg_gen_movi_i32(t1, 17);
+ tcg_gen_remu_i32(t1, t0, t1);
+ X = rotate_x(reg, t1, left, 16);
+ rotate_x_flags(reg, X, 16);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(rotate_mem)
+{
+ TCGv src;
+ TCGv addr;
+ TCGv shift;
+ int left = (insn & 0x100);
+
+ SRC_EA(env, src, OS_WORD, 0, &addr);
+
+ shift = tcg_const_i32(1);
+ if (insn & 0x0200) {
+ rotate(src, shift, left, 16);
+ } else {
+ TCGv X = rotate_x(src, shift, left, 16);
+ rotate_x_flags(src, X, 16);
+ tcg_temp_free(X);
+ }
+ tcg_temp_free(shift);
+ DEST_EA(env, insn, OS_WORD, src, &addr);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(bfext_reg)
+{
+ int ext = read_im16(env, s);
+ int is_sign = insn & 0x200;
+ TCGv src = DREG(insn, 0);
+ TCGv dst = DREG(ext, 12);
+ int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
+ int ofs = extract32(ext, 6, 5); /* big bit-endian */
+ int pos = 32 - ofs - len; /* little bit-endian */
+ TCGv tmp = tcg_temp_new();
+ TCGv shift;
+
+ /* In general, we're going to rotate the field so that it's at the
+ top of the word and then right-shift by the complement of the
+ width to extend the field. */
+ if (ext & 0x20) {
+ /* Variable width. */
+ if (ext & 0x800) {
+ /* Variable offset. */
+ tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
+ tcg_gen_rotl_i32(tmp, src, tmp);
+ } else {
+ tcg_gen_rotli_i32(tmp, src, ofs);
+ }
+
+ shift = tcg_temp_new();
+ tcg_gen_neg_i32(shift, DREG(ext, 0));
+ tcg_gen_andi_i32(shift, shift, 31);
+ tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
+ if (is_sign) {
+ tcg_gen_mov_i32(dst, QREG_CC_N);
+ } else {
+ tcg_gen_shr_i32(dst, tmp, shift);
+ }
+ tcg_temp_free(shift);
} else {
- if (insn & 8) {
- gen_helper_shr_cc(reg, cpu_env, reg, shift);
+ /* Immediate width. */
+ if (ext & 0x800) {
+ /* Variable offset */
+ tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
+ tcg_gen_rotl_i32(tmp, src, tmp);
+ src = tmp;
+ pos = 32 - len;
+ } else {
+ /* Immediate offset. If the field doesn't wrap around the
+ end of the word, rely on (s)extract completely. */
+ if (pos < 0) {
+ tcg_gen_rotli_i32(tmp, src, ofs);
+ src = tmp;
+ pos = 32 - len;
+ }
+ }
+
+ tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
+ if (is_sign) {
+ tcg_gen_mov_i32(dst, QREG_CC_N);
} else {
- gen_helper_sar_cc(reg, cpu_env, reg, shift);
+ tcg_gen_extract_i32(dst, src, pos, len);
}
}
- set_cc_op(s, CC_OP_FLAGS);
+
+ tcg_temp_free(tmp);
+ set_cc_op(s, CC_OP_LOGIC);
+}
+
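Aside: bfext_reg above converts the m68k big-endian bit offset into a little-endian position for (s)extract, pos = 32 - ofs - len, rotating the source first when the field wraps past bit 0. A standalone C sketch of the unsigned case; bf_extract_u is an illustrative name, assuming ofs in 0..31 and len in 1..32:

    #include <stdint.h>

    /* Unsigned bitfield extract with an m68k-style offset counted from bit 31. */
    static uint32_t bf_extract_u(uint32_t src, unsigned ofs, unsigned len)
    {
        int pos = 32 - (int)ofs - (int)len;     /* little-endian bit position */

        if (pos < 0) {
            /* Field wraps past bit 0: rotate it to the top of the word first. */
            src = (src << ofs) | (src >> (32 - ofs));
            pos = 32 - (int)len;
        }
        return (src >> pos) & (len == 32 ? 0xffffffffu : (1u << len) - 1);
    }
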
+DISAS_INSN(bfext_mem)
+{
+ int ext = read_im16(env, s);
+ int is_sign = insn & 0x200;
+ TCGv dest = DREG(ext, 12);
+ TCGv addr, len, ofs;
+
+ addr = gen_lea(env, s, insn, OS_UNSIZED);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ if (ext & 0x20) {
+ len = DREG(ext, 0);
+ } else {
+ len = tcg_const_i32(extract32(ext, 0, 5));
+ }
+ if (ext & 0x800) {
+ ofs = DREG(ext, 6);
+ } else {
+ ofs = tcg_const_i32(extract32(ext, 6, 5));
+ }
+
+ if (is_sign) {
+ gen_helper_bfexts_mem(dest, cpu_env, addr, ofs, len);
+ tcg_gen_mov_i32(QREG_CC_N, dest);
+ } else {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ gen_helper_bfextu_mem(tmp, cpu_env, addr, ofs, len);
+ tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
+ tcg_temp_free_i64(tmp);
+ }
+ set_cc_op(s, CC_OP_LOGIC);
+
+ if (!(ext & 0x20)) {
+ tcg_temp_free(len);
+ }
+ if (!(ext & 0x800)) {
+ tcg_temp_free(ofs);
+ }
+}
+
+DISAS_INSN(bfop_reg)
+{
+ int ext = read_im16(env, s);
+ TCGv src = DREG(insn, 0);
+ int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
+ int ofs = extract32(ext, 6, 5); /* big bit-endian */
+ TCGv mask, tofs, tlen;
+
+ TCGV_UNUSED(tofs);
+ TCGV_UNUSED(tlen);
+ if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
+ tofs = tcg_temp_new();
+ tlen = tcg_temp_new();
+ }
+
+ if ((ext & 0x820) == 0) {
+ /* Immediate width and offset. */
+ uint32_t maski = 0x7fffffffu >> (len - 1);
+ if (ofs + len <= 32) {
+ tcg_gen_shli_i32(QREG_CC_N, src, ofs);
+ } else {
+ tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
+ }
+ tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
+ mask = tcg_const_i32(ror32(maski, ofs));
+ if (!TCGV_IS_UNUSED(tofs)) {
+ tcg_gen_movi_i32(tofs, ofs);
+ tcg_gen_movi_i32(tlen, len);
+ }
+ } else {
+ TCGv tmp = tcg_temp_new();
+ if (ext & 0x20) {
+ /* Variable width */
+ tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
+ tcg_gen_andi_i32(tmp, tmp, 31);
+ mask = tcg_const_i32(0x7fffffffu);
+ tcg_gen_shr_i32(mask, mask, tmp);
+ if (!TCGV_IS_UNUSED(tlen)) {
+ tcg_gen_addi_i32(tlen, tmp, 1);
+ }
+ } else {
+ /* Immediate width */
+ mask = tcg_const_i32(0x7fffffffu >> (len - 1));
+ if (!TCGV_IS_UNUSED(tlen)) {
+ tcg_gen_movi_i32(tlen, len);
+ }
+ }
+ if (ext & 0x800) {
+ /* Variable offset */
+ tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
+ tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
+ tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
+ tcg_gen_rotr_i32(mask, mask, tmp);
+ if (!TCGV_IS_UNUSED(tofs)) {
+ tcg_gen_mov_i32(tofs, tmp);
+ }
+ } else {
+ /* Immediate offset (and variable width) */
+ tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
+ tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
+ tcg_gen_rotri_i32(mask, mask, ofs);
+ if (!TCGV_IS_UNUSED(tofs)) {
+ tcg_gen_movi_i32(tofs, ofs);
+ }
+ }
+ tcg_temp_free(tmp);
+ }
+ set_cc_op(s, CC_OP_LOGIC);
+
+ switch (insn & 0x0f00) {
+ case 0x0a00: /* bfchg */
+ tcg_gen_eqv_i32(src, src, mask);
+ break;
+ case 0x0c00: /* bfclr */
+ tcg_gen_and_i32(src, src, mask);
+ break;
+ case 0x0d00: /* bfffo */
+ gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
+ tcg_temp_free(tlen);
+ tcg_temp_free(tofs);
+ break;
+ case 0x0e00: /* bfset */
+ tcg_gen_orc_i32(src, src, mask);
+ break;
+ case 0x0800: /* bftst */
+ /* flags already set; no other work to do. */
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free(mask);
+}
+
+DISAS_INSN(bfop_mem)
+{
+ int ext = read_im16(env, s);
+ TCGv addr, len, ofs;
+ TCGv_i64 t64;
+
+ addr = gen_lea(env, s, insn, OS_UNSIZED);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ if (ext & 0x20) {
+ len = DREG(ext, 0);
+ } else {
+ len = tcg_const_i32(extract32(ext, 0, 5));
+ }
+ if (ext & 0x800) {
+ ofs = DREG(ext, 6);
+ } else {
+ ofs = tcg_const_i32(extract32(ext, 6, 5));
+ }
+
+ switch (insn & 0x0f00) {
+ case 0x0a00: /* bfchg */
+ gen_helper_bfchg_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ break;
+ case 0x0c00: /* bfclr */
+ gen_helper_bfclr_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ break;
+ case 0x0d00: /* bfffo */
+ t64 = tcg_temp_new_i64();
+ gen_helper_bfffo_mem(t64, cpu_env, addr, ofs, len);
+ tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
+ tcg_temp_free_i64(t64);
+ break;
+ case 0x0e00: /* bfset */
+ gen_helper_bfset_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ break;
+ case 0x0800: /* bftst */
+ gen_helper_bfexts_mem(QREG_CC_N, cpu_env, addr, ofs, len);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ set_cc_op(s, CC_OP_LOGIC);
+
+ if (!(ext & 0x20)) {
+ tcg_temp_free(len);
+ }
+ if (!(ext & 0x800)) {
+ tcg_temp_free(ofs);
+ }
+}
+
+DISAS_INSN(bfins_reg)
+{
+ int ext = read_im16(env, s);
+ TCGv dst = DREG(insn, 0);
+ TCGv src = DREG(ext, 12);
+ int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
+ int ofs = extract32(ext, 6, 5); /* big bit-endian */
+ int pos = 32 - ofs - len; /* little bit-endian */
+ TCGv tmp;
+
+ tmp = tcg_temp_new();
+
+ if (ext & 0x20) {
+ /* Variable width */
+ tcg_gen_neg_i32(tmp, DREG(ext, 0));
+ tcg_gen_andi_i32(tmp, tmp, 31);
+ tcg_gen_shl_i32(QREG_CC_N, src, tmp);
+ } else {
+ /* Immediate width */
+ tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
+ }
+ set_cc_op(s, CC_OP_LOGIC);
+
+ /* Immediate width and offset */
+ if ((ext & 0x820) == 0) {
+ /* Check for suitability for deposit. */
+ if (pos >= 0) {
+ tcg_gen_deposit_i32(dst, dst, src, pos, len);
+ } else {
+ uint32_t maski = -2U << (len - 1);
+ uint32_t roti = (ofs + len) & 31;
+ tcg_gen_andi_i32(tmp, src, ~maski);
+ tcg_gen_rotri_i32(tmp, tmp, roti);
+ tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
+ tcg_gen_or_i32(dst, dst, tmp);
+ }
+ } else {
+ TCGv mask = tcg_temp_new();
+ TCGv rot = tcg_temp_new();
+
+ if (ext & 0x20) {
+ /* Variable width */
+ tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
+ tcg_gen_andi_i32(rot, rot, 31);
+ tcg_gen_movi_i32(mask, -2);
+ tcg_gen_shl_i32(mask, mask, rot);
+ tcg_gen_mov_i32(rot, DREG(ext, 0));
+ tcg_gen_andc_i32(tmp, src, mask);
+ } else {
+ /* Immediate width (variable offset) */
+ uint32_t maski = -2U << (len - 1);
+ tcg_gen_andi_i32(tmp, src, ~maski);
+ tcg_gen_movi_i32(mask, maski);
+ tcg_gen_movi_i32(rot, len & 31);
+ }
+ if (ext & 0x800) {
+ /* Variable offset */
+ tcg_gen_add_i32(rot, rot, DREG(ext, 6));
+ } else {
+ /* Immediate offset (variable width) */
+ tcg_gen_addi_i32(rot, rot, ofs);
+ }
+ tcg_gen_andi_i32(rot, rot, 31);
+ tcg_gen_rotr_i32(mask, mask, rot);
+ tcg_gen_rotr_i32(tmp, tmp, rot);
+ tcg_gen_and_i32(dst, dst, mask);
+ tcg_gen_or_i32(dst, dst, tmp);
+
+ tcg_temp_free(rot);
+ tcg_temp_free(mask);
+ }
+ tcg_temp_free(tmp);
+}
+
+DISAS_INSN(bfins_mem)
+{
+ int ext = read_im16(env, s);
+ TCGv src = DREG(ext, 12);
+ TCGv addr, len, ofs;
+
+ addr = gen_lea(env, s, insn, OS_UNSIZED);
+ if (IS_NULL_QREG(addr)) {
+ gen_addr_fault(s);
+ return;
+ }
+
+ if (ext & 0x20) {
+ len = DREG(ext, 0);
+ } else {
+ len = tcg_const_i32(extract32(ext, 0, 5));
+ }
+ if (ext & 0x800) {
+ ofs = DREG(ext, 6);
+ } else {
+ ofs = tcg_const_i32(extract32(ext, 6, 5));
+ }
+
+ gen_helper_bfins_mem(QREG_CC_N, cpu_env, addr, src, ofs, len);
+ set_cc_op(s, CC_OP_LOGIC);
+
+ if (!(ext & 0x20)) {
+ tcg_temp_free(len);
+ }
+ if (!(ext & 0x800)) {
+ tcg_temp_free(ofs);
+ }
}
DISAS_INSN(ff1)
@@ -3312,6 +4847,11 @@ void register_m68k_insns (CPUM68KState *env)
BASE(bitop_im, 08c0, ffc0);
INSN(arith_im, 0a80, fff8, CF_ISA_A);
INSN(arith_im, 0a00, ff00, M68000);
+ INSN(cas, 0ac0, ffc0, CAS);
+ INSN(cas, 0cc0, ffc0, CAS);
+ INSN(cas, 0ec0, ffc0, CAS);
+ INSN(cas2w, 0cfc, ffff, CAS);
+ INSN(cas2l, 0efc, ffff, CAS);
BASE(move, 1000, f000);
BASE(move, 2000, f000);
BASE(move, 3000, f000);
@@ -3334,11 +4874,14 @@ void register_m68k_insns (CPUM68KState *env)
INSN(not, 4600, ff00, M68000);
INSN(undef, 46c0, ffc0, M68000);
INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
+ INSN(nbcd, 4800, ffc0, M68000);
INSN(linkl, 4808, fff8, M68000);
BASE(pea, 4840, ffc0);
BASE(swap, 4840, fff8);
INSN(bkpt, 4848, fff8, BKPT);
- BASE(movem, 48c0, fbc0);
+ INSN(movem, 48d0, fbf8, CF_ISA_A);
+ INSN(movem, 48e8, fbf8, CF_ISA_A);
+ INSN(movem, 4880, fb80, M68000);
BASE(ext, 4880, fff8);
BASE(ext, 48c0, fff8);
BASE(ext, 49c0, fff8);
@@ -3385,6 +4928,8 @@ void register_m68k_insns (CPUM68KState *env)
INSN(mvzs, 7100, f100, CF_ISA_B);
BASE(or, 8000, f000);
BASE(divw, 80c0, f0c0);
+ INSN(sbcd_reg, 8100, f1f8, M68000);
+ INSN(sbcd_mem, 8108, f1f8, M68000);
BASE(addsub, 9000, f000);
INSN(undef, 90c0, f0c0, CF_ISA_A);
INSN(subx_reg, 9180, f1f8, CF_ISA_A);
@@ -3414,6 +4959,7 @@ void register_m68k_insns (CPUM68KState *env)
INSN(cmpa, b1c0, f1c0, CF_ISA_A);
INSN(cmp, b000, f100, M68000);
INSN(eor, b100, f100, M68000);
+ INSN(cmpm, b108, f138, M68000);
INSN(cmpa, b0c0, f0c0, M68000);
INSN(eor, b180, f1c0, CF_ISA_A);
BASE(and, c000, f000);
@@ -3421,6 +4967,8 @@ void register_m68k_insns (CPUM68KState *env)
INSN(exg_aa, c148, f1f8, M68000);
INSN(exg_da, c188, f1f8, M68000);
BASE(mulw, c0c0, f0c0);
+ INSN(abcd_reg, c100, f1f8, M68000);
+ INSN(abcd_mem, c108, f1f8, M68000);
BASE(addsub, d000, f000);
INSN(undef, d0c0, f0c0, CF_ISA_A);
INSN(addx_reg, d180, f1f8, CF_ISA_A);
@@ -3430,6 +4978,34 @@ void register_m68k_insns (CPUM68KState *env)
INSN(adda, d0c0, f0c0, M68000);
INSN(shift_im, e080, f0f0, CF_ISA_A);
INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
+ INSN(shift8_im, e000, f0f0, M68000);
+ INSN(shift16_im, e040, f0f0, M68000);
+ INSN(shift_im, e080, f0f0, M68000);
+ INSN(shift8_reg, e020, f0f0, M68000);
+ INSN(shift16_reg, e060, f0f0, M68000);
+ INSN(shift_reg, e0a0, f0f0, M68000);
+ INSN(shift_mem, e0c0, fcc0, M68000);
+ INSN(rotate_im, e090, f0f0, M68000);
+ INSN(rotate8_im, e010, f0f0, M68000);
+ INSN(rotate16_im, e050, f0f0, M68000);
+ INSN(rotate_reg, e0b0, f0f0, M68000);
+ INSN(rotate8_reg, e030, f0f0, M68000);
+ INSN(rotate16_reg, e070, f0f0, M68000);
+ INSN(rotate_mem, e4c0, fcc0, M68000);
+ INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */
+ INSN(bfext_reg, e9c0, fdf8, BITFIELD);
+ INSN(bfins_mem, efc0, ffc0, BITFIELD);
+ INSN(bfins_reg, efc0, fff8, BITFIELD);
+ INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */
+ INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */
+ INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */
+ INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */
+ INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */
+ INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */
+ INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */
+ INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */
+ INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */
+ INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */
INSN(undef_fpu, f000, f000, CF_ISA_A);
INSN(fpu, f200, ffc0, CF_FPU);
INSN(fbcc, f280, ffc0, CF_FPU);
@@ -3446,11 +5022,9 @@ void register_m68k_insns (CPUM68KState *env)
write back the result to memory before setting the condition codes. */
static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
{
- uint16_t insn;
-
- insn = read_im16(env, s);
-
+ uint16_t insn = read_im16(env, s);
opcode_table[insn](env, s, insn);
+ do_writebacks(s);
}
/* generate intermediate code for basic block 'tb'. */
@@ -3478,6 +5052,7 @@ void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
dc->fpcr = env->fpcr;
dc->user = (env->sr & SR_S) == 0;
dc->done_mac = 0;
+ dc->writeback_mask = 0;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0) {
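Aside: the INSN()/BASE() entries registered above describe each instruction as a 16-bit (opcode, mask) pattern, and disas_m68k_insn() then dispatches with a single opcode_table[insn] lookup. A minimal sketch of that scheme, assuming a bare 65536-entry table; the real handlers also receive the CPU state and DisasContext, and the sketch names are illustrative:

    #include <stdint.h>

    typedef void (*disas_fn)(uint16_t insn);

    static disas_fn opcode_table_sketch[65536];

    /* Register handler f for every 16-bit opcode where (op & mask) == pattern. */
    static void register_insn_sketch(disas_fn f, uint16_t pattern, uint16_t mask)
    {
        uint32_t op;

        for (op = 0; op < 65536; op++) {
            if ((op & mask) == pattern) {
                opcode_table_sketch[op] = f;
            }
        }
    }

    /* Decode one instruction: a single table lookup on the first opcode word. */
    static void dispatch_sketch(uint16_t insn)
    {
        if (opcode_table_sketch[insn]) {
            opcode_table_sketch[insn](insn);
        }
    }
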
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 389c7b691e..3d58869716 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -103,9 +103,8 @@ static void mb_cpu_reset(CPUState *s)
mcc->parent_reset(s);
- memset(env, 0, offsetof(CPUMBState, pvr));
+ memset(env, 0, offsetof(CPUMBState, end_reset_fields));
env->res_addr = RES_ADDR_NONE;
- tlb_flush(s, 1);
/* Disable stack protector. */
env->shr = ~0;
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index beb75ffd26..bf6963bcb7 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -267,6 +267,9 @@ struct CPUMBState {
struct microblaze_mmu mmu;
#endif
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
/* These fields are preserved on reset. */
diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
index bd13826de0..71a6c0858d 100644
--- a/target/microblaze/helper.h
+++ b/target/microblaze/helper.h
@@ -3,7 +3,6 @@ DEF_HELPER_1(debug, void, env)
DEF_HELPER_FLAGS_3(carry, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
DEF_HELPER_2(cmp, i32, i32, i32)
DEF_HELPER_2(cmpu, i32, i32, i32)
-DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_3(divs, i32, env, i32, i32)
DEF_HELPER_3(divu, i32, env, i32, i32)
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index a22a496ebb..a0f06758f8 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -255,7 +255,7 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
/* Changes to the zone protection reg flush the QEMU TLB.
Fortunately, these are very uncommon. */
if (v != env->mmu.regs[rn]) {
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
env->mmu.regs[rn] = v;
break;
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index 4a856e6204..1e07e21c1c 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -145,11 +145,6 @@ uint32_t helper_cmpu(uint32_t a, uint32_t b)
return t;
}
-uint32_t helper_clz(uint32_t t0)
-{
- return clz32(t0);
-}
-
uint32_t helper_carry(uint32_t a, uint32_t b, uint32_t cf)
{
return compute_carry(a, b, cf);
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index de2090ac71..0bb609513c 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -768,7 +768,7 @@ static void dec_bit(DisasContext *dc)
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
- gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
+ tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
}
break;
case 0x1e0:
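Aside: the microblaze change above drops the clz helper in favour of tcg_gen_clzi_i32(dst, src, 32), i.e. count-leading-zeros of a 32-bit value with 32 returned for a zero input. A C equivalent of that semantic; clz32_model is an illustrative name:

    #include <stdint.h>

    /* Count leading zeros of a 32-bit value; returns 32 for zero input. */
    static uint32_t clz32_model(uint32_t x)
    {
        uint32_t n = 0;

        if (x == 0) {
            return 32;
        }
        while (!(x & 0x80000000u)) {
            x <<= 1;
            n++;
        }
        return n;
    }
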
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index 65ca607f88..1bb66b7a5a 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -100,8 +100,7 @@ static void mips_cpu_reset(CPUState *s)
mcc->parent_reset(s);
- memset(env, 0, offsetof(CPUMIPSState, mvp));
- tlb_flush(s, 1);
+ memset(env, 0, offsetof(CPUMIPSState, end_reset_fields));
cpu_state_reset(env);
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
index 5182dc74ff..e1c78f55ec 100644
--- a/target/mips/cpu.h
+++ b/target/mips/cpu.h
@@ -607,6 +607,9 @@ struct CPUMIPSState {
uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */
int insn_flags; /* Supported instruction set */
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
@@ -1051,7 +1054,7 @@ static inline void compute_hflags(CPUMIPSState *env)
}
}
-void cpu_mips_tlb_flush(CPUMIPSState *env, int flush_global);
+void cpu_mips_tlb_flush(CPUMIPSState *env);
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);
diff --git a/target/mips/helper.c b/target/mips/helper.c
index c864b15b97..d2e77958fd 100644
--- a/target/mips/helper.c
+++ b/target/mips/helper.c
@@ -223,12 +223,12 @@ static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
return ret;
}
-void cpu_mips_tlb_flush(CPUMIPSState *env, int flush_global)
+void cpu_mips_tlb_flush(CPUMIPSState *env)
{
MIPSCPU *cpu = mips_env_get_cpu(env);
/* Flush qemu's TLB and discard all shadowed entries. */
- tlb_flush(CPU(cpu), flush_global);
+ tlb_flush(CPU(cpu));
env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
@@ -290,7 +290,7 @@ void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
#if defined(TARGET_MIPS64)
if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
/* Access to at least one of the 64-bit segments has been disabled */
- cpu_mips_tlb_flush(env, 1);
+ cpu_mips_tlb_flush(env);
}
#endif
if (env->CP0_Config3 & (1 << CP0C3_MT)) {
diff --git a/target/mips/helper.h b/target/mips/helper.h
index 666936c81b..60efa01194 100644
--- a/target/mips/helper.h
+++ b/target/mips/helper.h
@@ -20,13 +20,6 @@ DEF_HELPER_4(scd, tl, env, tl, tl, int)
#endif
#endif
-DEF_HELPER_FLAGS_1(clo, TCG_CALL_NO_RWG_SE, tl, tl)
-DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, tl, tl)
-#ifdef TARGET_MIPS64
-DEF_HELPER_FLAGS_1(dclo, TCG_CALL_NO_RWG_SE, tl, tl)
-DEF_HELPER_FLAGS_1(dclz, TCG_CALL_NO_RWG_SE, tl, tl)
-#endif
-
DEF_HELPER_3(muls, tl, env, tl, tl)
DEF_HELPER_3(mulsu, tl, env, tl, tl)
DEF_HELPER_3(macc, tl, env, tl, tl)
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
index dcf5fbba0c..998c3412c3 100644
--- a/target/mips/kvm.c
+++ b/target/mips/kvm.c
@@ -55,6 +55,11 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
return 0;
}
+int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
MIPSCPU *cpu = MIPS_CPU(cs);
diff --git a/target/mips/machine.c b/target/mips/machine.c
index d20d948457..38c8fe9328 100644
--- a/target/mips/machine.c
+++ b/target/mips/machine.c
@@ -19,7 +19,7 @@ static int cpu_post_load(void *opaque, int version_id)
/* FPU state */
-static int get_fpr(QEMUFile *f, void *pv, size_t size)
+static int get_fpr(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
int i;
fpr_t *v = pv;
@@ -30,7 +30,8 @@ static int get_fpr(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void put_fpr(QEMUFile *f, void *pv, size_t size)
+static int put_fpr(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
int i;
fpr_t *v = pv;
@@ -38,6 +39,8 @@ static void put_fpr(QEMUFile *f, void *pv, size_t size)
for (i = 0; i < MSA_WRLEN/64; i++) {
qemu_put_sbe64s(f, &v->wr.d[i]);
}
+
+ return 0;
}
const VMStateInfo vmstate_info_fpr = {
@@ -124,7 +127,7 @@ const VMStateDescription vmstate_mvp = {
/* TLB state */
-static int get_tlb(QEMUFile *f, void *pv, size_t size)
+static int get_tlb(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
r4k_tlb_t *v = pv;
uint16_t flags;
@@ -151,7 +154,8 @@ static int get_tlb(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void put_tlb(QEMUFile *f, void *pv, size_t size)
+static int put_tlb(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
r4k_tlb_t *v = pv;
@@ -175,6 +179,8 @@ static void put_tlb(QEMUFile *f, void *pv, size_t size)
qemu_put_be16s(f, &flags);
qemu_put_be64s(f, &v->PFN[0]);
qemu_put_be64s(f, &v->PFN[1]);
+
+ return 0;
}
const VMStateInfo vmstate_info_tlb = {
diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c
index 7af4c2f084..b683fcb025 100644
--- a/target/mips/op_helper.c
+++ b/target/mips/op_helper.c
@@ -103,28 +103,6 @@ HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
-target_ulong helper_clo (target_ulong arg1)
-{
- return clo32(arg1);
-}
-
-target_ulong helper_clz (target_ulong arg1)
-{
- return clz32(arg1);
-}
-
-#if defined(TARGET_MIPS64)
-target_ulong helper_dclo (target_ulong arg1)
-{
- return clo64(arg1);
-}
-
-target_ulong helper_dclz (target_ulong arg1)
-{
- return clz64(arg1);
-}
-#endif /* TARGET_MIPS64 */
-
/* 64 bits arithmetic for 32 bits hosts */
static inline uint64_t get_HILO(CPUMIPSState *env)
{
@@ -1431,7 +1409,7 @@ void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
/* If the ASID changes, flush qemu's TLB. */
if ((old & env->CP0_EntryHi_ASID_mask) !=
(val & env->CP0_EntryHi_ASID_mask)) {
- cpu_mips_tlb_flush(env, 1);
+ cpu_mips_tlb_flush(env);
}
}
@@ -2021,7 +1999,7 @@ void r4k_helper_tlbinv(CPUMIPSState *env)
tlb->EHINV = 1;
}
}
- cpu_mips_tlb_flush(env, 1);
+ cpu_mips_tlb_flush(env);
}
void r4k_helper_tlbinvf(CPUMIPSState *env)
@@ -2031,7 +2009,7 @@ void r4k_helper_tlbinvf(CPUMIPSState *env)
for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
}
- cpu_mips_tlb_flush(env, 1);
+ cpu_mips_tlb_flush(env);
}
void r4k_helper_tlbwi(CPUMIPSState *env)
@@ -2145,7 +2123,7 @@ void r4k_helper_tlbr(CPUMIPSState *env)
/* If this will change the current ASID, flush qemu's TLB. */
if (ASID != tlb->ASID)
- cpu_mips_tlb_flush (env, 1);
+ cpu_mips_tlb_flush(env);
r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
diff --git a/target/mips/translate.c b/target/mips/translate.c
index 57b824ff2d..7f8ecf42c2 100644
--- a/target/mips/translate.c
+++ b/target/mips/translate.c
@@ -3626,29 +3626,38 @@ static void gen_cl (DisasContext *ctx, uint32_t opc,
/* Treat as NOP. */
return;
}
- t0 = tcg_temp_new();
+ t0 = cpu_gpr[rd];
gen_load_gpr(t0, rs);
+
switch (opc) {
case OPC_CLO:
case R6_OPC_CLO:
- gen_helper_clo(cpu_gpr[rd], t0);
+#if defined(TARGET_MIPS64)
+ case OPC_DCLO:
+ case R6_OPC_DCLO:
+#endif
+ tcg_gen_not_tl(t0, t0);
break;
+ }
+
+ switch (opc) {
+ case OPC_CLO:
+ case R6_OPC_CLO:
case OPC_CLZ:
case R6_OPC_CLZ:
- gen_helper_clz(cpu_gpr[rd], t0);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_clzi_tl(t0, t0, TARGET_LONG_BITS);
+ tcg_gen_subi_tl(t0, t0, TARGET_LONG_BITS - 32);
break;
#if defined(TARGET_MIPS64)
case OPC_DCLO:
case R6_OPC_DCLO:
- gen_helper_dclo(cpu_gpr[rd], t0);
- break;
case OPC_DCLZ:
case R6_OPC_DCLZ:
- gen_helper_dclz(cpu_gpr[rd], t0);
+ tcg_gen_clzi_i64(t0, t0, 64);
break;
#endif
}
- tcg_temp_free(t0);
}
/* Godson integer instructions */
@@ -4488,11 +4497,12 @@ static void gen_bitops (DisasContext *ctx, uint32_t opc, int rt,
if (lsb + msb > 31) {
goto fail;
}
- tcg_gen_shri_tl(t0, t1, lsb);
if (msb != 31) {
- tcg_gen_andi_tl(t0, t0, (1U << (msb + 1)) - 1);
+ tcg_gen_extract_tl(t0, t1, lsb, msb + 1);
} else {
- tcg_gen_ext32s_tl(t0, t0);
+ /* The two checks together imply that lsb == 0,
+ so this is a simple sign-extension. */
+ tcg_gen_ext32s_tl(t0, t1);
}
break;
#if defined(TARGET_MIPS64)
@@ -4507,10 +4517,7 @@ static void gen_bitops (DisasContext *ctx, uint32_t opc, int rt,
if (lsb + msb > 63) {
goto fail;
}
- tcg_gen_shri_tl(t0, t1, lsb);
- if (msb != 63) {
- tcg_gen_andi_tl(t0, t0, (1ULL << (msb + 1)) - 1);
- }
+ tcg_gen_extract_tl(t0, t1, lsb, msb + 1);
break;
#endif
case OPC_INS:
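Aside: the gen_cl() rewrite above folds CLO into CLZ by complementing the source first, and performs the 32-bit count in a target-long-wide register by zero-extending and then subtracting TARGET_LONG_BITS - 32. A C sketch of the 64-bit-register case; the function names are illustrative:

    #include <stdint.h>

    /* Count leading zeros of a 64-bit value; returns 64 for zero input. */
    static unsigned clz64_model(uint64_t x)
    {
        unsigned n = 64;

        while (x) {
            x >>= 1;
            n--;
        }
        return n;
    }

    /* CLO on a 32-bit value held in a 64-bit register: complement, zero-extend,
     * count over 64 bits, then rebase the result to a 32-bit count. */
    static unsigned clo32_model(uint32_t x)
    {
        return clz64_model((uint64_t)(uint32_t)~x) - 32;
    }
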
diff --git a/target/moxie/cpu.c b/target/moxie/cpu.c
index b0be4a7551..927b1a1e44 100644
--- a/target/moxie/cpu.c
+++ b/target/moxie/cpu.c
@@ -45,10 +45,8 @@ static void moxie_cpu_reset(CPUState *s)
mcc->parent_reset(s);
- memset(env, 0, sizeof(CPUMoxieState));
+ memset(env, 0, offsetof(CPUMoxieState, end_reset_fields));
env->pc = 0x1000;
-
- tlb_flush(s, 1);
}
static void moxie_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
diff --git a/target/moxie/cpu.h b/target/moxie/cpu.h
index 3e880facf4..8991aaef9a 100644
--- a/target/moxie/cpu.h
+++ b/target/moxie/cpu.h
@@ -56,6 +56,9 @@ typedef struct CPUMoxieState {
void *irq[8];
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
} CPUMoxieState;
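Aside: several of the cpu.h changes above insert an empty end_reset_fields marker so the reset handler can clear everything declared before it with one memset and leave the rest untouched. A minimal sketch of the pattern with a made-up state structure (struct {} has zero size as a GNU C extension, which this idiom relies on):

    #include <stddef.h>
    #include <string.h>

    typedef struct ToyCPUState {
        unsigned pc;                    /* cleared by reset */
        unsigned regs[8];               /* cleared by reset */
        struct {} end_reset_fields;     /* zero-size marker (GNU C extension) */
        unsigned config;                /* preserved across reset */
    } ToyCPUState;

    static void toy_cpu_reset(ToyCPUState *env)
    {
        /* Clears everything declared before the marker, nothing after it. */
        memset(env, 0, offsetof(ToyCPUState, end_reset_fields));
    }
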
diff --git a/target/nios2/Makefile.objs b/target/nios2/Makefile.objs
new file mode 100644
index 0000000000..2a11c5ce08
--- /dev/null
+++ b/target/nios2/Makefile.objs
@@ -0,0 +1,4 @@
+obj-y += translate.o op_helper.o helper.o cpu.o mmu.o
+obj-$(CONFIG_SOFTMMU) += monitor.o
+
+$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
new file mode 100644
index 0000000000..d56bb7245a
--- /dev/null
+++ b/target/nios2/cpu.c
@@ -0,0 +1,237 @@
+/*
+ * QEMU Nios II CPU
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "exec/log.h"
+#include "exec/gdbstub.h"
+#include "hw/qdev-properties.h"
+
+static void nios2_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ env->regs[R_PC] = value;
+}
+
+static bool nios2_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
+}
+
+/* CPUClass::reset() */
+static void nios2_cpu_reset(CPUState *cs)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(cpu);
+ CPUNios2State *env = &cpu->env;
+
+ if (qemu_loglevel_mask(CPU_LOG_RESET)) {
+ qemu_log("CPU Reset (CPU %d)\n", cs->cpu_index);
+ log_cpu_state(cs, 0);
+ }
+
+ ncc->parent_reset(cs);
+
+ memset(env->regs, 0, sizeof(uint32_t) * NUM_CORE_REGS);
+ env->regs[R_PC] = cpu->reset_addr;
+
+#if defined(CONFIG_USER_ONLY)
+ /* Start in user mode with interrupts enabled. */
+ env->regs[CR_STATUS] = CR_STATUS_U | CR_STATUS_PIE;
+#else
+ env->regs[CR_STATUS] = 0;
+#endif
+}
+
+static void nios2_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ Nios2CPU *cpu = NIOS2_CPU(obj);
+ CPUNios2State *env = &cpu->env;
+ static bool tcg_initialized;
+
+ cs->env_ptr = env;
+
+#if !defined(CONFIG_USER_ONLY)
+ mmu_init(env);
+#endif
+
+ if (tcg_enabled() && !tcg_initialized) {
+ tcg_initialized = true;
+ nios2_tcg_init();
+ }
+}
+
+Nios2CPU *cpu_nios2_init(const char *cpu_model)
+{
+ Nios2CPU *cpu = NIOS2_CPU(object_new(TYPE_NIOS2_CPU));
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+ return cpu;
+}
+
+static void nios2_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ Nios2CPUClass *ncc = NIOS2_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ ncc->parent_realize(dev, errp);
+}
+
+static bool nios2_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->regs[CR_STATUS] & CR_STATUS_PIE)) {
+ cs->exception_index = EXCP_IRQ;
+ nios2_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+
+static void nios2_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ /* NOTE: NiosII R2 is not supported yet. */
+ info->mach = bfd_arch_nios2;
+#ifdef TARGET_WORDS_BIGENDIAN
+ info->print_insn = print_insn_big_nios2;
+#else
+ info->print_insn = print_insn_little_nios2;
+#endif
+}
+
+static int nios2_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUNios2State *env = &cpu->env;
+
+ if (n > cc->gdb_num_core_regs) {
+ return 0;
+ }
+
+ if (n < 32) { /* GP regs */
+ return gdb_get_reg32(mem_buf, env->regs[n]);
+ } else if (n == 32) { /* PC */
+ return gdb_get_reg32(mem_buf, env->regs[R_PC]);
+ } else if (n < 49) { /* Status regs */
+ return gdb_get_reg32(mem_buf, env->regs[n - 1]);
+ }
+
+ /* Invalid regs */
+ return 0;
+}
+
+static int nios2_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUNios2State *env = &cpu->env;
+
+ if (n > cc->gdb_num_core_regs) {
+ return 0;
+ }
+
+ if (n < 32) { /* GP regs */
+ env->regs[n] = ldl_p(mem_buf);
+ } else if (n == 32) { /* PC */
+ env->regs[R_PC] = ldl_p(mem_buf);
+ } else if (n < 49) { /* Status regs */
+ env->regs[n - 1] = ldl_p(mem_buf);
+ }
+
+ return 4;
+}
+
+static Property nios2_properties[] = {
+ DEFINE_PROP_BOOL("mmu_present", Nios2CPU, mmu_present, true),
+ /* ALTR,pid-num-bits */
+ DEFINE_PROP_UINT32("mmu_pid_num_bits", Nios2CPU, pid_num_bits, 8),
+ /* ALTR,tlb-num-ways */
+ DEFINE_PROP_UINT32("mmu_tlb_num_ways", Nios2CPU, tlb_num_ways, 16),
+ /* ALTR,tlb-num-entries */
+ DEFINE_PROP_UINT32("mmu_pid_num_entries", Nios2CPU, tlb_num_entries, 256),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+
+static void nios2_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ Nios2CPUClass *ncc = NIOS2_CPU_CLASS(oc);
+
+ ncc->parent_realize = dc->realize;
+ dc->realize = nios2_cpu_realizefn;
+ dc->props = nios2_properties;
+ ncc->parent_reset = cc->reset;
+ cc->reset = nios2_cpu_reset;
+
+ cc->has_work = nios2_cpu_has_work;
+ cc->do_interrupt = nios2_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = nios2_cpu_exec_interrupt;
+ cc->dump_state = nios2_cpu_dump_state;
+ cc->set_pc = nios2_cpu_set_pc;
+ cc->disas_set_info = nios2_cpu_disas_set_info;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = nios2_cpu_handle_mmu_fault;
+#else
+ cc->do_unaligned_access = nios2_cpu_do_unaligned_access;
+ cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug;
+#endif
+ cc->gdb_read_register = nios2_cpu_gdb_read_register;
+ cc->gdb_write_register = nios2_cpu_gdb_write_register;
+ cc->gdb_num_core_regs = 49;
+}
+
+static const TypeInfo nios2_cpu_type_info = {
+ .name = TYPE_NIOS2_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(Nios2CPU),
+ .instance_init = nios2_cpu_initfn,
+ .class_size = sizeof(Nios2CPUClass),
+ .class_init = nios2_cpu_class_init,
+};
+
+static void nios2_cpu_register_types(void)
+{
+ type_register_static(&nios2_cpu_type_info);
+}
+
+type_init(nios2_cpu_register_types)
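Aside: the gdbstub hooks above expose 49 registers: GDB numbers 0-31 are the general registers, 32 is the PC, and 33-48 are the control registers, which sit at regs[32..47] and therefore map to regs[n - 1] (the PC itself lives at index 64, per cpu.h below). An illustrative mapping helper; the names are made up for this sketch:

    /* GDB register number -> index into env->regs[] (49 registers total). */
    #define SKETCH_R_PC 64              /* PC index, matching R_PC in cpu.h below */

    static int gdb_regnum_to_index(int n)
    {
        if (n < 32) {
            return n;                   /* r0..r31 */
        } else if (n == 32) {
            return SKETCH_R_PC;         /* program counter */
        } else if (n < 49) {
            return n - 1;               /* ctl0..ctl15, stored at regs[32..47] */
        }
        return -1;                      /* not a core register */
    }
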
diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h
new file mode 100644
index 0000000000..13931f3f0b
--- /dev/null
+++ b/target/nios2/cpu.h
@@ -0,0 +1,272 @@
+/*
+ * Altera Nios II virtual CPU header
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef CPU_NIOS2_H
+#define CPU_NIOS2_H
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+
+#define TARGET_LONG_BITS 32
+
+#define CPUArchState struct CPUNios2State
+
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+#include "qom/cpu.h"
+struct CPUNios2State;
+typedef struct CPUNios2State CPUNios2State;
+#if !defined(CONFIG_USER_ONLY)
+#include "mmu.h"
+#endif
+
+#define TYPE_NIOS2_CPU "nios2-cpu"
+
+#define NIOS2_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(Nios2CPUClass, (klass), TYPE_NIOS2_CPU)
+#define NIOS2_CPU(obj) \
+ OBJECT_CHECK(Nios2CPU, (obj), TYPE_NIOS2_CPU)
+#define NIOS2_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(Nios2CPUClass, (obj), TYPE_NIOS2_CPU)
+
+/**
+ * Nios2CPUClass:
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A Nios2 CPU model.
+ */
+typedef struct Nios2CPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} Nios2CPUClass;
+
+#define TARGET_HAS_ICE 1
+
+/* Configuration options for Nios II */
+#define RESET_ADDRESS 0x00000000
+#define EXCEPTION_ADDRESS 0x00000004
+#define FAST_TLB_MISS_ADDRESS 0x00000008
+
+
+/* GP regs + CR regs + PC */
+#define NUM_CORE_REGS (32 + 32 + 1)
+
+/* General purpose register aliases */
+#define R_ZERO 0
+#define R_AT 1
+#define R_RET0 2
+#define R_RET1 3
+#define R_ARG0 4
+#define R_ARG1 5
+#define R_ARG2 6
+#define R_ARG3 7
+#define R_ET 24
+#define R_BT 25
+#define R_GP 26
+#define R_SP 27
+#define R_FP 28
+#define R_EA 29
+#define R_BA 30
+#define R_RA 31
+
+/* Control register aliases */
+#define CR_BASE 32
+#define CR_STATUS (CR_BASE + 0)
+#define CR_STATUS_PIE (1 << 0)
+#define CR_STATUS_U (1 << 1)
+#define CR_STATUS_EH (1 << 2)
+#define CR_STATUS_IH (1 << 3)
+#define CR_STATUS_IL (63 << 4)
+#define CR_STATUS_CRS (63 << 10)
+#define CR_STATUS_PRS (63 << 16)
+#define CR_STATUS_NMI (1 << 22)
+#define CR_STATUS_RSIE (1 << 23)
+#define CR_ESTATUS (CR_BASE + 1)
+#define CR_BSTATUS (CR_BASE + 2)
+#define CR_IENABLE (CR_BASE + 3)
+#define CR_IPENDING (CR_BASE + 4)
+#define CR_CPUID (CR_BASE + 5)
+#define CR_CTL6 (CR_BASE + 6)
+#define CR_EXCEPTION (CR_BASE + 7)
+#define CR_PTEADDR (CR_BASE + 8)
+#define CR_PTEADDR_PTBASE_SHIFT 22
+#define CR_PTEADDR_PTBASE_MASK (0x3FF << CR_PTEADDR_PTBASE_SHIFT)
+#define CR_PTEADDR_VPN_SHIFT 2
+#define CR_PTEADDR_VPN_MASK (0xFFFFF << CR_PTEADDR_VPN_SHIFT)
+#define CR_TLBACC (CR_BASE + 9)
+#define CR_TLBACC_IGN_SHIFT 25
+#define CR_TLBACC_IGN_MASK (0x7F << CR_TLBACC_IGN_SHIFT)
+#define CR_TLBACC_C (1 << 24)
+#define CR_TLBACC_R (1 << 23)
+#define CR_TLBACC_W (1 << 22)
+#define CR_TLBACC_X (1 << 21)
+#define CR_TLBACC_G (1 << 20)
+#define CR_TLBACC_PFN_MASK 0x000FFFFF
+#define CR_TLBMISC (CR_BASE + 10)
+#define CR_TLBMISC_WAY_SHIFT 20
+#define CR_TLBMISC_WAY_MASK (0xF << CR_TLBMISC_WAY_SHIFT)
+#define CR_TLBMISC_RD (1 << 19)
+#define CR_TLBMISC_WR (1 << 18)
+#define CR_TLBMISC_PID_SHIFT 4
+#define CR_TLBMISC_PID_MASK (0x3FFF << CR_TLBMISC_PID_SHIFT)
+#define CR_TLBMISC_DBL (1 << 3)
+#define CR_TLBMISC_BAD (1 << 2)
+#define CR_TLBMISC_PERM (1 << 1)
+#define CR_TLBMISC_D (1 << 0)
+#define CR_ENCINJ (CR_BASE + 11)
+#define CR_BADADDR (CR_BASE + 12)
+#define CR_CONFIG (CR_BASE + 13)
+#define CR_MPUBASE (CR_BASE + 14)
+#define CR_MPUACC (CR_BASE + 15)
+
+/* Other registers */
+#define R_PC 64
+
+/* Exceptions */
+#define EXCP_BREAK -1
+#define EXCP_RESET 0
+#define EXCP_PRESET 1
+#define EXCP_IRQ 2
+#define EXCP_TRAP 3
+#define EXCP_UNIMPL 4
+#define EXCP_ILLEGAL 5
+#define EXCP_UNALIGN 6
+#define EXCP_UNALIGND 7
+#define EXCP_DIV 8
+#define EXCP_SUPERA 9
+#define EXCP_SUPERI 10
+#define EXCP_SUPERD 11
+#define EXCP_TLBD 12
+#define EXCP_TLBX 13
+#define EXCP_TLBR 14
+#define EXCP_TLBW 15
+#define EXCP_MPUI 16
+#define EXCP_MPUD 17
+
+#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
+
+#define NB_MMU_MODES 2
+
+struct CPUNios2State {
+ uint32_t regs[NUM_CORE_REGS];
+
+#if !defined(CONFIG_USER_ONLY)
+ Nios2MMU mmu;
+
+ uint32_t irq_pending;
+#endif
+
+ CPU_COMMON
+};
+
+/**
+ * Nios2CPU:
+ * @env: #CPUNios2State
+ *
+ * A Nios2 CPU.
+ */
+typedef struct Nios2CPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUNios2State env;
+ bool mmu_present;
+ uint32_t pid_num_bits;
+ uint32_t tlb_num_ways;
+ uint32_t tlb_num_entries;
+
+ /* Addresses that are hard-coded in the FPGA build settings */
+ uint32_t reset_addr;
+ uint32_t exception_addr;
+ uint32_t fast_tlb_miss_addr;
+} Nios2CPU;
+
+static inline Nios2CPU *nios2_env_get_cpu(CPUNios2State *env)
+{
+ return NIOS2_CPU(container_of(env, Nios2CPU, env));
+}
+
+#define ENV_GET_CPU(e) CPU(nios2_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(Nios2CPU, env)
+
+void nios2_tcg_init(void);
+Nios2CPU *cpu_nios2_init(const char *cpu_model);
+void nios2_cpu_do_interrupt(CPUState *cs);
+int cpu_nios2_signal_handler(int host_signum, void *pinfo, void *puc);
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUNios2State *env);
+void nios2_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+hwaddr nios2_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+void nios2_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+qemu_irq *nios2_cpu_pic_init(Nios2CPU *cpu);
+void nios2_check_interrupts(CPUNios2State *env);
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define cpu_init(cpu_model) CPU(cpu_nios2_init(cpu_model))
+
+#define cpu_gen_code cpu_nios2_gen_code
+#define cpu_signal_handler cpu_nios2_signal_handler
+
+#define CPU_SAVE_VERSION 1
+
+#define TARGET_PAGE_BITS 12
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_SUPERVISOR_IDX 0
+#define MMU_USER_IDX 1
+
+static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch)
+{
+ return (env->regs[CR_STATUS] & CR_STATUS_U) ? MMU_USER_IDX :
+ MMU_SUPERVISOR_IDX;
+}
+
+int nios2_cpu_handle_mmu_fault(CPUState *env, vaddr address,
+ int rw, int mmu_idx);
+
+static inline int cpu_interrupts_enabled(CPUNios2State *env)
+{
+ return env->regs[CR_STATUS] & CR_STATUS_PIE;
+}
+
+#include "exec/cpu-all.h"
+#include "exec/exec-all.h"
+
+static inline void cpu_get_tb_cpu_state(CPUNios2State *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->regs[R_PC];
+ *cs_base = 0;
+ *flags = (env->regs[CR_STATUS] & (CR_STATUS_EH | CR_STATUS_U));
+}
+
+#endif /* CPU_NIOS2_H */
diff --git a/target/nios2/helper.c b/target/nios2/helper.c
new file mode 100644
index 0000000000..ef9ee05798
--- /dev/null
+++ b/target/nios2/helper.c
@@ -0,0 +1,313 @@
+/*
+ * Altera Nios II helper routines.
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "cpu.h"
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "qapi/error.h"
+#include "exec/exec-all.h"
+#include "exec/log.h"
+#include "exec/helper-proto.h"
+
+#if defined(CONFIG_USER_ONLY)
+
+void nios2_cpu_do_interrupt(CPUState *cs)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+ cs->exception_index = -1;
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+}
+
+int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int mmu_idx)
+{
+ cs->exception_index = 0xaa;
+ /* Page 0x1000 is kuser helper */
+ if (address < 0x1000 || address >= 0x2000) {
+ cpu_dump_state(cs, stderr, fprintf, 0);
+ }
+ return 1;
+}
+
+#else /* !CONFIG_USER_ONLY */
+
+void nios2_cpu_do_interrupt(CPUState *cs)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ switch (cs->exception_index) {
+ case EXCP_IRQ:
+ assert(env->regs[CR_STATUS] & CR_STATUS_PIE);
+
+ qemu_log_mask(CPU_LOG_INT, "interrupt at pc=%x\n", env->regs[R_PC]);
+
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[CR_STATUS] |= CR_STATUS_IH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ env->regs[R_PC] = cpu->exception_addr;
+ break;
+
+ case EXCP_TLBD:
+ if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) {
+ qemu_log_mask(CPU_LOG_INT, "TLB MISS (fast) at pc=%x\n",
+ env->regs[R_PC]);
+
+ /* Fast TLB miss */
+ /* Variation from the spec: Table 3-35 of the CPU reference manual
+ * shows estatus not being changed for a TLB miss, but this
+ * appears to be incorrect. */

+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[CR_TLBMISC] &= ~CR_TLBMISC_DBL;
+ env->regs[CR_TLBMISC] |= CR_TLBMISC_WR;
+
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ env->regs[R_PC] = cpu->fast_tlb_miss_addr;
+ } else {
+ qemu_log_mask(CPU_LOG_INT, "TLB MISS (double) at pc=%x\n",
+ env->regs[R_PC]);
+
+ /* Double TLB miss */
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[CR_TLBMISC] |= CR_TLBMISC_DBL;
+
+ env->regs[R_PC] = cpu->exception_addr;
+ }
+ break;
+
+ case EXCP_TLBR:
+ case EXCP_TLBW:
+ case EXCP_TLBX:
+ qemu_log_mask(CPU_LOG_INT, "TLB PERM at pc=%x\n", env->regs[R_PC]);
+
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) {
+ env->regs[CR_TLBMISC] |= CR_TLBMISC_WR;
+ }
+
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ env->regs[R_PC] = cpu->exception_addr;
+ break;
+
+ case EXCP_SUPERA:
+ case EXCP_SUPERI:
+ case EXCP_SUPERD:
+ qemu_log_mask(CPU_LOG_INT, "SUPERVISOR exception at pc=%x\n",
+ env->regs[R_PC]);
+
+ if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) {
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ }
+
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[R_PC] = cpu->exception_addr;
+ break;
+
+ case EXCP_ILLEGAL:
+ case EXCP_TRAP:
+ qemu_log_mask(CPU_LOG_INT, "TRAP exception at pc=%x\n",
+ env->regs[R_PC]);
+
+ if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) {
+ env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
+ env->regs[R_EA] = env->regs[R_PC] + 4;
+ }
+
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[R_PC] = cpu->exception_addr;
+ break;
+
+ case EXCP_BREAK:
+ if ((env->regs[CR_STATUS] & CR_STATUS_EH) == 0) {
+ env->regs[CR_BSTATUS] = env->regs[CR_STATUS];
+ env->regs[R_BA] = env->regs[R_PC] + 4;
+ }
+
+ env->regs[CR_STATUS] |= CR_STATUS_EH;
+ env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);
+
+ env->regs[CR_EXCEPTION] &= ~(0x1F << 2);
+ env->regs[CR_EXCEPTION] |= (cs->exception_index & 0x1F) << 2;
+
+ env->regs[R_PC] = cpu->exception_addr;
+ break;
+
+ default:
+ cpu_abort(cs, "unhandled exception type=%d\n",
+ cs->exception_index);
+ break;
+ }
+}
+
+static int cpu_nios2_handle_virtual_page(
+ CPUState *cs, target_ulong address, int rw, int mmu_idx)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+ target_ulong vaddr, paddr;
+ Nios2MMULookup lu;
+ unsigned int hit;
+ hit = mmu_translate(env, &lu, address, rw, mmu_idx);
+ if (hit) {
+ vaddr = address & TARGET_PAGE_MASK;
+ paddr = lu.paddr + vaddr - lu.vaddr;
+
+ if (((rw == 0) && (lu.prot & PAGE_READ)) ||
+ ((rw == 1) && (lu.prot & PAGE_WRITE)) ||
+ ((rw == 2) && (lu.prot & PAGE_EXEC))) {
+
+ tlb_set_page(cs, vaddr, paddr, lu.prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
+ } else {
+ /* Permission violation */
+ cs->exception_index = (rw == 0) ? EXCP_TLBR :
+ ((rw == 1) ? EXCP_TLBW :
+ EXCP_TLBX);
+ }
+ } else {
+ cs->exception_index = EXCP_TLBD;
+ }
+
+ if (rw == 2) {
+ env->regs[CR_TLBMISC] &= ~CR_TLBMISC_D;
+ } else {
+ env->regs[CR_TLBMISC] |= CR_TLBMISC_D;
+ }
+ env->regs[CR_PTEADDR] &= CR_PTEADDR_PTBASE_MASK;
+ env->regs[CR_PTEADDR] |= (address >> 10) & CR_PTEADDR_VPN_MASK;
+ env->mmu.pteaddr_wr = env->regs[CR_PTEADDR];
+ env->regs[CR_BADADDR] = address;
+ return 1;
+}
+
+int nios2_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw, int mmu_idx)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ if (cpu->mmu_present) {
+ if (MMU_SUPERVISOR_IDX == mmu_idx) {
+ if (address >= 0xC0000000) {
+ /* Kernel physical page - TLB bypassed */
+ address &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, address, PAGE_BITS,
+ mmu_idx, TARGET_PAGE_SIZE);
+ } else if (address >= 0x80000000) {
+ /* Kernel virtual page */
+ return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
+ } else {
+ /* User virtual page */
+ return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
+ }
+ } else {
+ if (address >= 0x80000000) {
+ /* Illegal access from user mode */
+ cs->exception_index = EXCP_SUPERA;
+ env->regs[CR_BADADDR] = address;
+ return 1;
+ } else {
+ /* User virtual page */
+ return cpu_nios2_handle_virtual_page(cs, address, rw, mmu_idx);
+ }
+ }
+ } else {
+ /* No MMU */
+ address &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, address, PAGE_BITS,
+ mmu_idx, TARGET_PAGE_SIZE);
+ }
+
+ return 0;
+}
+
+hwaddr nios2_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+ target_ulong vaddr, paddr = 0;
+ Nios2MMULookup lu;
+ unsigned int hit;
+
+ if (cpu->mmu_present && (addr < 0xC0000000)) {
+ hit = mmu_translate(env, &lu, addr, 0, 0);
+ if (hit) {
+ vaddr = addr & TARGET_PAGE_MASK;
+ paddr = lu.paddr + vaddr - lu.vaddr;
+ } else {
+ paddr = -1;
+ qemu_log("cpu_get_phys_page debug MISS: %#" PRIx64 "\n", addr);
+ }
+ } else {
+ paddr = addr & TARGET_PAGE_MASK;
+ }
+
+ return paddr;
+}
+
+void nios2_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+
+ env->regs[CR_BADADDR] = addr;
+ env->regs[CR_EXCEPTION] = EXCP_UNALIGN << 2;
+ helper_raise_exception(env, EXCP_UNALIGN);
+}
+#endif /* !CONFIG_USER_ONLY */
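Aside: the supervisor-only, illegal-instruction, trap and break cases in nios2_cpu_do_interrupt() above share one sequence: save status and the return address (unless already in exception mode), set EH, clear PIE and U, record the cause in bits 6..2 of the exception register, and vector to the exception address. A condensed sketch of that common path, assuming the declarations from cpu.h above; the helper name is illustrative:

    /* Common Nios II exception entry, excluding the IRQ and TLB-miss paths. */
    static void exception_entry_sketch(CPUNios2State *env, uint32_t cause,
                                       uint32_t handler)
    {
        if (!(env->regs[CR_STATUS] & CR_STATUS_EH)) {
            env->regs[CR_ESTATUS] = env->regs[CR_STATUS];  /* save status */
            env->regs[R_EA] = env->regs[R_PC] + 4;         /* return address */
        }
        env->regs[CR_STATUS] |= CR_STATUS_EH;              /* exception mode */
        env->regs[CR_STATUS] &= ~(CR_STATUS_PIE | CR_STATUS_U);

        env->regs[CR_EXCEPTION] &= ~(0x1F << 2);           /* cause field */
        env->regs[CR_EXCEPTION] |= (cause & 0x1F) << 2;

        env->regs[R_PC] = handler;                         /* vector */
    }
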
diff --git a/target/nios2/helper.h b/target/nios2/helper.h
new file mode 100644
index 0000000000..b0cb9146a5
--- /dev/null
+++ b/target/nios2/helper.h
@@ -0,0 +1,27 @@
+/*
+ * Altera Nios II helper routines header.
+ *
+ * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+DEF_HELPER_2(raise_exception, void, env, i32)
+
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_2(mmu_read_debug, void, env, i32)
+DEF_HELPER_3(mmu_write, void, env, i32, i32)
+DEF_HELPER_1(check_interrupts, void, env)
+#endif
diff --git a/target/nios2/mmu.c b/target/nios2/mmu.c
new file mode 100644
index 0000000000..fe9298af50
--- /dev/null
+++ b/target/nios2/mmu.c
@@ -0,0 +1,296 @@
+/*
+ * Altera Nios II MMU emulation for qemu.
+ *
+ * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "mmu.h"
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* Define this to enable MMU debug messages */
+/* #define DEBUG_MMU */
+
+#ifdef DEBUG_MMU
+#define MMU_LOG(x) x
+#else
+#define MMU_LOG(x)
+#endif
+
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = nios2_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (unlikely(ret)) {
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
+ }
+ cpu_loop_exit(cs);
+ }
+}
+
+void mmu_read_debug(CPUNios2State *env, uint32_t rn)
+{
+ switch (rn) {
+ case CR_TLBACC:
+ MMU_LOG(qemu_log("TLBACC READ %08X\n", env->regs[rn]));
+ break;
+
+ case CR_TLBMISC:
+ MMU_LOG(qemu_log("TLBMISC READ %08X\n", env->regs[rn]));
+ break;
+
+ case CR_PTEADDR:
+ MMU_LOG(qemu_log("PTEADDR READ %08X\n", env->regs[rn]));
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* rw - 0 = read, 1 = write, 2 = fetch. */
+unsigned int mmu_translate(CPUNios2State *env,
+ Nios2MMULookup *lu,
+ target_ulong vaddr, int rw, int mmu_idx)
+{
+ Nios2CPU *cpu = nios2_env_get_cpu(env);
+ int pid = (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4;
+ int vpn = vaddr >> 12;
+
+ MMU_LOG(qemu_log("mmu_translate vaddr %08X, pid %08X, vpn %08X\n",
+ vaddr, pid, vpn));
+
+ int way;
+ for (way = 0; way < cpu->tlb_num_ways; way++) {
+
+ Nios2TLBEntry *entry =
+ &env->mmu.tlb[(way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask)];
+
+ MMU_LOG(qemu_log("TLB[%d] TAG %08X, VPN %08X\n",
+ (way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask),
+ entry->tag, (entry->tag >> 12)));
+
+ if (((entry->tag >> 12) != vpn) ||
+ (((entry->tag & (1 << 11)) == 0) &&
+ ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) != pid))) {
+ continue;
+ }
+ lu->vaddr = vaddr & TARGET_PAGE_MASK;
+ lu->paddr = (entry->data & CR_TLBACC_PFN_MASK) << TARGET_PAGE_BITS;
+ lu->prot = ((entry->data & CR_TLBACC_R) ? PAGE_READ : 0) |
+ ((entry->data & CR_TLBACC_W) ? PAGE_WRITE : 0) |
+ ((entry->data & CR_TLBACC_X) ? PAGE_EXEC : 0);
+
+ MMU_LOG(qemu_log("HIT TLB[%d] %08X %08X %08X\n",
+ (way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask),
+ lu->vaddr, lu->paddr, lu->prot));
+ return 1;
+ }
+ return 0;
+}
+
+static void mmu_flush_pid(CPUNios2State *env, uint32_t pid)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+ Nios2CPU *cpu = nios2_env_get_cpu(env);
+ int idx;
+ MMU_LOG(qemu_log("TLB Flush PID %d\n", pid));
+
+ for (idx = 0; idx < cpu->tlb_num_entries; idx++) {
+ Nios2TLBEntry *entry = &env->mmu.tlb[idx];
+
+ MMU_LOG(qemu_log("TLB[%d] => %08X %08X\n",
+ idx, entry->tag, entry->data));
+
+ if ((entry->tag & (1 << 10)) && (!(entry->tag & (1 << 11))) &&
+ ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) == pid)) {
+ uint32_t vaddr = entry->tag & TARGET_PAGE_MASK;
+
+ MMU_LOG(qemu_log("TLB Flush Page %08X\n", vaddr));
+
+ tlb_flush_page(cs, vaddr);
+ }
+ }
+}
+
+void mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+ Nios2CPU *cpu = nios2_env_get_cpu(env);
+
+ MMU_LOG(qemu_log("mmu_write %08X = %08X\n", rn, v));
+
+ switch (rn) {
+ case CR_TLBACC:
+ MMU_LOG(qemu_log("TLBACC: IG %02X, FLAGS %c%c%c%c%c, PFN %05X\n",
+ v >> CR_TLBACC_IGN_SHIFT,
+ (v & CR_TLBACC_C) ? 'C' : '.',
+ (v & CR_TLBACC_R) ? 'R' : '.',
+ (v & CR_TLBACC_W) ? 'W' : '.',
+ (v & CR_TLBACC_X) ? 'X' : '.',
+ (v & CR_TLBACC_G) ? 'G' : '.',
+ v & CR_TLBACC_PFN_MASK));
+
+ /* if tlbmisc.WE == 1 then trigger a TLB write on writes to TLBACC */
+ if (env->regs[CR_TLBMISC] & CR_TLBMISC_WR) {
+ int way = (env->regs[CR_TLBMISC] >> CR_TLBMISC_WAY_SHIFT);
+ int vpn = (env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK) >> 2;
+ int pid = (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4;
+ int g = (v & CR_TLBACC_G) ? 1 : 0;
+ int valid = ((vpn & CR_TLBACC_PFN_MASK) < 0xC0000) ? 1 : 0;
+ Nios2TLBEntry *entry =
+ &env->mmu.tlb[(way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask)];
+ uint32_t newTag = (vpn << 12) | (g << 11) | (valid << 10) | pid;
+ uint32_t newData = v & (CR_TLBACC_C | CR_TLBACC_R | CR_TLBACC_W |
+ CR_TLBACC_X | CR_TLBACC_PFN_MASK);
+
+ if ((entry->tag != newTag) || (entry->data != newData)) {
+ if (entry->tag & (1 << 10)) {
+ /* Flush existing entry */
+ MMU_LOG(qemu_log("TLB Flush Page (OLD) %08X\n",
+ entry->tag & TARGET_PAGE_MASK));
+ tlb_flush_page(cs, entry->tag & TARGET_PAGE_MASK);
+ }
+ entry->tag = newTag;
+ entry->data = newData;
+ MMU_LOG(qemu_log("TLB[%d] = %08X %08X\n",
+ (way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask),
+ entry->tag, entry->data));
+ }
+ /* Auto-increment tlbmisc.WAY */
+ env->regs[CR_TLBMISC] =
+ (env->regs[CR_TLBMISC] & ~CR_TLBMISC_WAY_MASK) |
+ (((way + 1) & (cpu->tlb_num_ways - 1)) <<
+ CR_TLBMISC_WAY_SHIFT);
+ }
+
+ /* Writes to TLBACC don't change the read-back value */
+ env->mmu.tlbacc_wr = v;
+ break;
+
+ case CR_TLBMISC:
+ MMU_LOG(qemu_log("TLBMISC: WAY %X, FLAGS %c%c%c%c%c%c, PID %04X\n",
+ v >> CR_TLBMISC_WAY_SHIFT,
+ (v & CR_TLBMISC_RD) ? 'R' : '.',
+ (v & CR_TLBMISC_WR) ? 'W' : '.',
+ (v & CR_TLBMISC_DBL) ? '2' : '.',
+ (v & CR_TLBMISC_BAD) ? 'B' : '.',
+ (v & CR_TLBMISC_PERM) ? 'P' : '.',
+ (v & CR_TLBMISC_D) ? 'D' : '.',
+ (v & CR_TLBMISC_PID_MASK) >> 4));
+
+ if ((v & CR_TLBMISC_PID_MASK) !=
+ (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK)) {
+ mmu_flush_pid(env, (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >>
+ CR_TLBMISC_PID_SHIFT);
+ }
+ /* if tlbmisc.RD == 1 then trigger a TLB read on writes to TLBMISC */
+ if (v & CR_TLBMISC_RD) {
+ int way = (v >> CR_TLBMISC_WAY_SHIFT);
+ int vpn = (env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK) >> 2;
+ Nios2TLBEntry *entry =
+ &env->mmu.tlb[(way * cpu->tlb_num_ways) +
+ (vpn & env->mmu.tlb_entry_mask)];
+
+ env->regs[CR_TLBACC] &= CR_TLBACC_IGN_MASK;
+ env->regs[CR_TLBACC] |= entry->data;
+ env->regs[CR_TLBACC] |= (entry->tag & (1 << 11)) ? CR_TLBACC_G : 0;
+ env->regs[CR_TLBMISC] =
+ (v & ~CR_TLBMISC_PID_MASK) |
+ ((entry->tag & ((1 << cpu->pid_num_bits) - 1)) <<
+ CR_TLBMISC_PID_SHIFT);
+ env->regs[CR_PTEADDR] &= ~CR_PTEADDR_VPN_MASK;
+ env->regs[CR_PTEADDR] |= (entry->tag >> 12) << CR_PTEADDR_VPN_SHIFT;
+ MMU_LOG(qemu_log("TLB READ way %d, vpn %05X, tag %08X, data %08X, "
+ "tlbacc %08X, tlbmisc %08X, pteaddr %08X\n",
+ way, vpn, entry->tag, entry->data,
+ env->regs[CR_TLBACC], env->regs[CR_TLBMISC],
+ env->regs[CR_PTEADDR]));
+ } else {
+ env->regs[CR_TLBMISC] = v;
+ }
+
+ env->mmu.tlbmisc_wr = v;
+ break;
+
+ case CR_PTEADDR:
+ MMU_LOG(qemu_log("PTEADDR: PTBASE %03X, VPN %05X\n",
+ v >> CR_PTEADDR_PTBASE_SHIFT,
+ (v & CR_PTEADDR_VPN_MASK) >> CR_PTEADDR_VPN_SHIFT));
+
+ /* Writes to PTEADDR don't change the read-back VPN value */
+ env->regs[CR_PTEADDR] = (v & ~CR_PTEADDR_VPN_MASK) |
+ (env->regs[CR_PTEADDR] & CR_PTEADDR_VPN_MASK);
+ env->mmu.pteaddr_wr = v;
+ break;
+
+ default:
+ break;
+ }
+}
+
+void mmu_init(CPUNios2State *env)
+{
+ Nios2CPU *cpu = nios2_env_get_cpu(env);
+ Nios2MMU *mmu = &env->mmu;
+
+ MMU_LOG(qemu_log("mmu_init\n"));
+
+ mmu->tlb_entry_mask = (cpu->tlb_num_entries / cpu->tlb_num_ways) - 1;
+ mmu->tlb = g_new0(Nios2TLBEntry, cpu->tlb_num_entries);
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUNios2State *env)
+{
+ Nios2CPU *cpu = nios2_env_get_cpu(env);
+ int i;
+
+ cpu_fprintf(f, "MMU: ways %d, entries %d, pid bits %d\n",
+ cpu->tlb_num_ways, cpu->tlb_num_entries,
+ cpu->pid_num_bits);
+
+ for (i = 0; i < cpu->tlb_num_entries; i++) {
+ Nios2TLBEntry *entry = &env->mmu.tlb[i];
+ cpu_fprintf(f, "TLB[%d] = %08X %08X %c VPN %05X "
+ "PID %02X %c PFN %05X %c%c%c%c\n",
+ i, entry->tag, entry->data,
+ (entry->tag & (1 << 10)) ? 'V' : '-',
+ entry->tag >> 12,
+ entry->tag & ((1 << cpu->pid_num_bits) - 1),
+ (entry->tag & (1 << 11)) ? 'G' : '-',
+ entry->data & CR_TLBACC_PFN_MASK,
+ (entry->data & CR_TLBACC_C) ? 'C' : '-',
+ (entry->data & CR_TLBACC_R) ? 'R' : '-',
+ (entry->data & CR_TLBACC_W) ? 'W' : '-',
+ (entry->data & CR_TLBACC_X) ? 'X' : '-');
+ }
+}
+
+#endif /* !CONFIG_USER_ONLY */
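Aside: mmu_write() above packs each software TLB entry's tag as the VPN in bits 31..12, the global bit in bit 11, a valid bit in bit 10, and the PID in the low bits. A small pack/unpack sketch assuming the default 8 PID bits from the "mmu_pid_num_bits" property; the names are illustrative:

    #include <stdint.h>
    #include <stdbool.h>

    /* tag layout: [31:12] VPN, [11] G, [10] V, [7:0] PID (8 PID bits assumed) */
    static uint32_t tlb_pack_tag(uint32_t vpn, bool global, bool valid,
                                 uint32_t pid)
    {
        return (vpn << 12) | ((uint32_t)global << 11) |
               ((uint32_t)valid << 10) | (pid & 0xff);
    }

    static uint32_t tlb_tag_vpn(uint32_t tag)   { return tag >> 12; }
    static bool     tlb_tag_global(uint32_t tag) { return (tag >> 11) & 1; }
    static bool     tlb_tag_valid(uint32_t tag)  { return (tag >> 10) & 1; }
    static uint32_t tlb_tag_pid(uint32_t tag)    { return tag & 0xff; }
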
diff --git a/target/nios2/mmu.h b/target/nios2/mmu.h
new file mode 100644
index 0000000000..51d3d1f43a
--- /dev/null
+++ b/target/nios2/mmu.h
@@ -0,0 +1,50 @@
+/*
+ * Altera Nios II MMU emulation for qemu.
+ *
+ * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef MMU_NIOS2_H
+#define MMU_NIOS2_H
+
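+/*
+ * A TLB entry as maintained by mmu.c: 'tag' packs the VPN (bits 31..12),
+ * the G bit (bit 11), the V bit (bit 10) and the PID (low bits), while
+ * 'data' holds the PFN together with the C/R/W/X permission flags.
+ */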
+typedef struct Nios2TLBEntry {
+ target_ulong tag;
+ target_ulong data;
+} Nios2TLBEntry;
+
+typedef struct Nios2MMU {
+ int tlb_entry_mask;
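+ /* Raw values last written to PTEADDR, TLBACC and TLBMISC; the values
+ * read back through the control registers can differ (see mmu.c).
+ */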
+ uint32_t pteaddr_wr;
+ uint32_t tlbacc_wr;
+ uint32_t tlbmisc_wr;
+ Nios2TLBEntry *tlb;
+} Nios2MMU;
+
+typedef struct Nios2MMULookup {
+ target_ulong vaddr;
+ target_ulong paddr;
+ int prot;
+} Nios2MMULookup;
+
+void mmu_flip_um(CPUNios2State *env, unsigned int um);
+unsigned int mmu_translate(CPUNios2State *env,
+ Nios2MMULookup *lu,
+ target_ulong vaddr, int rw, int mmu_idx);
+void mmu_read_debug(CPUNios2State *env, uint32_t rn);
+void mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v);
+void mmu_init(CPUNios2State *env);
+
+#endif /* MMU_NIOS2_H */
diff --git a/target/nios2/monitor.c b/target/nios2/monitor.c
new file mode 100644
index 0000000000..422c81656a
--- /dev/null
+++ b/target/nios2/monitor.c
@@ -0,0 +1,35 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hmp.h"
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env();
+
+ dump_mmu((FILE *)mon, (fprintf_function)monitor_printf, env1);
+}
diff --git a/target/nios2/op_helper.c b/target/nios2/op_helper.c
new file mode 100644
index 0000000000..538853cda7
--- /dev/null
+++ b/target/nios2/op_helper.c
@@ -0,0 +1,47 @@
+/*
+ * Altera Nios II helper routines.
+ *
+ * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+
+#if !defined(CONFIG_USER_ONLY)
+void helper_mmu_read_debug(CPUNios2State *env, uint32_t rn)
+{
+ mmu_read_debug(env, rn);
+}
+
+void helper_mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v)
+{
+ mmu_write(env, rn, v);
+}
+
+void helper_check_interrupts(CPUNios2State *env)
+{
+ nios2_check_interrupts(env);
+}
+#endif /* !CONFIG_USER_ONLY */
+
+void helper_raise_exception(CPUNios2State *env, uint32_t index)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+ cs->exception_index = index;
+ cpu_loop_exit(cs);
+}
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
new file mode 100644
index 0000000000..2d738391ad
--- /dev/null
+++ b/target/nios2/translate.c
@@ -0,0 +1,958 @@
+/*
+ * Altera Nios II emulation for qemu: main translation routines.
+ *
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ * Copyright (C) 2012 Chris Wulff <crwulff@gmail.com>
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * (Portions of this file were originally from nios2sim-ng.)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "cpu.h"
+#include "tcg-op.h"
+#include "exec/exec-all.h"
+#include "disas/disas.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/log.h"
+#include "exec/cpu_ldst.h"
+
+#define INSTRUCTION_FLG(func, flags) { (func), (flags) }
+#define INSTRUCTION(func) \
+ INSTRUCTION_FLG(func, 0)
+#define INSTRUCTION_NOP() \
+ INSTRUCTION_FLG(nop, 0)
+#define INSTRUCTION_UNIMPLEMENTED() \
+ INSTRUCTION_FLG(gen_excp, EXCP_UNIMPL)
+#define INSTRUCTION_ILLEGAL() \
+ INSTRUCTION_FLG(gen_excp, EXCP_ILLEGAL)
+
+/* Special R-Type instruction opcode */
+#define INSN_R_TYPE 0x3A
+
+/* I-Type instruction parsing */
+#define I_TYPE(instr, code) \
+ struct { \
+ uint8_t op; \
+ union { \
+ uint16_t imm16; \
+ int16_t imm16s; \
+ }; \
+ uint8_t b; \
+ uint8_t a; \
+ } (instr) = { \
+ .op = extract32((code), 0, 6), \
+ .imm16 = extract32((code), 6, 16), \
+ .b = extract32((code), 22, 5), \
+ .a = extract32((code), 27, 5), \
+ }
+
+/* R-Type instruction parsing */
+#define R_TYPE(instr, code) \
+ struct { \
+ uint8_t op; \
+ uint8_t imm5; \
+ uint8_t opx; \
+ uint8_t c; \
+ uint8_t b; \
+ uint8_t a; \
+ } (instr) = { \
+ .op = extract32((code), 0, 6), \
+ .imm5 = extract32((code), 6, 5), \
+ .opx = extract32((code), 11, 6), \
+ .c = extract32((code), 17, 5), \
+ .b = extract32((code), 22, 5), \
+ .a = extract32((code), 27, 5), \
+ }
+
+/* J-Type instruction parsing */
+#define J_TYPE(instr, code) \
+ struct { \
+ uint8_t op; \
+ uint32_t imm26; \
+ } (instr) = { \
+ .op = extract32((code), 0, 6), \
+ .imm26 = extract32((code), 6, 26), \
+ }
+
+typedef struct DisasContext {
+ TCGv_ptr cpu_env;
+ TCGv *cpu_R;
+ TCGv_i32 zero;
+ int is_jmp;
+ target_ulong pc;
+ TranslationBlock *tb;
+ int mem_idx;
+ bool singlestep_enabled;
+} DisasContext;
+
+typedef struct Nios2Instruction {
+ void (*handler)(DisasContext *dc, uint32_t code, uint32_t flags);
+ uint32_t flags;
+} Nios2Instruction;
+
+static uint8_t get_opcode(uint32_t code)
+{
+ I_TYPE(instr, code);
+ return instr.op;
+}
+
+static uint8_t get_opxcode(uint32_t code)
+{
+ R_TYPE(instr, code);
+ return instr.opx;
+}
+
+static TCGv load_zero(DisasContext *dc)
+{
+ if (TCGV_IS_UNUSED_I32(dc->zero)) {
+ dc->zero = tcg_const_i32(0);
+ }
+ return dc->zero;
+}
+
+static TCGv load_gpr(DisasContext *dc, uint8_t reg)
+{
+ if (likely(reg != R_ZERO)) {
+ return dc->cpu_R[reg];
+ } else {
+ return load_zero(dc);
+ }
+}
+
+static void t_gen_helper_raise_exception(DisasContext *dc,
+ uint32_t index)
+{
+ TCGv_i32 tmp = tcg_const_i32(index);
+
+ tcg_gen_movi_tl(dc->cpu_R[R_PC], dc->pc);
+ gen_helper_raise_exception(dc->cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ dc->is_jmp = DISAS_UPDATE;
+}
+
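+/*
+ * Direct TB chaining is only used when single-stepping is disabled and
+ * (for the softmmu case) the destination lies on the same page as the
+ * current TB.
+ */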
+static bool use_goto_tb(DisasContext *dc, uint32_t dest)
+{
+ if (unlikely(dc->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static void gen_goto_tb(DisasContext *dc, int n, uint32_t dest)
+{
+ TranslationBlock *tb = dc->tb;
+
+ if (use_goto_tb(dc, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_tl(dc->cpu_R[R_PC], dest);
+ tcg_gen_exit_tb((tcg_target_long)tb + n);
+ } else {
+ tcg_gen_movi_tl(dc->cpu_R[R_PC], dest);
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static void gen_excp(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ t_gen_helper_raise_exception(dc, flags);
+}
+
+static void gen_check_supervisor(DisasContext *dc)
+{
+ if (dc->tb->flags & CR_STATUS_U) {
+ /* CPU in user mode, privileged instruction called, stop. */
+ t_gen_helper_raise_exception(dc, EXCP_SUPERI);
+ }
+}
+
+/*
+ * Used as a placeholder for all instructions which do not have
+ * an effect on the simulator (e.g. flush, sync)
+ */
+static void nop(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ /* Nothing to do here */
+}
+
+/*
+ * J-Type instructions
+ */
+static void jmpi(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ J_TYPE(instr, code);
+ gen_goto_tb(dc, 0, (dc->pc & 0xF0000000) | (instr.imm26 << 2));
+ dc->is_jmp = DISAS_TB_JUMP;
+}
+
+static void call(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ tcg_gen_movi_tl(dc->cpu_R[R_RA], dc->pc + 4);
+ jmpi(dc, code, flags);
+}
+
+/*
+ * I-Type instructions
+ */
+/* Load instructions */
+static void gen_ldx(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ I_TYPE(instr, code);
+
+ TCGv addr = tcg_temp_new();
+ TCGv data;
+
+ /*
+ * WARNING: Loads into R_ZERO are ignored, but we must generate the
+ * memory access itself to emulate the CPU precisely. Load
+ * from a protected page to R_ZERO will cause SIGSEGV on
+ * the Nios2 CPU.
+ */
+ if (likely(instr.b != R_ZERO)) {
+ data = dc->cpu_R[instr.b];
+ } else {
+ data = tcg_temp_new();
+ }
+
+ tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16s);
+ tcg_gen_qemu_ld_tl(data, addr, dc->mem_idx, flags);
+
+ if (unlikely(instr.b == R_ZERO)) {
+ tcg_temp_free(data);
+ }
+
+ tcg_temp_free(addr);
+}
+
+/* Store instructions */
+static void gen_stx(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ I_TYPE(instr, code);
+ TCGv val = load_gpr(dc, instr.b);
+
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_tl(addr, load_gpr(dc, instr.a), instr.imm16s);
+ tcg_gen_qemu_st_tl(val, addr, dc->mem_idx, flags);
+ tcg_temp_free(addr);
+}
+
+/* Branch instructions */
+static void br(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ I_TYPE(instr, code);
+
+ gen_goto_tb(dc, 0, dc->pc + 4 + (instr.imm16s & -4));
+ dc->is_jmp = DISAS_TB_JUMP;
+}
+
+static void gen_bxx(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ I_TYPE(instr, code);
+
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_brcond_tl(flags, dc->cpu_R[instr.a], dc->cpu_R[instr.b], l1);
+ gen_goto_tb(dc, 0, dc->pc + 4);
+ gen_set_label(l1);
+ gen_goto_tb(dc, 1, dc->pc + 4 + (instr.imm16s & -4));
+ dc->is_jmp = DISAS_TB_JUMP;
+}
+
+/* Comparison instructions */
+#define gen_i_cmpxx(fname, op3) \
+static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+{ \
+ I_TYPE(instr, (code)); \
+ tcg_gen_setcondi_tl(flags, (dc)->cpu_R[instr.b], (dc)->cpu_R[instr.a], \
+ (op3)); \
+}
+
+gen_i_cmpxx(gen_cmpxxsi, instr.imm16s)
+gen_i_cmpxx(gen_cmpxxui, instr.imm16)
+
+/* Math/logic instructions */
+#define gen_i_math_logic(fname, insn, resimm, op3) \
+static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+{ \
+ I_TYPE(instr, (code)); \
+ if (unlikely(instr.b == R_ZERO)) { /* Store to R_ZERO is ignored */ \
+ return; \
+ } else if (instr.a == R_ZERO) { /* MOVxI optimizations */ \
+ tcg_gen_movi_tl(dc->cpu_R[instr.b], (resimm) ? (op3) : 0); \
+ } else { \
+ tcg_gen_##insn##_tl((dc)->cpu_R[instr.b], (dc)->cpu_R[instr.a], \
+ (op3)); \
+ } \
+}
+
+gen_i_math_logic(addi, addi, 1, instr.imm16s)
+gen_i_math_logic(muli, muli, 0, instr.imm16s)
+
+gen_i_math_logic(andi, andi, 0, instr.imm16)
+gen_i_math_logic(ori, ori, 1, instr.imm16)
+gen_i_math_logic(xori, xori, 1, instr.imm16)
+
+gen_i_math_logic(andhi, andi, 0, instr.imm16 << 16)
+gen_i_math_logic(orhi , ori, 1, instr.imm16 << 16)
+gen_i_math_logic(xorhi, xori, 1, instr.imm16 << 16)
+
+/* Prototype only, defined below */
+static void handle_r_type_instr(DisasContext *dc, uint32_t code,
+ uint32_t flags);
+
+static const Nios2Instruction i_type_instructions[] = {
+ INSTRUCTION(call), /* call */
+ INSTRUCTION(jmpi), /* jmpi */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_ldx, MO_UB), /* ldbu */
+ INSTRUCTION(addi), /* addi */
+ INSTRUCTION_FLG(gen_stx, MO_UB), /* stb */
+ INSTRUCTION(br), /* br */
+ INSTRUCTION_FLG(gen_ldx, MO_SB), /* ldb */
+ INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_GE), /* cmpgei */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_ldx, MO_UW), /* ldhu */
+ INSTRUCTION(andi), /* andi */
+ INSTRUCTION_FLG(gen_stx, MO_UW), /* sth */
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_GE), /* bge */
+ INSTRUCTION_FLG(gen_ldx, MO_SW), /* ldh */
+ INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_LT), /* cmplti */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_NOP(), /* initda */
+ INSTRUCTION(ori), /* ori */
+ INSTRUCTION_FLG(gen_stx, MO_UL), /* stw */
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_LT), /* blt */
+ INSTRUCTION_FLG(gen_ldx, MO_UL), /* ldw */
+ INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_NE), /* cmpnei */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_NOP(), /* flushda */
+ INSTRUCTION(xori), /* xori */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_NE), /* bne */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_cmpxxsi, TCG_COND_EQ), /* cmpeqi */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_ldx, MO_UB), /* ldbuio */
+ INSTRUCTION(muli), /* muli */
+ INSTRUCTION_FLG(gen_stx, MO_UB), /* stbio */
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_EQ), /* beq */
+ INSTRUCTION_FLG(gen_ldx, MO_SB), /* ldbio */
+ INSTRUCTION_FLG(gen_cmpxxui, TCG_COND_GEU), /* cmpgeui */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_ldx, MO_UW), /* ldhuio */
+ INSTRUCTION(andhi), /* andhi */
+ INSTRUCTION_FLG(gen_stx, MO_UW), /* sthio */
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_GEU), /* bgeu */
+ INSTRUCTION_FLG(gen_ldx, MO_SW), /* ldhio */
+ INSTRUCTION_FLG(gen_cmpxxui, TCG_COND_LTU), /* cmpltui */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_UNIMPLEMENTED(), /* custom */
+ INSTRUCTION_NOP(), /* initd */
+ INSTRUCTION(orhi), /* orhi */
+ INSTRUCTION_FLG(gen_stx, MO_SL), /* stwio */
+ INSTRUCTION_FLG(gen_bxx, TCG_COND_LTU), /* bltu */
+ INSTRUCTION_FLG(gen_ldx, MO_UL), /* ldwio */
+ INSTRUCTION_UNIMPLEMENTED(), /* rdprs */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(handle_r_type_instr, 0), /* R-Type */
+ INSTRUCTION_NOP(), /* flushd */
+ INSTRUCTION(xorhi), /* xorhi */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+};
+
+/*
+ * R-Type instructions
+ */
+/*
+ * status <- estatus
+ * PC <- ea
+ */
+static void eret(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ tcg_gen_mov_tl(dc->cpu_R[CR_STATUS], dc->cpu_R[CR_ESTATUS]);
+ tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_EA]);
+
+ dc->is_jmp = DISAS_JUMP;
+}
+
+/* PC <- ra */
+static void ret(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_RA]);
+
+ dc->is_jmp = DISAS_JUMP;
+}
+
+/* PC <- ba */
+static void bret(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ tcg_gen_mov_tl(dc->cpu_R[R_PC], dc->cpu_R[R_BA]);
+
+ dc->is_jmp = DISAS_JUMP;
+}
+
+/* PC <- rA */
+static void jmp(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+
+ tcg_gen_mov_tl(dc->cpu_R[R_PC], load_gpr(dc, instr.a));
+
+ dc->is_jmp = DISAS_JUMP;
+}
+
+/* rC <- PC + 4 */
+static void nextpc(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+
+ if (likely(instr.c != R_ZERO)) {
+ tcg_gen_movi_tl(dc->cpu_R[instr.c], dc->pc + 4);
+ }
+}
+
+/*
+ * ra <- PC + 4
+ * PC <- rA
+ */
+static void callr(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+
+ tcg_gen_mov_tl(dc->cpu_R[R_PC], load_gpr(dc, instr.a));
+ tcg_gen_movi_tl(dc->cpu_R[R_RA], dc->pc + 4);
+
+ dc->is_jmp = DISAS_JUMP;
+}
+
+/* rC <- ctlN */
+static void rdctl(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+
+ gen_check_supervisor(dc);
+
+ switch (instr.imm5 + CR_BASE) {
+ case CR_PTEADDR:
+ case CR_TLBACC:
+ case CR_TLBMISC:
+ {
+#if !defined(CONFIG_USER_ONLY)
+ if (likely(instr.c != R_ZERO)) {
+ tcg_gen_mov_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.imm5 + CR_BASE]);
+#ifdef DEBUG_MMU
+ TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE);
+ gen_helper_mmu_read_debug(dc->cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+#endif
+ }
+#endif
+ break;
+ }
+
+ default:
+ if (likely(instr.c != R_ZERO)) {
+ tcg_gen_mov_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.imm5 + CR_BASE]);
+ }
+ break;
+ }
+}
+
+/* ctlN <- rA */
+static void wrctl(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+
+ gen_check_supervisor(dc);
+
+ switch (instr.imm5 + CR_BASE) {
+ case CR_PTEADDR:
+ case CR_TLBACC:
+ case CR_TLBMISC:
+ {
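+ /* Writes to the TLB control registers are routed through the
+ * mmu_write() helper so the MMU model in mmu.c stays in sync.
+ */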
+#if !defined(CONFIG_USER_ONLY)
+ TCGv_i32 tmp = tcg_const_i32(instr.imm5 + CR_BASE);
+ gen_helper_mmu_write(dc->cpu_env, tmp, load_gpr(dc, instr.a));
+ tcg_temp_free_i32(tmp);
+#endif
+ break;
+ }
+
+ default:
+ tcg_gen_mov_tl(dc->cpu_R[instr.imm5 + CR_BASE], load_gpr(dc, instr.a));
+ break;
+ }
+
+ /* If interrupts were enabled using WRCTL, trigger them. */
+#if !defined(CONFIG_USER_ONLY)
+ if ((instr.imm5 + CR_BASE) == CR_STATUS) {
+ gen_helper_check_interrupts(dc->cpu_env);
+ }
+#endif
+}
+
+/* Comparison instructions */
+static void gen_cmpxx(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, code);
+ if (likely(instr.c != R_ZERO)) {
+ tcg_gen_setcond_tl(flags, dc->cpu_R[instr.c], dc->cpu_R[instr.a],
+ dc->cpu_R[instr.b]);
+ }
+}
+
+/* Math/logic instructions */
+#define gen_r_math_logic(fname, insn, op3) \
+static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+{ \
+ R_TYPE(instr, (code)); \
+ if (likely(instr.c != R_ZERO)) { \
+ tcg_gen_##insn((dc)->cpu_R[instr.c], load_gpr((dc), instr.a), \
+ (op3)); \
+ } \
+}
+
+gen_r_math_logic(add, add_tl, load_gpr(dc, instr.b))
+gen_r_math_logic(sub, sub_tl, load_gpr(dc, instr.b))
+gen_r_math_logic(mul, mul_tl, load_gpr(dc, instr.b))
+
+gen_r_math_logic(and, and_tl, load_gpr(dc, instr.b))
+gen_r_math_logic(or, or_tl, load_gpr(dc, instr.b))
+gen_r_math_logic(xor, xor_tl, load_gpr(dc, instr.b))
+gen_r_math_logic(nor, nor_tl, load_gpr(dc, instr.b))
+
+gen_r_math_logic(srai, sari_tl, instr.imm5)
+gen_r_math_logic(srli, shri_tl, instr.imm5)
+gen_r_math_logic(slli, shli_tl, instr.imm5)
+gen_r_math_logic(roli, rotli_tl, instr.imm5)
+
+#define gen_r_mul(fname, insn) \
+static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+{ \
+ R_TYPE(instr, (code)); \
+ if (likely(instr.c != R_ZERO)) { \
+ TCGv t0 = tcg_temp_new(); \
+ tcg_gen_##insn(t0, dc->cpu_R[instr.c], \
+ load_gpr(dc, instr.a), load_gpr(dc, instr.b)); \
+ tcg_temp_free(t0); \
+ } \
+}
+
+gen_r_mul(mulxss, muls2_tl)
+gen_r_mul(mulxuu, mulu2_tl)
+gen_r_mul(mulxsu, mulsu2_tl)
+
+#define gen_r_shift_s(fname, insn) \
+static void (fname)(DisasContext *dc, uint32_t code, uint32_t flags) \
+{ \
+ R_TYPE(instr, (code)); \
+ if (likely(instr.c != R_ZERO)) { \
+ TCGv t0 = tcg_temp_new(); \
+ tcg_gen_andi_tl(t0, load_gpr((dc), instr.b), 31); \
+ tcg_gen_##insn((dc)->cpu_R[instr.c], load_gpr((dc), instr.a), t0); \
+ tcg_temp_free(t0); \
+ } \
+}
+
+gen_r_shift_s(sra, sar_tl)
+gen_r_shift_s(srl, shr_tl)
+gen_r_shift_s(sll, shl_tl)
+gen_r_shift_s(rol, rotl_tl)
+gen_r_shift_s(ror, rotr_tl)
+
+static void divs(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, (code));
+
+ /* Stores into R_ZERO are ignored */
+ if (unlikely(instr.c == R_ZERO)) {
+ return;
+ }
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+
+ tcg_gen_ext32s_tl(t0, load_gpr(dc, instr.a));
+ tcg_gen_ext32s_tl(t1, load_gpr(dc, instr.b));
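+ /* If the division would trap on the host (divisor of zero, or
+ * INT_MIN / -1), force the divisor to 1 via the movcond below.
+ */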
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(dc->cpu_R[instr.c], t0, t1);
+ tcg_gen_ext32s_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.c]);
+
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+}
+
+static void divu(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ R_TYPE(instr, (code));
+
+ /* Stores into R_ZERO are ignored */
+ if (unlikely(instr.c == R_ZERO)) {
+ return;
+ }
+
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+
+ tcg_gen_ext32u_tl(t0, load_gpr(dc, instr.a));
+ tcg_gen_ext32u_tl(t1, load_gpr(dc, instr.b));
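+ /* Substitute a divisor of 1 when the guest divisor is zero so the
+ * host division cannot trap.
+ */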
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_tl(dc->cpu_R[instr.c], t0, t1);
+ tcg_gen_ext32s_tl(dc->cpu_R[instr.c], dc->cpu_R[instr.c]);
+
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+}
+
+static const Nios2Instruction r_type_instructions[] = {
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(eret), /* eret */
+ INSTRUCTION(roli), /* roli */
+ INSTRUCTION(rol), /* rol */
+ INSTRUCTION_NOP(), /* flushp */
+ INSTRUCTION(ret), /* ret */
+ INSTRUCTION(nor), /* nor */
+ INSTRUCTION(mulxuu), /* mulxuu */
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_GE), /* cmpge */
+ INSTRUCTION(bret), /* bret */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(ror), /* ror */
+ INSTRUCTION_NOP(), /* flushi */
+ INSTRUCTION(jmp), /* jmp */
+ INSTRUCTION(and), /* and */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_LT), /* cmplt */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(slli), /* slli */
+ INSTRUCTION(sll), /* sll */
+ INSTRUCTION_UNIMPLEMENTED(), /* wrprs */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(or), /* or */
+ INSTRUCTION(mulxsu), /* mulxsu */
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_NE), /* cmpne */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(srli), /* srli */
+ INSTRUCTION(srl), /* srl */
+ INSTRUCTION(nextpc), /* nextpc */
+ INSTRUCTION(callr), /* callr */
+ INSTRUCTION(xor), /* xor */
+ INSTRUCTION(mulxss), /* mulxss */
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_EQ), /* cmpeq */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(divu), /* divu */
+ INSTRUCTION(divs), /* div */
+ INSTRUCTION(rdctl), /* rdctl */
+ INSTRUCTION(mul), /* mul */
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_GEU), /* cmpgeu */
+ INSTRUCTION_NOP(), /* initi */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_excp, EXCP_TRAP), /* trap */
+ INSTRUCTION(wrctl), /* wrctl */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_cmpxx, TCG_COND_LTU), /* cmpltu */
+ INSTRUCTION(add), /* add */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_FLG(gen_excp, EXCP_BREAK), /* break */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(nop), /* nop */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION(sub), /* sub */
+ INSTRUCTION(srai), /* srai */
+ INSTRUCTION(sra), /* sra */
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+ INSTRUCTION_ILLEGAL(),
+};
+
+static void handle_r_type_instr(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+ uint8_t opx;
+ const Nios2Instruction *instr;
+
+ opx = get_opxcode(code);
+ if (unlikely(opx >= ARRAY_SIZE(r_type_instructions))) {
+ goto illegal_op;
+ }
+
+ instr = &r_type_instructions[opx];
+ instr->handler(dc, code, instr->flags);
+
+ return;
+
+illegal_op:
+ t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
+}
+
+static void handle_instruction(DisasContext *dc, CPUNios2State *env)
+{
+ uint32_t code;
+ uint8_t op;
+ const Nios2Instruction *instr;
+#if defined(CONFIG_USER_ONLY)
+ /* FIXME: Is this needed ? */
+ if (dc->pc >= 0x1000 && dc->pc < 0x2000) {
+ env->regs[R_PC] = dc->pc;
+ t_gen_helper_raise_exception(dc, 0xaa);
+ return;
+ }
+#endif
+ code = cpu_ldl_code(env, dc->pc);
+ op = get_opcode(code);
+
+ if (unlikely(op >= ARRAY_SIZE(i_type_instructions))) {
+ goto illegal_op;
+ }
+
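+ /* dc->zero is allocated lazily by load_zero() and freed again below
+ * once the instruction has been translated.
+ */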
+ TCGV_UNUSED_I32(dc->zero);
+
+ instr = &i_type_instructions[op];
+ instr->handler(dc, code, instr->flags);
+
+ if (!TCGV_IS_UNUSED_I32(dc->zero)) {
+ tcg_temp_free(dc->zero);
+ }
+
+ return;
+
+illegal_op:
+ t_gen_helper_raise_exception(dc, EXCP_ILLEGAL);
+}
+
+static const char * const regnames[] = {
+ "zero", "at", "r2", "r3",
+ "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19",
+ "r20", "r21", "r22", "r23",
+ "et", "bt", "gp", "sp",
+ "fp", "ea", "ba", "ra",
+ "status", "estatus", "bstatus", "ienable",
+ "ipending", "cpuid", "reserved0", "exception",
+ "pteaddr", "tlbacc", "tlbmisc", "reserved1",
+ "badaddr", "config", "mpubase", "mpuacc",
+ "reserved2", "reserved3", "reserved4", "reserved5",
+ "reserved6", "reserved7", "reserved8", "reserved9",
+ "reserved10", "reserved11", "reserved12", "reserved13",
+ "reserved14", "reserved15", "reserved16", "reserved17",
+ "rpc"
+};
+
+static TCGv_ptr cpu_env;
+static TCGv cpu_R[NUM_CORE_REGS];
+
+#include "exec/gen-icount.h"
+
+static void gen_exception(DisasContext *dc, uint32_t excp)
+{
+ TCGv_i32 tmp = tcg_const_i32(excp);
+
+ tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
+ gen_helper_raise_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ dc->is_jmp = DISAS_UPDATE;
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUNios2State *env, TranslationBlock *tb)
+{
+ Nios2CPU *cpu = nios2_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext dc1, *dc = &dc1;
+ int num_insns;
+ int max_insns;
+
+ /* Initialize DC */
+ dc->cpu_env = cpu_env;
+ dc->cpu_R = cpu_R;
+ dc->is_jmp = DISAS_NEXT;
+ dc->pc = tb->pc;
+ dc->tb = tb;
+ dc->mem_idx = cpu_mmu_index(env, false);
+ dc->singlestep_enabled = cs->singlestep_enabled;
+
+ /* Set up instruction counts */
+ num_insns = 0;
+ if (cs->singlestep_enabled || singlestep) {
+ max_insns = 1;
+ } else {
+ int page_insns = (TARGET_PAGE_SIZE - (tb->pc & ~TARGET_PAGE_MASK)) / 4;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > page_insns) {
+ max_insns = page_insns;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+ }
+
+ gen_tb_start(tb);
+ do {
+ tcg_gen_insn_start(dc->pc);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
+ gen_exception(dc, EXCP_DEBUG);
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ dc->pc += 4;
+ break;
+ }
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ /* Decode an instruction */
+ handle_instruction(dc, env);
+
+ dc->pc += 4;
+
+ /* Translation stops when a conditional branch is encountered.
+ * Otherwise the subsequent code could get translated several times.
+ * Also stop translation when a page boundary is reached. This
+ * ensures prefetch aborts occur at the right place. */
+ } while (!dc->is_jmp &&
+ !tcg_op_buf_full() &&
+ num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+
+ /* Indicate where the next block should start */
+ switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ /* Save the current PC back into the CPU register */
+ tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
+ tcg_gen_exit_tb(0);
+ break;
+
+ default:
+ case DISAS_JUMP:
+ case DISAS_UPDATE:
+ /* The jump will already have updated the PC register */
+ tcg_gen_exit_tb(0);
+ break;
+
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ }
+
+ /* End off the block */
+ gen_tb_end(tb, num_insns);
+
+ /* Mark instruction starts for the final generated instruction */
+ tb->size = dc->pc - tb->pc;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(tb->pc)) {
+ qemu_log_lock();
+ qemu_log("IN: %s\n", lookup_symbol(tb->pc));
+ log_target_disas(cs, tb->pc, dc->pc - tb->pc, 0);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+}
+
+void nios2_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ Nios2CPU *cpu = NIOS2_CPU(cs);
+ CPUNios2State *env = &cpu->env;
+ int i;
+
+ if (!env || !f) {
+ return;
+ }
+
+ cpu_fprintf(f, "IN: PC=%x %s\n",
+ env->regs[R_PC], lookup_symbol(env->regs[R_PC]));
+
+ for (i = 0; i < NUM_CORE_REGS; i++) {
+ cpu_fprintf(f, "%9s=%8.8x ", regnames[i], env->regs[i]);
+ if ((i + 1) % 4 == 0) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+#if !defined(CONFIG_USER_ONLY)
+ cpu_fprintf(f, " mmu write: VPN=%05X PID %02X TLBACC %08X\n",
+ env->mmu.pteaddr_wr & CR_PTEADDR_VPN_MASK,
+ (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4,
+ env->mmu.tlbacc_wr);
+#endif
+ cpu_fprintf(f, "\n\n");
+}
+
+void nios2_tcg_init(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+
+ for (i = 0; i < NUM_CORE_REGS; i++) {
+ cpu_R[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUNios2State, regs[i]),
+ regnames[i]);
+ }
+}
+
+void restore_state_to_opc(CPUNios2State *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->regs[R_PC] = data[0];
+}
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index 698e87bb25..422139d29f 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -44,14 +44,7 @@ static void openrisc_cpu_reset(CPUState *s)
occ->parent_reset(s);
-#ifndef CONFIG_USER_ONLY
- memset(&cpu->env, 0, offsetof(CPUOpenRISCState, tlb));
-#else
- memset(&cpu->env, 0, offsetof(CPUOpenRISCState, irq));
-#endif
-
- tlb_flush(s, 1);
- /*tb_flush(&cpu->env); FIXME: Do we need it? */
+ memset(&cpu->env, 0, offsetof(CPUOpenRISCState, end_reset_fields));
cpu->env.pc = 0x100;
cpu->env.sr = SR_FO | SR_SM;
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index aaf153579a..508ef568b4 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -300,6 +300,9 @@ typedef struct CPUOpenRISCState {
in solt so far. */
uint32_t btaken; /* the SR_F bit */
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
diff --git a/target/openrisc/helper.h b/target/openrisc/helper.h
index f53fa21344..bcc7245fc3 100644
--- a/target/openrisc/helper.h
+++ b/target/openrisc/helper.h
@@ -54,8 +54,6 @@ FOP_CMP(ge)
#undef FOP_CMP
/* int */
-DEF_HELPER_FLAGS_1(ff1, 0, tl, tl)
-DEF_HELPER_FLAGS_1(fl1, 0, tl, tl)
DEF_HELPER_FLAGS_3(mul32, 0, i32, env, i32, i32)
/* interrupt */
diff --git a/target/openrisc/int_helper.c b/target/openrisc/int_helper.c
index 4d1f958901..ba0fd277cd 100644
--- a/target/openrisc/int_helper.c
+++ b/target/openrisc/int_helper.c
@@ -24,25 +24,6 @@
#include "exception.h"
#include "qemu/host-utils.h"
-target_ulong HELPER(ff1)(target_ulong x)
-{
-/*#ifdef TARGET_OPENRISC64
- return x ? ctz64(x) + 1 : 0;
-#else*/
- return x ? ctz32(x) + 1 : 0;
-/*#endif*/
-}
-
-target_ulong HELPER(fl1)(target_ulong x)
-{
-/* not used yet, open it when we need or64. */
-/*#ifdef TARGET_OPENRISC64
- return 64 - clz64(x);
-#else*/
- return 32 - clz32(x);
-/*#endif*/
-}
-
uint32_t HELPER(mul32)(CPUOpenRISCState *env,
uint32_t ra, uint32_t rb)
{
diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c
index 5fe3f11ffc..e43fc84ef7 100644
--- a/target/openrisc/interrupt.c
+++ b/target/openrisc/interrupt.c
@@ -45,7 +45,7 @@ void openrisc_cpu_do_interrupt(CPUState *cs)
/* For machine-state changed between user-mode and supervisor mode,
we need flush TLB when we enter&exit EXCP. */
- tlb_flush(cs, 1);
+ tlb_flush(cs);
env->esr = env->sr;
env->sr &= ~SR_DME;
diff --git a/target/openrisc/interrupt_helper.c b/target/openrisc/interrupt_helper.c
index 116f9109a7..0ed5146e8d 100644
--- a/target/openrisc/interrupt_helper.c
+++ b/target/openrisc/interrupt_helper.c
@@ -53,7 +53,7 @@ void HELPER(rfe)(CPUOpenRISCState *env)
}
if (need_flush_tlb) {
- tlb_flush(cs, 1);
+ tlb_flush(cs);
}
#endif
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
index a719e452be..daea902856 100644
--- a/target/openrisc/sys_helper.c
+++ b/target/openrisc/sys_helper.c
@@ -47,7 +47,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env,
case TO_SPR(0, 17): /* SR */
if ((env->sr & (SR_IME | SR_DME | SR_SM)) ^
(rb & (SR_IME | SR_DME | SR_SM))) {
- tlb_flush(cs, 1);
+ tlb_flush(cs);
}
env->sr = rb;
env->sr |= SR_FO; /* FO is const equal to 1 */
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index 229361aed1..03fa7db570 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -602,11 +602,13 @@ static void dec_calc(DisasContext *dc, uint32_t insn)
switch (op1) {
case 0x00: /* l.ff1 */
LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb);
- gen_helper_ff1(cpu_R[rd], cpu_R[ra]);
+ tcg_gen_ctzi_tl(cpu_R[rd], cpu_R[ra], -1);
+ tcg_gen_addi_tl(cpu_R[rd], cpu_R[rd], 1);
break;
case 0x01: /* l.fl1 */
LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb);
- gen_helper_fl1(cpu_R[rd], cpu_R[ra]);
+ tcg_gen_clzi_tl(cpu_R[rd], cpu_R[ra], TARGET_LONG_BITS);
+ tcg_gen_subfi_tl(cpu_R[rd], TARGET_LONG_BITS, cpu_R[rd]);
break;
default:
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index da00f0ab49..0a8fbba3c5 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -38,17 +38,12 @@ DEF_HELPER_4(divde, i64, env, i64, i64, i32)
DEF_HELPER_4(divweu, tl, env, tl, tl, i32)
DEF_HELPER_4(divwe, tl, env, tl, tl, i32)
-DEF_HELPER_FLAGS_1(cntlzw, TCG_CALL_NO_RWG_SE, tl, tl)
-DEF_HELPER_FLAGS_1(cnttzw, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl)
-DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
DEF_HELPER_3(sraw, tl, env, tl, tl)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl)
-DEF_HELPER_FLAGS_1(cntlzd, TCG_CALL_NO_RWG_SE, tl, tl)
-DEF_HELPER_FLAGS_1(cnttzd, TCG_CALL_NO_RWG_SE, tl, tl)
-DEF_HELPER_FLAGS_1(popcntd, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_3(srad, tl, env, tl, tl)
DEF_HELPER_0(darn32, tl)
diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h
index 62138163a5..2627a70176 100644
--- a/target/ppc/helper_regs.h
+++ b/target/ppc/helper_regs.h
@@ -161,7 +161,7 @@ static inline void check_tlb_flush(CPUPPCState *env, bool global)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
- tlb_flush(cs, 1);
+ tlb_flush(cs);
env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
}
@@ -176,7 +176,7 @@ static inline void check_tlb_flush(CPUPPCState *env, bool global)
CPUPPCState *other_env = &cpu->env;
other_env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
- tlb_flush(other_cs, 1);
+ tlb_flush(other_cs);
}
}
env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index 2d57c9a1c2..1871792ff6 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -141,16 +141,6 @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
#endif
-target_ulong helper_cntlzw(target_ulong t)
-{
- return clz32(t);
-}
-
-target_ulong helper_cnttzw(target_ulong t)
-{
- return ctz32(t);
-}
-
#if defined(TARGET_PPC64)
/* if x = 0xab, returns 0xababababababababa */
#define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff))
@@ -174,16 +164,6 @@ uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb)
#undef haszero
#undef hasvalue
-target_ulong helper_cntlzd(target_ulong t)
-{
- return clz64(t);
-}
-
-target_ulong helper_cnttzd(target_ulong t)
-{
- return ctz64(t);
-}
-
/* Return invalid random number.
*
* FIXME: Add rng backend or other mechanism to get cryptographically suitable
@@ -292,6 +272,7 @@ target_ulong helper_srad(CPUPPCState *env, target_ulong value,
#if defined(TARGET_PPC64)
target_ulong helper_popcntb(target_ulong val)
{
+ /* Note that we don't fold past bytes */
val = (val & 0x5555555555555555ULL) + ((val >> 1) &
0x5555555555555555ULL);
val = (val & 0x3333333333333333ULL) + ((val >> 2) &
@@ -303,6 +284,7 @@ target_ulong helper_popcntb(target_ulong val)
target_ulong helper_popcntw(target_ulong val)
{
+ /* Note that we don't fold past words. */
val = (val & 0x5555555555555555ULL) + ((val >> 1) &
0x5555555555555555ULL);
val = (val & 0x3333333333333333ULL) + ((val >> 2) &
@@ -315,29 +297,15 @@ target_ulong helper_popcntw(target_ulong val)
0x0000ffff0000ffffULL);
return val;
}
-
-target_ulong helper_popcntd(target_ulong val)
-{
- return ctpop64(val);
-}
#else
target_ulong helper_popcntb(target_ulong val)
{
+ /* Note that we don't fold past bytes */
val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
return val;
}
-
-target_ulong helper_popcntw(target_ulong val)
-{
- val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
- val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
- val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
- val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
- val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
- return val;
-}
#endif
/*****************************************************************************/
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 9c4834c4fc..ec92c64159 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -26,7 +26,7 @@
#include "cpu.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
#include "sysemu/numa.h"
#include "kvm_ppc.h"
#include "sysemu/cpus.h"
@@ -145,6 +145,11 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
return 0;
}
+int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
+{
+ return 0;
+}
+
static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
CPUPPCState *cenv = &cpu->env;
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index bd1d78bfbe..4b43283913 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -315,16 +315,4 @@ static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
#endif /* CONFIG_KVM */
-#ifndef KVM_INTERRUPT_SET
-#define KVM_INTERRUPT_SET -1
-#endif
-
-#ifndef KVM_INTERRUPT_UNSET
-#define KVM_INTERRUPT_UNSET -2
-#endif
-
-#ifndef KVM_INTERRUPT_SET_LEVEL
-#define KVM_INTERRUPT_SET_LEVEL -3
-#endif
-
#endif /* KVM_PPC_H */
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
index 18c16d2512..df9f7a4e05 100644
--- a/target/ppc/machine.c
+++ b/target/ppc/machine.c
@@ -105,7 +105,7 @@ static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
return 0;
}
-static int get_avr(QEMUFile *f, void *pv, size_t size)
+static int get_avr(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
ppc_avr_t *v = pv;
@@ -115,12 +115,14 @@ static int get_avr(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void put_avr(QEMUFile *f, void *pv, size_t size)
+static int put_avr(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
ppc_avr_t *v = pv;
qemu_put_be64(f, v->u64[0]);
qemu_put_be64(f, v->u64[1]);
+ return 0;
}
static const VMStateInfo vmstate_info_avr = {
@@ -353,7 +355,7 @@ static const VMStateDescription vmstate_sr = {
};
#ifdef TARGET_PPC64
-static int get_slbe(QEMUFile *f, void *pv, size_t size)
+static int get_slbe(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
ppc_slb_t *v = pv;
@@ -363,12 +365,14 @@ static int get_slbe(QEMUFile *f, void *pv, size_t size)
return 0;
}
-static void put_slbe(QEMUFile *f, void *pv, size_t size)
+static int put_slbe(QEMUFile *f, void *pv, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
ppc_slb_t *v = pv;
qemu_put_be64(f, v->esid);
qemu_put_be64(f, v->vsid);
+ return 0;
}
static const VMStateInfo vmstate_info_slbe = {
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
index 1e6e705a4e..ab432bafaf 100644
--- a/target/ppc/misc_helper.c
+++ b/target/ppc/misc_helper.c
@@ -85,7 +85,7 @@ void helper_store_sdr1(CPUPPCState *env, target_ulong val)
if (!env->external_htab) {
if (env->spr[SPR_SDR1] != val) {
ppc_store_sdr1(env, val);
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
}
}
@@ -114,7 +114,7 @@ void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value)
if (likely(env->pb[num] != value)) {
env->pb[num] = value;
/* Should be optimized */
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
}
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index fdb7a787bf..0efc8c63fa 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -23,7 +23,7 @@
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
index d09fc0a85f..f746f53615 100644
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -248,7 +248,7 @@ static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
tlb = &env->tlb.tlb6[nr];
pte_invalidate(&tlb->pte0);
}
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
@@ -661,7 +661,7 @@ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
tlb = &env->tlb.tlbe[i];
tlb->prot &= ~PAGE_VALID;
}
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
@@ -863,7 +863,7 @@ static void booke206_flush_tlb(CPUPPCState *env, int flags,
tlb += booke206_tlb_size(env, i);
}
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
static hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
@@ -1769,7 +1769,7 @@ void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
#if !defined(FLUSH_ALL_TLBS)
do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
#endif
}
}
@@ -1804,7 +1804,7 @@ void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
#if !defined(FLUSH_ALL_TLBS)
do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
#endif
}
}
@@ -1852,7 +1852,7 @@ void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
}
#if defined(FLUSH_ALL_TLBS)
if (do_inval) {
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
#endif
}
@@ -1892,7 +1892,7 @@ void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
env->DBAT[1][nr] = value;
#if defined(FLUSH_ALL_TLBS)
if (do_inval) {
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
#endif
}
@@ -1921,7 +1921,7 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
break;
case POWERPC_MMU_BOOKE:
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
break;
case POWERPC_MMU_BOOKE206:
booke206_flush_tlb(env, -1, 0);
@@ -1937,7 +1937,7 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
case POWERPC_MMU_2_07a:
#endif /* defined(TARGET_PPC64) */
env->tlb_need_flush = 0;
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
break;
default:
/* XXX: TODO */
@@ -2433,13 +2433,13 @@ void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
}
tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
if (do_flush_tlbs) {
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
break;
case 1:
RPN = value & 0xFFFFFC0F;
if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
tlb->RPN = RPN;
break;
@@ -2555,7 +2555,7 @@ void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
env->spr[pidn] = pid;
/* changing PIDs mean we're in a different address space now */
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
void helper_booke206_tlbwe(CPUPPCState *env)
@@ -2650,7 +2650,7 @@ void helper_booke206_tlbwe(CPUPPCState *env)
if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
tlb_flush_page(CPU(cpu), tlb->mas2 & MAS2_EPN_MASK);
} else {
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
}
@@ -2775,7 +2775,7 @@ void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
/* flush TLB1 entries */
booke206_invalidate_ea_tlb(env, 1, address);
CPU_FOREACH(cs) {
- tlb_flush(cs, 1);
+ tlb_flush(cs);
}
} else {
/* flush TLB0 entries */
@@ -2811,7 +2811,7 @@ void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
}
tlb += booke206_tlb_size(env, i);
}
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
@@ -2852,7 +2852,7 @@ void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
tlb->mas1 &= ~MAS1_VALID;
}
}
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 59e9552d2b..121218087f 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -1641,7 +1641,13 @@ static void gen_andis_(DisasContext *ctx)
/* cntlzw */
static void gen_cntlzw(DisasContext *ctx)
{
- gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_clzi_i32(t, t, 32);
+ tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
+ tcg_temp_free_i32(t);
+
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
@@ -1649,7 +1655,13 @@ static void gen_cntlzw(DisasContext *ctx)
/* cnttzw */
static void gen_cnttzw(DisasContext *ctx)
{
- gen_helper_cnttzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ TCGv_i32 t = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_ctzi_i32(t, t, 32);
+ tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
+ tcg_temp_free_i32(t);
+
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
@@ -1832,14 +1844,18 @@ static void gen_popcntb(DisasContext *ctx)
static void gen_popcntw(DisasContext *ctx)
{
+#if defined(TARGET_PPC64)
gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+#else
+ tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+#endif
}
#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
- gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
@@ -1891,7 +1907,7 @@ GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
/* cntlzd */
static void gen_cntlzd(DisasContext *ctx)
{
- gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
@@ -1899,7 +1915,7 @@ static void gen_cntlzd(DisasContext *ctx)
/* cnttzd */
static void gen_cnttzd(DisasContext *ctx)
{
- gen_helper_cnttzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
if (unlikely(Rc(ctx->opcode) != 0)) {
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
@@ -1975,16 +1991,16 @@ static void gen_rlwinm(DisasContext *ctx)
{
TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
- uint32_t sh = SH(ctx->opcode);
- uint32_t mb = MB(ctx->opcode);
- uint32_t me = ME(ctx->opcode);
-
- if (mb == 0 && me == (31 - sh)) {
- tcg_gen_shli_tl(t_ra, t_rs, sh);
- tcg_gen_ext32u_tl(t_ra, t_ra);
- } else if (sh != 0 && me == 31 && sh == (32 - mb)) {
- tcg_gen_ext32u_tl(t_ra, t_rs);
- tcg_gen_shri_tl(t_ra, t_ra, mb);
+ int sh = SH(ctx->opcode);
+ int mb = MB(ctx->opcode);
+ int me = ME(ctx->opcode);
+ int len = me - mb + 1;
+ int rsh = (32 - sh) & 31;
+
+ if (sh != 0 && len > 0 && me == (31 - sh)) {
+ tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
+ } else if (me == 31 && rsh + len <= 32) {
+ tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
} else {
target_ulong mask;
#if defined(TARGET_PPC64)
@@ -1992,8 +2008,9 @@ static void gen_rlwinm(DisasContext *ctx)
me += 32;
#endif
mask = MASK(mb, me);
-
- if (mask <= 0xffffffffu) {
+ if (sh == 0) {
+ tcg_gen_andi_tl(t_ra, t_rs, mask);
+ } else if (mask <= 0xffffffffu) {
TCGv_i32 t0 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t0, t_rs);
tcg_gen_rotli_i32(t0, t0, sh);
@@ -2096,11 +2113,13 @@ static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ int len = me - mb + 1;
+ int rsh = (64 - sh) & 63;
- if (sh != 0 && mb == 0 && me == (63 - sh)) {
- tcg_gen_shli_tl(t_ra, t_rs, sh);
- } else if (sh != 0 && me == 63 && sh == (64 - mb)) {
- tcg_gen_shri_tl(t_ra, t_rs, mb);
+ if (sh != 0 && len > 0 && me == (63 - sh)) {
+ tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
+ } else if (me == 63 && rsh + len <= 64) {
+ tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
} else {
tcg_gen_rotli_tl(t_ra, t_rs, sh);
tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c
index 626e03186c..94dfcd7afc 100644
--- a/target/ppc/translate_init.c
+++ b/target/ppc/translate_init.c
@@ -21,10 +21,10 @@
#include "qemu/osdep.h"
#include "disas/bfd.h"
#include "exec/gdbstub.h"
-#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "sysemu/arch_init.h"
#include "sysemu/cpus.h"
+#include "sysemu/hw_accel.h"
#include "cpu-models.h"
#include "mmu-hash32.h"
#include "mmu-hash64.h"
@@ -10305,6 +10305,7 @@ CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
info = g_malloc0(sizeof(*info));
info->name = g_strdup(alias->alias);
+ info->q_typename = g_strdup(object_class_get_name(oc));
entry = g_malloc0(sizeof(*entry));
entry->value = info;
@@ -10415,9 +10416,6 @@ static void ppc_cpu_reset(CPUState *s)
}
env->spr[i] = spr->default_value;
}
-
- /* Flush all TLBs */
- tlb_flush(s, 1);
}
#ifndef CONFIG_USER_ONLY
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index 0a39d31237..066dcd17df 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -82,7 +82,6 @@ static void s390_cpu_reset(CPUState *s)
scc->parent_reset(s);
cpu->env.sigp_order = 0;
s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
- tlb_flush(s, 1);
}
/* S390CPUClass::initial_reset() */
@@ -94,7 +93,7 @@ static void s390_cpu_initial_reset(CPUState *s)
s390_cpu_reset(s);
/* initial reset does not touch regs,fregs and aregs */
- memset(&env->fpc, 0, offsetof(CPUS390XState, cpu_num) -
+ memset(&env->fpc, 0, offsetof(CPUS390XState, end_reset_fields) -
offsetof(CPUS390XState, fpc));
/* architectured initial values for CR 0 and 14 */
@@ -118,7 +117,6 @@ static void s390_cpu_initial_reset(CPUState *s)
if (kvm_enabled()) {
kvm_s390_reset_vcpu(cpu);
}
- tlb_flush(s, 1);
}
/* CPUClass:reset() */
@@ -133,7 +131,7 @@ static void s390_cpu_full_reset(CPUState *s)
cpu->env.sigp_order = 0;
s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
- memset(env, 0, offsetof(CPUS390XState, cpu_num));
+ memset(env, 0, offsetof(CPUS390XState, end_reset_fields));
/* architectured initial values for CR 0 and 14 */
env->cregs[0] = CR0_RESET;
@@ -156,7 +154,6 @@ static void s390_cpu_full_reset(CPUState *s)
if (kvm_enabled()) {
kvm_s390_reset_vcpu(cpu);
}
- tlb_flush(s, 1);
}
#if !defined(CONFIG_USER_ONLY)
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index fd36a25cf5..058ddad83a 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -139,9 +139,10 @@ typedef struct CPUS390XState {
uint8_t riccb[64];
- CPU_COMMON
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
- /* reset does memset(0) up to here */
+ CPU_COMMON
uint32_t cpu_num;
uint32_t machine_type;
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index c1e729df5e..5b66d3325d 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -290,6 +290,7 @@ static void create_cpu_model_list(ObjectClass *klass, void *opaque)
info->has_migration_safe = true;
info->migration_safe = scc->is_migration_safe;
info->q_static = scc->is_static;
+ info->q_typename = g_strdup(object_class_get_name(klass));
entry = g_malloc0(sizeof(*entry));
diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c
index 3d223dec97..94ab74d58f 100644
--- a/target/s390x/gdbstub.c
+++ b/target/s390x/gdbstub.c
@@ -23,6 +23,7 @@
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "qemu/bitops.h"
+#include "sysemu/hw_accel.h"
int s390_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
{
@@ -199,7 +200,7 @@ static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
case S390_C0_REGNUM ... S390_C15_REGNUM:
env->cregs[n] = ldtul_p(mem_buf);
if (tcg_enabled()) {
- tlb_flush(ENV_GET_CPU(env), 1);
+ tlb_flush(ENV_GET_CPU(env));
}
cpu_synchronize_post_init(ENV_GET_CPU(env));
return 8;
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
index 207a6e7d1c..9102071d0a 100644
--- a/target/s390x/helper.h
+++ b/target/s390x/helper.h
@@ -70,7 +70,6 @@ DEF_HELPER_FLAGS_4(msdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_3(tceb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
DEF_HELPER_FLAGS_3(tcdb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
DEF_HELPER_FLAGS_4(tcxb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64, i64)
-DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64)
DEF_HELPER_FLAGS_3(sqxb, TCG_CALL_NO_WG, i64, env, i64, i64)
diff --git a/target/s390x/int_helper.c b/target/s390x/int_helper.c
index 370c94da55..f26f36a904 100644
--- a/target/s390x/int_helper.c
+++ b/target/s390x/int_helper.c
@@ -117,12 +117,6 @@ uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
return ret;
}
-/* count leading zeros, for find leftmost one */
-uint64_t HELPER(clz)(uint64_t v)
-{
- return clz64(v);
-}
-
uint64_t HELPER(cvd)(int32_t reg)
{
/* positive 0 */
@@ -143,14 +137,11 @@ uint64_t HELPER(cvd)(int32_t reg)
return dec;
}
-uint64_t HELPER(popcnt)(uint64_t r2)
+uint64_t HELPER(popcnt)(uint64_t val)
{
- uint64_t ret = 0;
- int i;
-
- for (i = 0; i < 64; i += 8) {
- uint64_t t = ctpop32((r2 >> i) & 0xff);
- ret |= t << i;
- }
- return ret;
+ /* Note that we don't fold past bytes. */
+ val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
+ val = (val + (val >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
+ return val;
}
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index 97afe02599..6ed387671e 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -32,7 +32,7 @@
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
#include "hw/hw.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
@@ -197,7 +197,7 @@ void kvm_s390_cmma_reset(void)
.attr = KVM_S390_VM_MEM_CLR_CMMA,
};
- if (!mem_path || !kvm_s390_cmma_available()) {
+ if (mem_path || !kvm_s390_cmma_available()) {
return;
}
@@ -294,6 +294,11 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
return 0;
}
+int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
+{
+ return 0;
+}
+
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
return cpu->cpu_index;
@@ -2301,7 +2306,7 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
uint32_t idx = data >> ZPCI_MSI_VEC_BITS;
uint32_t vec = data & ZPCI_MSI_VEC_MASK;
- pbdev = s390_pci_find_dev_by_idx(idx);
+ pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), idx);
if (!pbdev) {
DPRINTF("add_msi_route no dev\n");
return -ENODEV;
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index 99bc5e2834..675aba2e44 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -872,7 +872,7 @@ void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
s390_cpu_recompute_watchpoints(CPU(cpu));
}
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
@@ -900,7 +900,7 @@ void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
s390_cpu_recompute_watchpoints(CPU(cpu));
}
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
@@ -1036,7 +1036,7 @@ uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
if (r2 & 0x3) {
/* flush TLB / ALB */
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
cc = 0;
} else {
@@ -1121,7 +1121,7 @@ void HELPER(ptlb)(CPUS390XState *env)
{
S390CPU *cpu = s390_env_get_cpu(env);
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
/* load using real address */
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index 02bc7058fd..01c62176bf 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -2249,7 +2249,7 @@ static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
/* R1 = IN ? CLZ(IN) : 64. */
- gen_helper_clz(o->out, o->in2);
+ tcg_gen_clzi_i64(o->out, o->in2, 64);
/* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
value by 64, which is undefined. But since the shift is 64 iff the
@@ -3134,20 +3134,26 @@ static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
}
}
- /* In some cases we can implement this with deposit, which can be more
- efficient on some hosts. */
- if (~mask == imask && i3 <= i4) {
- if (s->fields->op2 == 0x5d) {
- i3 += 32, i4 += 32;
- }
+ len = i4 - i3 + 1;
+ pos = 63 - i4;
+ rot = i5 & 63;
+ if (s->fields->op2 == 0x5d) {
+ pos += 32;
+ }
+
+ /* In some cases we can implement this with extract. */
+ if (imask == 0 && pos == 0 && len > 0 && rot + len <= 64) {
+ tcg_gen_extract_i64(o->out, o->in2, rot, len);
+ return NO_EXIT;
+ }
+
+ /* In some cases we can implement this with deposit. */
+ if (len > 0 && (imask == 0 || ~mask == imask)) {
/* Note that we rotate the bits to be inserted to the lsb, not to
the position as described in the PoO. */
- len = i4 - i3 + 1;
- pos = 63 - i4;
- rot = (i5 - pos) & 63;
+ rot = (rot - pos) & 63;
} else {
- pos = len = -1;
- rot = i5 & 63;
+ pos = -1;
}
/* Rotate the input as necessary. */
@@ -3155,7 +3161,11 @@ static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
/* Insert the selected bits into the output. */
if (pos >= 0) {
- tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
+ if (imask == 0) {
+ tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
+ } else {
+ tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
+ }
} else if (imask == 0) {
tcg_gen_andi_i64(o->out, o->in2, mask);
} else {
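The reworked op_risbg() above now chooses between three TCG primitives: a plain extract when the insert mask is zero and the field lands at bit 0, a deposit into zero when only the field survives, and a read-modify-write deposit otherwise. A hedged plain-C model of those primitives as used here (helper names are invented; 0 <= ofs and ofs + len <= 64 are assumed, as TCG requires):

/* Sketch only: plain-C models of the TCG ops selected above. */
#include <stdint.h>

static uint64_t len_mask(unsigned len)
{
    return len >= 64 ? ~0ULL : (1ULL << len) - 1;
}

/* tcg_gen_extract_i64(out, in, ofs, len) */
static uint64_t model_extract(uint64_t in, unsigned ofs, unsigned len)
{
    return (in >> ofs) & len_mask(len);
}

/* tcg_gen_deposit_z_i64(out, in, ofs, len): deposit into a zero base */
static uint64_t model_deposit_z(uint64_t in, unsigned ofs, unsigned len)
{
    return (in & len_mask(len)) << ofs;
}

/* tcg_gen_deposit_i64(out, base, in, ofs, len) */
static uint64_t model_deposit(uint64_t base, uint64_t in,
                              unsigned ofs, unsigned len)
{
    uint64_t field = len_mask(len) << ofs;
    return (base & ~field) | ((in << ofs) & field);
}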
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index a38f6a6ded..9a481c35dc 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -56,8 +56,7 @@ static void superh_cpu_reset(CPUState *s)
scc->parent_reset(s);
- memset(env, 0, offsetof(CPUSH4State, id));
- tlb_flush(s, 1);
+ memset(env, 0, offsetof(CPUSH4State, end_reset_fields));
env->pc = 0xA0000000;
#if defined(CONFIG_USER_ONLY)
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 478ab55868..cad8989f7e 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -175,6 +175,9 @@ typedef struct CPUSH4State {
uint32_t ldst;
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
/* Fields from here on are preserved over CPU reset. */
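The same reset idiom is applied to sparc and tilegx later in this series: instead of memset-ing up to a hand-picked named field, the state struct carries a zero-size end_reset_fields marker, so offsetof() on it always covers exactly the fields declared above it (the explicit tlb_flush() on reset presumably moved into the common CPU reset path). A minimal sketch of the idiom, assuming GNU C, where an empty struct member is a legal, size-zero extension:

/* Sketch only: the end_reset_fields marker pattern on a toy state. */
#include <stddef.h>
#include <string.h>

typedef struct ToyCPUState {
    unsigned long pc;
    unsigned long regs[16];        /* cleared on reset */
    struct {} end_reset_fields;    /* zero-size marker */
    unsigned long features;        /* preserved across reset */
} ToyCPUState;

static void toy_cpu_reset(ToyCPUState *env)
{
    /* Follows the struct layout automatically as fields are added. */
    memset(env, 0, offsetof(ToyCPUState, end_reset_fields));
}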
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index a33ac697c5..036c5ca56c 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -583,7 +583,7 @@ void cpu_load_tlb(CPUSH4State * env)
entry->v = 0;
}
- tlb_flush(CPU(sh_env_get_cpu(s)), 1);
+ tlb_flush(CPU(sh_env_get_cpu(s)));
}
uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
diff --git a/target/sparc/asi.h b/target/sparc/asi.h
index c9a1849600..d8d6284125 100644
--- a/target/sparc/asi.h
+++ b/target/sparc/asi.h
@@ -211,6 +211,7 @@
#define ASI_AFSR 0x4c /* Async fault status register */
#define ASI_AFAR 0x4d /* Async fault address register */
#define ASI_EC_TAG_DATA 0x4e /* E-cache tag/valid ram diag acc */
+#define ASI_HYP_SCRATCHPAD 0x4f /* (4V) Hypervisor scratchpad */
#define ASI_IMMU 0x50 /* Insn-MMU main register space */
#define ASI_IMMU_TSB_8KB_PTR 0x51 /* Insn-MMU 8KB TSB pointer reg */
#define ASI_IMMU_TSB_64KB_PTR 0x52 /* Insn-MMU 64KB TSB pointer reg */
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index 4e07b92fbd..d606eb53f4 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -36,8 +36,7 @@ static void sparc_cpu_reset(CPUState *s)
scc->parent_reset(s);
- memset(env, 0, offsetof(CPUSPARCState, version));
- tlb_flush(s, 1);
+ memset(env, 0, offsetof(CPUSPARCState, end_reset_fields));
env->cwp = 0;
#ifndef TARGET_SPARC64
env->wim = 1;
@@ -58,9 +57,13 @@ static void sparc_cpu_reset(CPUState *s)
env->psrps = 1;
#endif
#ifdef TARGET_SPARC64
- env->pstate = PS_PRIV|PS_RED|PS_PEF|PS_AG;
+ env->pstate = PS_PRIV | PS_RED | PS_PEF;
+ if (!cpu_has_hypervisor(env)) {
+ env->pstate |= PS_AG;
+ }
env->hpstate = cpu_has_hypervisor(env) ? HS_PRIV : 0;
env->tl = env->maxtl;
+ env->gl = 2;
cpu_tsptr(env)->tt = TT_POWER_ON_RESET;
env->lsu = 0;
#else
@@ -745,14 +748,17 @@ void sparc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << PSR_CARRY_SHIFT);
cpu_fprintf(f, " xcc: ");
cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << (PSR_CARRY_SHIFT - 4));
- cpu_fprintf(f, ") asi: %02x tl: %d pil: %x\n", env->asi, env->tl,
- env->psrpil);
+ cpu_fprintf(f, ") asi: %02x tl: %d pil: %x gl: %d\n", env->asi, env->tl,
+ env->psrpil, env->gl);
+ cpu_fprintf(f, "tbr: " TARGET_FMT_lx " hpstate: " TARGET_FMT_lx " htba: "
+ TARGET_FMT_lx "\n", env->tbr, env->hpstate, env->htba);
cpu_fprintf(f, "cansave: %d canrestore: %d otherwin: %d wstate: %d "
"cleanwin: %d cwp: %d\n",
env->cansave, env->canrestore, env->otherwin, env->wstate,
env->cleanwin, env->nwindows - 1 - env->cwp);
cpu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx " fprs: "
TARGET_FMT_lx "\n", env->fsr, env->y, env->fprs);
+
#else
cpu_fprintf(f, "psr: %08x (icc: ", cpu_get_psr(env));
cpu_print_cc(f, cpu_fprintf, cpu_get_psr(env));
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index 5fb0ed1aad..95a36a4bdc 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -68,6 +68,8 @@
#define TT_DATA_ACCESS 0x32
#define TT_UNALIGNED 0x34
#define TT_PRIV_ACT 0x37
+#define TT_INSN_REAL_TRANSLATION_MISS 0x3e
+#define TT_DATA_REAL_TRANSLATION_MISS 0x3f
#define TT_EXTINT 0x40
#define TT_IVEC 0x60
#define TT_TMISS 0x64
@@ -77,6 +79,7 @@
#define TT_FILL 0xc0
#define TT_WOTHER (1 << 5)
#define TT_TRAP 0x100
+#define TT_HTRAP 0x180
#endif
#define PSR_NEG_SHIFT 23
@@ -227,7 +230,7 @@ enum {
#if !defined(TARGET_SPARC64)
#define NB_MMU_MODES 3
#else
-#define NB_MMU_MODES 7
+#define NB_MMU_MODES 6
typedef struct trap_state {
uint64_t tpc;
uint64_t tnpc;
@@ -302,21 +305,42 @@ enum {
#define TTE_W_OK_BIT (1ULL << 1)
#define TTE_GLOBAL_BIT (1ULL << 0)
+#define TTE_NFO_BIT_UA2005 (1ULL << 62)
+#define TTE_USED_BIT_UA2005 (1ULL << 47)
+#define TTE_LOCKED_BIT_UA2005 (1ULL << 61)
+#define TTE_SIDEEFFECT_BIT_UA2005 (1ULL << 11)
+#define TTE_PRIV_BIT_UA2005 (1ULL << 8)
+#define TTE_W_OK_BIT_UA2005 (1ULL << 6)
+
#define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT)
#define TTE_IS_NFO(tte) ((tte) & TTE_NFO_BIT)
#define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT)
#define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT)
#define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT)
+#define TTE_IS_SIDEEFFECT_UA2005(tte) ((tte) & TTE_SIDEEFFECT_BIT_UA2005)
#define TTE_IS_PRIV(tte) ((tte) & TTE_PRIV_BIT)
#define TTE_IS_W_OK(tte) ((tte) & TTE_W_OK_BIT)
+
+#define TTE_IS_NFO_UA2005(tte) ((tte) & TTE_NFO_BIT_UA2005)
+#define TTE_IS_USED_UA2005(tte) ((tte) & TTE_USED_BIT_UA2005)
+#define TTE_IS_LOCKED_UA2005(tte) ((tte) & TTE_LOCKED_BIT_UA2005)
+#define TTE_IS_SIDEEFFECT_UA2005(tte) ((tte) & TTE_SIDEEFFECT_BIT_UA2005)
+#define TTE_IS_PRIV_UA2005(tte) ((tte) & TTE_PRIV_BIT_UA2005)
+#define TTE_IS_W_OK_UA2005(tte) ((tte) & TTE_W_OK_BIT_UA2005)
+
#define TTE_IS_GLOBAL(tte) ((tte) & TTE_GLOBAL_BIT)
#define TTE_SET_USED(tte) ((tte) |= TTE_USED_BIT)
#define TTE_SET_UNUSED(tte) ((tte) &= ~TTE_USED_BIT)
#define TTE_PGSIZE(tte) (((tte) >> 61) & 3ULL)
+#define TTE_PGSIZE_UA2005(tte) ((tte) & 7ULL)
#define TTE_PA(tte) ((tte) & 0x1ffffffe000ULL)
+/* UltraSPARC T1 specific */
+#define TLB_UST1_IS_REAL_BIT (1ULL << 9) /* Real translation entry */
+#define TLB_UST1_IS_SUN4V_BIT (1ULL << 10) /* sun4u/sun4v TTE format switch */
+
#define SFSR_NF_BIT (1ULL << 24) /* JPS1 NoFault */
#define SFSR_TM_BIT (1ULL << 15) /* JPS1 TLB Miss */
#define SFSR_FT_VA_IMMU_BIT (1ULL << 13) /* USIIi VA out of range (IMMU) */
@@ -360,6 +384,9 @@ enum {
#define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */
#define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */
+#define CONVERT_BIT(X, SRC, DST) \
+ (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
+
typedef struct SparcTLBEntry {
uint64_t tag;
uint64_t tte;
@@ -380,7 +407,24 @@ struct CPUTimer
typedef struct CPUTimer CPUTimer;
typedef struct CPUSPARCState CPUSPARCState;
-
+#if defined(TARGET_SPARC64)
+typedef union {
+ uint64_t mmuregs[16];
+ struct {
+ uint64_t tsb_tag_target;
+ uint64_t mmu_primary_context;
+ uint64_t mmu_secondary_context;
+ uint64_t sfsr;
+ uint64_t sfar;
+ uint64_t tsb;
+ uint64_t tag_access;
+ uint64_t virtual_watchpoint;
+ uint64_t physical_watchpoint;
+ uint64_t sun4v_ctx_config[2];
+ uint64_t sun4v_tsb_pointers[4];
+ };
+} SparcV9MMU;
+#endif
struct CPUSPARCState {
target_ulong gregs[8]; /* general registers */
target_ulong *regwptr; /* pointer to current register window */
@@ -419,6 +463,9 @@ struct CPUSPARCState {
/* NOTE: we allow 8 more registers to handle wrapping */
target_ulong regbase[MAX_NWINDOWS * 16 + 8];
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
@@ -430,31 +477,8 @@ struct CPUSPARCState {
uint64_t lsu;
#define DMMU_E 0x8
#define IMMU_E 0x4
- //typedef struct SparcMMU
- union {
- uint64_t immuregs[16];
- struct {
- uint64_t tsb_tag_target;
- uint64_t unused_mmu_primary_context; // use DMMU
- uint64_t unused_mmu_secondary_context; // use DMMU
- uint64_t sfsr;
- uint64_t sfar;
- uint64_t tsb;
- uint64_t tag_access;
- } immu;
- };
- union {
- uint64_t dmmuregs[16];
- struct {
- uint64_t tsb_tag_target;
- uint64_t mmu_primary_context;
- uint64_t mmu_secondary_context;
- uint64_t sfsr;
- uint64_t sfar;
- uint64_t tsb;
- uint64_t tag_access;
- } dmmu;
- };
+ SparcV9MMU immu;
+ SparcV9MMU dmmu;
SparcTLBEntry itlb[64];
SparcTLBEntry dtlb[64];
uint32_t mmu_version;
@@ -484,6 +508,7 @@ struct CPUSPARCState {
uint64_t bgregs[8]; /* backup for normal global registers */
uint64_t igregs[8]; /* interrupt general registers */
uint64_t mgregs[8]; /* mmu general registers */
+ uint64_t glregs[8 * MAXTL_MAX];
uint64_t fprs;
uint64_t tick_cmpr, stick_cmpr;
CPUTimer *tick, *stick;
@@ -493,6 +518,7 @@ struct CPUSPARCState {
uint32_t gl; // UA2005
/* UA 2005 hyperprivileged registers */
uint64_t hpstate, htstate[MAXTL_MAX], hintp, htba, hver, hstick_cmpr, ssr;
+ uint64_t scratch[8];
CPUTimer *hstick; // UA 2005
/* Interrupt vector registers */
uint64_t ivec_status;
@@ -583,6 +609,7 @@ void cpu_put_ccr(CPUSPARCState *env1, target_ulong val);
target_ulong cpu_get_cwp64(CPUSPARCState *env1);
void cpu_put_cwp64(CPUSPARCState *env1, int cwp);
void cpu_change_pstate(CPUSPARCState *env1, uint32_t new_pstate);
+void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl);
#endif
int cpu_cwp_inc(CPUSPARCState *env1, int cwp);
int cpu_cwp_dec(CPUSPARCState *env1, int cwp);
@@ -642,8 +669,7 @@ int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc);
#define MMU_KERNEL_IDX 2
#define MMU_KERNEL_SECONDARY_IDX 3
#define MMU_NUCLEUS_IDX 4
-#define MMU_HYPV_IDX 5
-#define MMU_PHYS_IDX 6
+#define MMU_PHYS_IDX 5
#else
#define MMU_USER_IDX 0
#define MMU_KERNEL_IDX 1
@@ -665,6 +691,11 @@ static inline int cpu_supervisor_mode(CPUSPARCState *env1)
{
return env1->pstate & PS_PRIV;
}
+#else
+static inline int cpu_supervisor_mode(CPUSPARCState *env1)
+{
+ return env1->psrs;
+}
#endif
static inline int cpu_mmu_index(CPUSPARCState *env, bool ifetch)
@@ -683,10 +714,10 @@ static inline int cpu_mmu_index(CPUSPARCState *env, bool ifetch)
? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0
: (env->lsu & DMMU_E) == 0) {
return MMU_PHYS_IDX;
+ } else if (cpu_hypervisor_mode(env)) {
+ return MMU_PHYS_IDX;
} else if (env->tl > 0) {
return MMU_NUCLEUS_IDX;
- } else if (cpu_hypervisor_mode(env)) {
- return MMU_HYPV_IDX;
} else if (cpu_supervisor_mode(env)) {
return MMU_KERNEL_IDX;
} else {
@@ -701,8 +732,9 @@ static inline int cpu_interrupts_enabled(CPUSPARCState *env1)
if (env1->psret != 0)
return 1;
#else
- if (env1->pstate & PS_IE)
+ if ((env1->pstate & PS_IE) && !cpu_hypervisor_mode(env1)) {
return 1;
+ }
#endif
return 0;
@@ -731,6 +763,8 @@ trap_state* cpu_tsptr(CPUSPARCState* env);
#define TB_FLAG_MMU_MASK 7
#define TB_FLAG_FPU_ENABLED (1 << 4)
#define TB_FLAG_AM_ENABLED (1 << 5)
+#define TB_FLAG_SUPER (1 << 6)
+#define TB_FLAG_HYPER (1 << 7)
#define TB_FLAG_ASI_SHIFT 24
static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
@@ -740,7 +774,17 @@ static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
*pc = env->pc;
*cs_base = env->npc;
flags = cpu_mmu_index(env, false);
+#ifndef CONFIG_USER_ONLY
+ if (cpu_supervisor_mode(env)) {
+ flags |= TB_FLAG_SUPER;
+ }
+#endif
#ifdef TARGET_SPARC64
+#ifndef CONFIG_USER_ONLY
+ if (cpu_hypervisor_mode(env)) {
+ flags |= TB_FLAG_HYPER;
+ }
+#endif
if (env->pstate & PS_AM) {
flags |= TB_FLAG_AM_ENABLED;
}
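The CONVERT_BIT() macro added above relocates a flag from one single-bit mask to another by dividing or multiplying by the ratio of the two masks, i.e. shifting by the difference in bit positions; sun4v_tte_to_sun4u() in ldst_helper.c below uses it to translate TTE flag layouts. A small self-contained check of what it does, with the writable-bit values taken from the definitions above:

/* Sketch only: a worked example of CONVERT_BIT(). */
#include <assert.h>
#include <stdint.h>

#define CONVERT_BIT(X, SRC, DST) \
    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))

int main(void)
{
    const uint64_t W_OK_UA2005 = 1ULL << 6;   /* sun4v writable bit */
    const uint64_t W_OK_SUN4U  = 1ULL << 1;   /* sun4u writable bit */

    uint64_t tte = W_OK_UA2005 | (1ULL << 40);   /* unrelated bit set too */
    assert(CONVERT_BIT(tte, W_OK_UA2005, W_OK_SUN4U) == W_OK_SUN4U);
    assert(CONVERT_BIT(0, W_OK_UA2005, W_OK_SUN4U) == 0);
    return 0;
}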
diff --git a/target/sparc/helper.c b/target/sparc/helper.c
index 359b0b15ed..1d854890b4 100644
--- a/target/sparc/helper.c
+++ b/target/sparc/helper.c
@@ -49,11 +49,6 @@ void helper_debug(CPUSPARCState *env)
}
#ifdef TARGET_SPARC64
-target_ulong helper_popc(target_ulong val)
-{
- return ctpop64(val);
-}
-
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
index 0cf1bfb73a..b8f1e78c75 100644
--- a/target/sparc/helper.h
+++ b/target/sparc/helper.h
@@ -5,6 +5,7 @@ DEF_HELPER_1(rdpsr, tl, env)
DEF_HELPER_1(power_down, void, env)
#else
DEF_HELPER_FLAGS_2(wrpil, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(wrgl, void, env, tl)
DEF_HELPER_2(wrpstate, void, env, tl)
DEF_HELPER_1(done, void, env)
DEF_HELPER_1(retry, void, env)
@@ -16,7 +17,6 @@ DEF_HELPER_2(wrccr, void, env, tl)
DEF_HELPER_1(rdcwp, tl, env)
DEF_HELPER_2(wrcwp, void, env, tl)
DEF_HELPER_FLAGS_2(array8, TCG_CALL_NO_RWG_SE, tl, tl, tl)
-DEF_HELPER_FLAGS_1(popc, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(set_softint, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_2(clear_softint, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_2(write_softint, TCG_CALL_NO_RWG, void, env, i64)
diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c
index 29360fa5fe..605747c93c 100644
--- a/target/sparc/int64_helper.c
+++ b/target/sparc/int64_helper.c
@@ -78,8 +78,10 @@ void sparc_cpu_do_interrupt(CPUState *cs)
static int count;
const char *name;
- if (intno < 0 || intno >= 0x180) {
+ if (intno < 0 || intno >= 0x1ff) {
name = "Unknown";
+ } else if (intno >= 0x180) {
+ name = "Hyperprivileged Trap Instruction";
} else if (intno >= 0x100) {
name = "Trap Instruction";
} else if (intno >= 0xc0) {
@@ -135,16 +137,42 @@ void sparc_cpu_do_interrupt(CPUState *cs)
tsptr->tnpc = env->npc;
tsptr->tt = intno;
+ if (cpu_has_hypervisor(env)) {
+ env->htstate[env->tl] = env->hpstate;
+ /* XXX OpenSPARC T1 - UltraSPARC T3 have MAXPTL=2
+ but this may change in the future */
+ if (env->tl > 2) {
+ env->hpstate |= HS_PRIV;
+ }
+ }
+
+ if (env->def->features & CPU_FEATURE_GL) {
+ tsptr->tstate |= (env->gl & 7ULL) << 40;
+ cpu_gl_switch_gregs(env, env->gl + 1);
+ env->gl++;
+ }
+
switch (intno) {
case TT_IVEC:
- cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG);
+ if (!cpu_has_hypervisor(env)) {
+ cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG);
+ }
break;
case TT_TFAULT:
case TT_DFAULT:
case TT_TMISS ... TT_TMISS + 3:
case TT_DMISS ... TT_DMISS + 3:
case TT_DPROT ... TT_DPROT + 3:
- cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG);
+ if (cpu_has_hypervisor(env)) {
+ env->hpstate |= HS_PRIV;
+ env->pstate = PS_PEF | PS_PRIV;
+ } else {
+ cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG);
+ }
+ break;
+ case TT_INSN_REAL_TRANSLATION_MISS ... TT_DATA_REAL_TRANSLATION_MISS:
+ case TT_HTRAP ... TT_HTRAP + 127:
+ env->hpstate |= HS_PRIV;
break;
default:
cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_AG);
@@ -158,8 +186,13 @@ void sparc_cpu_do_interrupt(CPUState *cs)
} else if ((intno & 0x1c0) == TT_FILL) {
cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
}
- env->pc = env->tbr & ~0x7fffULL;
- env->pc |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
+
+ if (cpu_hypervisor_mode(env)) {
+ env->pc = (env->htba & ~0x3fffULL) | (intno << 5);
+ } else {
+ env->pc = env->tbr & ~0x7fffULL;
+ env->pc |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
+ }
env->npc = env->pc + 4;
cs->exception_index = -1;
}
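The dispatch above sends hyperprivileged traps through HTBA and keeps the TBR layout for privileged ones; both use 32-byte trap-table slots, and the (tl > 1) term selects the second half of the table for traps taken at TL > 0 (tl here being the already-incremented trap level, as in the code above). A sketch of that address arithmetic with made-up register values, using TT_TMISS (0x64) and TT_HTRAP (0x180) from cpu.h above:

/* Sketch only: the trap-vector address arithmetic used above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t priv_trap_pc(uint64_t tbr, unsigned tl, unsigned intno)
{
    return (tbr & ~0x7fffULL) | ((tl > 1) ? (1u << 14) : 0) | (intno << 5);
}

static uint64_t hyper_trap_pc(uint64_t htba, unsigned intno)
{
    return (htba & ~0x3fffULL) | (intno << 5);   /* 16KB-aligned table */
}

int main(void)
{
    printf("%#llx\n", (unsigned long long)priv_trap_pc(0x100000, 1, 0x64));
    printf("%#llx\n", (unsigned long long)priv_trap_pc(0x100000, 2, 0x64));
    printf("%#llx\n", (unsigned long long)hyper_trap_pc(0x200000, 0x180));
    return 0;
}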
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
index de7d53ae20..2c05d6af75 100644
--- a/target/sparc/ldst_helper.c
+++ b/target/sparc/ldst_helper.c
@@ -70,44 +70,47 @@
#define QT1 (env->qt1)
#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
-/* Calculates TSB pointer value for fault page size 8k or 64k */
-static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
- uint64_t tag_access_register,
- int page_size)
+/* Calculates TSB pointer value for fault page size
+ * UltraSPARC IIi has fixed sizes (8k or 64k) for the page pointers
+ * UA2005 holds the page size configuration in mmu_ctx registers */
+static uint64_t ultrasparc_tsb_pointer(CPUSPARCState *env,
+ const SparcV9MMU *mmu, const int idx)
{
- uint64_t tsb_base = tsb_register & ~0x1fffULL;
+ uint64_t tsb_register;
+ int page_size;
+ if (cpu_has_hypervisor(env)) {
+ int tsb_index = 0;
+ int ctx = mmu->tag_access & 0x1fffULL;
+ uint64_t ctx_register = mmu->sun4v_ctx_config[ctx ? 1 : 0];
+ tsb_index = idx;
+ tsb_index |= ctx ? 2 : 0;
+ page_size = idx ? ctx_register >> 8 : ctx_register;
+ page_size &= 7;
+ tsb_register = mmu->sun4v_tsb_pointers[tsb_index];
+ } else {
+ page_size = idx;
+ tsb_register = mmu->tsb;
+ }
int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
int tsb_size = tsb_register & 0xf;
- /* discard lower 13 bits which hold tag access context */
- uint64_t tag_access_va = tag_access_register & ~0x1fffULL;
-
- /* now reorder bits */
- uint64_t tsb_base_mask = ~0x1fffULL;
- uint64_t va = tag_access_va;
+ uint64_t tsb_base_mask = (~0x1fffULL) << tsb_size;
- /* move va bits to correct position */
- if (page_size == 8*1024) {
- va >>= 9;
- } else if (page_size == 64*1024) {
- va >>= 12;
- }
-
- if (tsb_size) {
- tsb_base_mask <<= tsb_size;
- }
+ /* move va bits to correct position,
+ * the context bits will be masked out later */
+ uint64_t va = mmu->tag_access >> (3 * page_size + 9);
/* calculate tsb_base mask and adjust va if split is in use */
if (tsb_split) {
- if (page_size == 8*1024) {
+ if (idx == 0) {
va &= ~(1ULL << (13 + tsb_size));
- } else if (page_size == 64*1024) {
+ } else {
va |= (1ULL << (13 + tsb_size));
}
tsb_base_mask <<= 1;
}
- return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
+ return ((tsb_register & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
}
/* Calculates tag target register value by reordering bits
@@ -127,9 +130,8 @@ static void replace_tlb_entry(SparcTLBEntry *tlb,
if (TTE_IS_VALID(tlb->tte)) {
CPUState *cs = CPU(sparc_env_get_cpu(env1));
- mask = 0xffffffffffffe000ULL;
- mask <<= 3 * ((tlb->tte >> 61) & 3);
- size = ~mask + 1;
+ size = 8192ULL << 3 * TTE_PGSIZE(tlb->tte);
+ mask = 1ULL + ~size;
va = tlb->tag & mask;
@@ -202,12 +204,56 @@ static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
}
}
+static uint64_t sun4v_tte_to_sun4u(CPUSPARCState *env, uint64_t tag,
+ uint64_t sun4v_tte)
+{
+ uint64_t sun4u_tte;
+ if (!(cpu_has_hypervisor(env) && (tag & TLB_UST1_IS_SUN4V_BIT))) {
+ /* is already in the sun4u format */
+ return sun4v_tte;
+ }
+ sun4u_tte = TTE_PA(sun4v_tte) | (sun4v_tte & TTE_VALID_BIT);
+ sun4u_tte |= (sun4v_tte & 3ULL) << 61; /* TTE_PGSIZE */
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_NFO_BIT_UA2005, TTE_NFO_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_USED_BIT_UA2005, TTE_USED_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_W_OK_BIT_UA2005, TTE_W_OK_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_SIDEEFFECT_BIT_UA2005,
+ TTE_SIDEEFFECT_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_PRIV_BIT_UA2005, TTE_PRIV_BIT);
+ sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_LOCKED_BIT_UA2005, TTE_LOCKED_BIT);
+ return sun4u_tte;
+}
+
static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
uint64_t tlb_tag, uint64_t tlb_tte,
- const char *strmmu, CPUSPARCState *env1)
+ const char *strmmu, CPUSPARCState *env1,
+ uint64_t addr)
{
unsigned int i, replace_used;
+ tlb_tte = sun4v_tte_to_sun4u(env1, addr, tlb_tte);
+ if (cpu_has_hypervisor(env1)) {
+ uint64_t new_vaddr = tlb_tag & ~0x1fffULL;
+ uint64_t new_size = 8192ULL << 3 * TTE_PGSIZE(tlb_tte);
+ uint32_t new_ctx = tlb_tag & 0x1fffU;
+ for (i = 0; i < 64; i++) {
+ uint32_t ctx = tlb[i].tag & 0x1fffU;
+ /* check if new mapping overlaps an existing one */
+ if (new_ctx == ctx) {
+ uint64_t vaddr = tlb[i].tag & ~0x1fffULL;
+ uint64_t size = 8192ULL << 3 * TTE_PGSIZE(tlb[i].tte);
+ if (new_vaddr == vaddr
+ || (new_vaddr < vaddr + size
+ && vaddr < new_vaddr + new_size)) {
+ DPRINTF_MMU("auto demap entry [%d] %lx->%lx\n", i, vaddr,
+ new_vaddr);
+ replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+ return;
+ }
+ }
+
+ }
+ }
/* Try replacing invalid entry */
for (i = 0; i < 64; i++) {
if (!TTE_IS_VALID(tlb[i].tte)) {
@@ -247,9 +293,11 @@ static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
}
#ifdef DEBUG_MMU
- DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
+ DPRINTF_MMU("%s lru replacement: no free entries available, "
+ "replacing the last one\n", strmmu);
#endif
- /* error state? */
+ /* corner case: the last entry is replaced anyway */
+ replace_tlb_entry(&tlb[63], tlb_tag, tlb_tte, env1);
}
#endif
@@ -294,6 +342,22 @@ static inline target_ulong asi_address_mask(CPUSPARCState *env,
}
return addr;
}
+
+#ifndef CONFIG_USER_ONLY
+static inline void do_check_asi(CPUSPARCState *env, int asi, uintptr_t ra)
+{
+ /* ASIs >= 0x80 are user mode.
+ * ASIs >= 0x30 are hyper mode (or super if hyper is not available).
+ * ASIs <= 0x2f are super mode.
+ */
+ if (asi < 0x80
+ && !cpu_hypervisor_mode(env)
+ && (!cpu_supervisor_mode(env)
+ || (asi >= 0x30 && cpu_has_hypervisor(env)))) {
+ cpu_raise_exception_ra(env, TT_PRIV_ACT, ra);
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
#endif
static void do_check_align(CPUSPARCState *env, target_ulong addr,
@@ -816,7 +880,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
case 2: /* flush region (16M) */
case 3: /* flush context (4G) */
case 4: /* flush entire */
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
break;
default:
break;
@@ -841,7 +905,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
are invalid in normal mode. */
if ((oldreg ^ env->mmuregs[reg])
& (MMU_NF | env->def->mmu_bm)) {
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
break;
case 1: /* Context Table Pointer Register */
@@ -852,7 +916,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
if (oldreg != env->mmuregs[reg]) {
/* we flush when the MMU context changes because
QEMU has no MMU context support */
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
break;
case 3: /* Synchronous Fault Status Register with Clear */
@@ -1119,13 +1183,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
asi &= 0xff;
- if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
- || (cpu_has_hypervisor(env)
- && asi >= 0x30 && asi < 0x80
- && !(env->hpstate & HS_PRIV))) {
- cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
- }
-
+ do_check_asi(env, asi, GETPC());
do_check_align(env, addr, size - 1, GETPC());
addr = asi_address_mask(env, asi, addr);
@@ -1220,30 +1278,39 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
case ASI_IMMU: /* I-MMU regs */
{
int reg = (addr >> 3) & 0xf;
-
- if (reg == 0) {
- /* I-TSB Tag Target register */
+ switch (reg) {
+ case 0:
+ /* 0x00 I-TSB Tag Target register */
ret = ultrasparc_tag_target(env->immu.tag_access);
- } else {
- ret = env->immuregs[reg];
+ break;
+ case 3: /* SFSR */
+ ret = env->immu.sfsr;
+ break;
+ case 5: /* TSB access */
+ ret = env->immu.tsb;
+ break;
+ case 6:
+ /* 0x30 I-TSB Tag Access register */
+ ret = env->immu.tag_access;
+ break;
+ default:
+ cpu_unassigned_access(cs, addr, false, false, 1, size);
+ ret = 0;
}
-
break;
}
case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
{
/* env->immuregs[5] holds I-MMU TSB register value
env->immuregs[6] holds I-MMU Tag Access register value */
- ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
- 8*1024);
+ ret = ultrasparc_tsb_pointer(env, &env->immu, 0);
break;
}
case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
{
/* env->immuregs[5] holds I-MMU TSB register value
env->immuregs[6] holds I-MMU Tag Access register value */
- ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
- 64*1024);
+ ret = ultrasparc_tsb_pointer(env, &env->immu, 1);
break;
}
case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
@@ -1263,12 +1330,38 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
case ASI_DMMU: /* D-MMU regs */
{
int reg = (addr >> 3) & 0xf;
-
- if (reg == 0) {
- /* D-TSB Tag Target register */
+ switch (reg) {
+ case 0:
+ /* 0x00 D-TSB Tag Target register */
ret = ultrasparc_tag_target(env->dmmu.tag_access);
- } else {
- ret = env->dmmuregs[reg];
+ break;
+ case 1: /* 0x08 Primary Context */
+ ret = env->dmmu.mmu_primary_context;
+ break;
+ case 2: /* 0x10 Secondary Context */
+ ret = env->dmmu.mmu_secondary_context;
+ break;
+ case 3: /* SFSR */
+ ret = env->dmmu.sfsr;
+ break;
+ case 4: /* 0x20 SFAR */
+ ret = env->dmmu.sfar;
+ break;
+ case 5: /* 0x28 TSB access */
+ ret = env->dmmu.tsb;
+ break;
+ case 6: /* 0x30 D-TSB Tag Access register */
+ ret = env->dmmu.tag_access;
+ break;
+ case 7:
+ ret = env->dmmu.virtual_watchpoint;
+ break;
+ case 8:
+ ret = env->dmmu.physical_watchpoint;
+ break;
+ default:
+ cpu_unassigned_access(cs, addr, false, false, 1, size);
+ ret = 0;
}
break;
}
@@ -1276,16 +1369,14 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
{
/* env->dmmuregs[5] holds D-MMU TSB register value
env->dmmuregs[6] holds D-MMU Tag Access register value */
- ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
- 8*1024);
+ ret = ultrasparc_tsb_pointer(env, &env->dmmu, 0);
break;
}
case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
{
/* env->dmmuregs[5] holds D-MMU TSB register value
env->dmmuregs[6] holds D-MMU Tag Access register value */
- ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
- 64*1024);
+ ret = ultrasparc_tsb_pointer(env, &env->dmmu, 1);
break;
}
case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
@@ -1315,6 +1406,30 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
}
break;
}
+ case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
+ if (unlikely((addr >= 0x20) && (addr < 0x30))) {
+ /* Hyperprivileged access only */
+ cpu_unassigned_access(cs, addr, false, false, 1, size);
+ }
+ /* fall through */
+ case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
+ {
+ unsigned int i = (addr >> 3) & 0x7;
+ ret = env->scratch[i];
+ break;
+ }
+ case ASI_MMU: /* UA2005 Context ID registers */
+ switch ((addr >> 3) & 0x3) {
+ case 1:
+ ret = env->dmmu.mmu_primary_context;
+ break;
+ case 2:
+ ret = env->dmmu.mmu_secondary_context;
+ break;
+ default:
+ cpu_unassigned_access(cs, addr, true, false, 1, size);
+ }
+ break;
case ASI_DCACHE_DATA: /* D-cache data */
case ASI_DCACHE_TAG: /* D-cache tag access */
case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
@@ -1375,13 +1490,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
asi &= 0xff;
- if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
- || (cpu_has_hypervisor(env)
- && asi >= 0x30 && asi < 0x80
- && !(env->hpstate & HS_PRIV))) {
- cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
- }
-
+ do_check_asi(env, asi, GETPC());
do_check_align(env, addr, size - 1, GETPC());
addr = asi_address_mask(env, asi, addr);
@@ -1417,7 +1526,67 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
case ASI_TWINX_SL: /* Secondary, twinx, LE */
/* These are always handled inline. */
g_assert_not_reached();
-
+ /* these ASIs have different functions on UltraSPARC-IIIi
+ * and UA2005 CPUs. Use the explicit numbers to avoid confusion
+ */
+ case 0x31:
+ case 0x32:
+ case 0x39:
+ case 0x3a:
+ if (cpu_has_hypervisor(env)) {
+ /* UA2005
+ * ASI_DMMU_CTX_ZERO_TSB_BASE_PS0
+ * ASI_DMMU_CTX_ZERO_TSB_BASE_PS1
+ * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS0
+ * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS1
+ */
+ int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
+ env->dmmu.sun4v_tsb_pointers[idx] = val;
+ } else {
+ helper_raise_exception(env, TT_ILL_INSN);
+ }
+ break;
+ case 0x33:
+ case 0x3b:
+ if (cpu_has_hypervisor(env)) {
+ /* UA2005
+ * ASI_DMMU_CTX_ZERO_CONFIG
+ * ASI_DMMU_CTX_NONZERO_CONFIG
+ */
+ env->dmmu.sun4v_ctx_config[(asi & 8) >> 3] = val;
+ } else {
+ helper_raise_exception(env, TT_ILL_INSN);
+ }
+ break;
+ case 0x35:
+ case 0x36:
+ case 0x3d:
+ case 0x3e:
+ if (cpu_has_hypervisor(env)) {
+ /* UA2005
+ * ASI_IMMU_CTX_ZERO_TSB_BASE_PS0
+ * ASI_IMMU_CTX_ZERO_TSB_BASE_PS1
+ * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS0
+ * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS1
+ */
+ int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
+ env->immu.sun4v_tsb_pointers[idx] = val;
+ } else {
+ helper_raise_exception(env, TT_ILL_INSN);
+ }
+ break;
+ case 0x37:
+ case 0x3f:
+ if (cpu_has_hypervisor(env)) {
+ /* UA2005
+ * ASI_IMMU_CTX_ZERO_CONFIG
+ * ASI_IMMU_CTX_NONZERO_CONFIG
+ */
+ env->immu.sun4v_ctx_config[(asi & 8) >> 3] = val;
+ } else {
+ helper_raise_exception(env, TT_ILL_INSN);
+ }
+ break;
case ASI_UPA_CONFIG: /* UPA config */
/* XXX */
return;
@@ -1429,7 +1598,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
int reg = (addr >> 3) & 0xf;
uint64_t oldreg;
- oldreg = env->immuregs[reg];
+ oldreg = env->immu.mmuregs[reg];
switch (reg) {
case 0: /* RO */
return;
@@ -1456,10 +1625,11 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
case 8:
return;
default:
+ cpu_unassigned_access(cs, addr, true, false, 1, size);
break;
}
- if (oldreg != env->immuregs[reg]) {
+ if (oldreg != env->immu.mmuregs[reg]) {
DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
}
@@ -1469,7 +1639,11 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
return;
}
case ASI_ITLB_DATA_IN: /* I-MMU data in */
- replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_1bit_lru(env->itlb, env->immu.tag_access,
+ val, "immu", env, addr);
+ }
return;
case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
{
@@ -1477,8 +1651,11 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
unsigned int i = (addr >> 3) & 0x3f;
- replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
-
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_entry(&env->itlb[i], env->immu.tag_access,
+ sun4v_tte_to_sun4u(env, addr, val), env);
+ }
#ifdef DEBUG_MMU
DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
dump_mmu(stdout, fprintf, env);
@@ -1493,7 +1670,7 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
int reg = (addr >> 3) & 0xf;
uint64_t oldreg;
- oldreg = env->dmmuregs[reg];
+ oldreg = env->dmmu.mmuregs[reg];
switch (reg) {
case 0: /* RO */
case 4:
@@ -1509,13 +1686,13 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
env->dmmu.mmu_primary_context = val;
/* can be optimized to only flush MMU_USER_IDX
and MMU_KERNEL_IDX entries */
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
break;
case 2: /* Secondary context */
env->dmmu.mmu_secondary_context = val;
/* can be optimized to only flush MMU_USER_SECONDARY_IDX
and MMU_KERNEL_SECONDARY_IDX entries */
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
break;
case 5: /* TSB access */
DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
@@ -1526,13 +1703,17 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
env->dmmu.tag_access = val;
break;
case 7: /* Virtual Watchpoint */
+ env->dmmu.virtual_watchpoint = val;
+ break;
case 8: /* Physical Watchpoint */
+ env->dmmu.physical_watchpoint = val;
+ break;
default:
- env->dmmuregs[reg] = val;
+ cpu_unassigned_access(cs, addr, true, false, 1, size);
break;
}
- if (oldreg != env->dmmuregs[reg]) {
+ if (oldreg != env->dmmu.mmuregs[reg]) {
DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
}
@@ -1542,14 +1723,21 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
return;
}
case ASI_DTLB_DATA_IN: /* D-MMU data in */
- replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
- return;
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access,
+ val, "dmmu", env, addr);
+ }
+ return;
case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
{
unsigned int i = (addr >> 3) & 0x3f;
- replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
-
+ /* ignore real translation entries */
+ if (!(addr & TLB_UST1_IS_REAL_BIT)) {
+ replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access,
+ sun4v_tte_to_sun4u(env, addr, val), env);
+ }
#ifdef DEBUG_MMU
DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
dump_mmu(stdout, fprintf, env);
@@ -1562,6 +1750,38 @@ void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
case ASI_INTR_RECEIVE: /* Interrupt data receive */
env->ivec_status = val & 0x20;
return;
+ case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
+ if (unlikely((addr >= 0x20) && (addr < 0x30))) {
+ /* Hyperprivileged access only */
+ cpu_unassigned_access(cs, addr, true, false, 1, size);
+ }
+ /* fall through */
+ case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
+ {
+ unsigned int i = (addr >> 3) & 0x7;
+ env->scratch[i] = val;
+ return;
+ }
+ case ASI_MMU: /* UA2005 Context ID registers */
+ {
+ switch ((addr >> 3) & 0x3) {
+ case 1:
+ env->dmmu.mmu_primary_context = val;
+ env->immu.mmu_primary_context = val;
+ tlb_flush_by_mmuidx(CPU(cpu), MMU_USER_IDX, MMU_KERNEL_IDX, -1);
+ break;
+ case 2:
+ env->dmmu.mmu_secondary_context = val;
+ env->immu.mmu_secondary_context = val;
+ tlb_flush_by_mmuidx(CPU(cpu), MMU_USER_SECONDARY_IDX,
+ MMU_KERNEL_SECONDARY_IDX, -1);
+ break;
+ default:
+ cpu_unassigned_access(cs, addr, true, false, 1, size);
+ }
+ }
+ return;
+ case ASI_QUEUE: /* UA2005 CPU mondo queue */
case ASI_DCACHE_DATA: /* D-cache data */
case ASI_DCACHE_TAG: /* D-cache tag access */
case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
@@ -1654,7 +1874,7 @@ void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
/* flush neverland mappings created during no-fault mode,
so the sequential MMU faults report proper fault types */
if (env->mmuregs[0] & MMU_NF) {
- tlb_flush(cs, 1);
+ tlb_flush(cs);
}
}
#else
@@ -1664,14 +1884,25 @@ void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
{
SPARCCPU *cpu = SPARC_CPU(cs);
CPUSPARCState *env = &cpu->env;
- int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
#ifdef DEBUG_UNASSIGNED
printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
"\n", addr, env->pc);
#endif
- cpu_raise_exception_ra(env, tt, GETPC());
+ if (is_exec) { /* XXX has_hypervisor */
+ if (env->lsu & (IMMU_E)) {
+ cpu_raise_exception_ra(env, TT_CODE_ACCESS, GETPC());
+ } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
+ cpu_raise_exception_ra(env, TT_INSN_REAL_TRANSLATION_MISS, GETPC());
+ }
+ } else {
+ if (env->lsu & (DMMU_E)) {
+ cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
+ } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
+ cpu_raise_exception_ra(env, TT_DATA_REAL_TRANSLATION_MISS, GETPC());
+ }
+ }
}
#endif
#endif
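do_check_asi() added in this file folds the two open-coded privilege checks of helper_ld_asi()/helper_st_asi() into one predicate over the ASI ranges described in its comment. A hedged restatement of the same decision as a boolean helper (names invented), which may make the ranges easier to see; "true" means the access is allowed, otherwise the helper raises TT_PRIV_ACT:

/* Sketch only: the ASI privilege decision as a predicate. */
#include <stdbool.h>

static bool asi_access_ok(int asi, bool supervisor, bool hypervisor,
                          bool has_hypervisor)
{
    if (asi >= 0x80) {
        return true;               /* user ASIs: always accessible */
    }
    if (hypervisor) {
        return true;               /* hyper mode may use any ASI */
    }
    if (!supervisor) {
        return false;              /* user mode, privileged ASI */
    }
    /* supervisor: 0x30..0x7f are hyper-only when a hypervisor exists */
    return !(asi >= 0x30 && has_hypervisor);
}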
diff --git a/target/sparc/machine.c b/target/sparc/machine.c
index aea6397861..6bd6b8ee3e 100644
--- a/target/sparc/machine.c
+++ b/target/sparc/machine.c
@@ -56,7 +56,7 @@ static const VMStateDescription vmstate_tlb_entry = {
};
#endif
-static int get_psr(QEMUFile *f, void *opaque, size_t size)
+static int get_psr(QEMUFile *f, void *opaque, size_t size, VMStateField *field)
{
SPARCCPU *cpu = opaque;
CPUSPARCState *env = &cpu->env;
@@ -69,7 +69,8 @@ static int get_psr(QEMUFile *f, void *opaque, size_t size)
return 0;
}
-static void put_psr(QEMUFile *f, void *opaque, size_t size)
+static int put_psr(QEMUFile *f, void *opaque, size_t size, VMStateField *field,
+ QJSON *vmdesc)
{
SPARCCPU *cpu = opaque;
CPUSPARCState *env = &cpu->env;
@@ -78,6 +79,7 @@ static void put_psr(QEMUFile *f, void *opaque, size_t size)
val = cpu_get_psr(env);
qemu_put_be32(f, val);
+ return 0;
}
static const VMStateInfo vmstate_psr = {
@@ -148,8 +150,8 @@ const VMStateDescription vmstate_sparc_cpu = {
VMSTATE_UINT64_ARRAY(env.mmubpregs, SPARCCPU, 4),
#else
VMSTATE_UINT64(env.lsu, SPARCCPU),
- VMSTATE_UINT64_ARRAY(env.immuregs, SPARCCPU, 16),
- VMSTATE_UINT64_ARRAY(env.dmmuregs, SPARCCPU, 16),
+ VMSTATE_UINT64_ARRAY(env.immu.mmuregs, SPARCCPU, 16),
+ VMSTATE_UINT64_ARRAY(env.dmmu.mmuregs, SPARCCPU, 16),
VMSTATE_STRUCT_ARRAY(env.itlb, SPARCCPU, 64, 0,
vmstate_tlb_entry, SparcTLBEntry),
VMSTATE_STRUCT_ARRAY(env.dtlb, SPARCCPU, 64, 0,
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
index 044e88c4c5..8b4664d996 100644
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -456,23 +456,7 @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
uint64_t address, uint64_t context,
hwaddr *physical)
{
- uint64_t mask;
-
- switch (TTE_PGSIZE(tlb->tte)) {
- default:
- case 0x0: /* 8k */
- mask = 0xffffffffffffe000ULL;
- break;
- case 0x1: /* 64k */
- mask = 0xffffffffffff0000ULL;
- break;
- case 0x2: /* 512k */
- mask = 0xfffffffffff80000ULL;
- break;
- case 0x3: /* 4M */
- mask = 0xffffffffffc00000ULL;
- break;
- }
+ uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));
/* valid, context match, virtual address match? */
if (TTE_IS_VALID(tlb->tte) &&
@@ -757,6 +741,8 @@ void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
PRId64 "\n",
env->dmmu.mmu_primary_context,
env->dmmu.mmu_secondary_context);
+ (*cpu_fprintf)(f, "DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
+ "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
if ((env->lsu & DMMU_E) == 0) {
(*cpu_fprintf)(f, "DMMU disabled\n");
} else {
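The closed form above, like the matching size/mask computation in replace_tlb_entry() earlier, relies on the four V9 page sizes growing by a factor of 8: size = 8K << (3 * pgsize), and the page mask is simply the two's-complement negative of the size. A quick standalone check that this reproduces the table the old switch spelled out:

/* Sketch only: the page-size formula versus the old hand-written masks. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    static const uint64_t old_masks[4] = {
        0xffffffffffffe000ULL,   /* 8K   */
        0xffffffffffff0000ULL,   /* 64K  */
        0xfffffffffff80000ULL,   /* 512K */
        0xffffffffffc00000ULL,   /* 4M   */
    };
    for (unsigned pgsize = 0; pgsize < 4; pgsize++) {
        uint64_t size = 8192ULL << (3 * pgsize);
        assert(-size == old_masks[pgsize]);           /* mask = -size   */
        assert((1ULL + ~size) == old_masks[pgsize]);  /* as written above */
    }
    return 0;
}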
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 2205f89837..655060cd9a 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -72,9 +72,16 @@ typedef struct DisasContext {
target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
int is_br;
int mem_idx;
- int fpu_enabled;
- int address_mask_32bit;
- int singlestep;
+ bool fpu_enabled;
+ bool address_mask_32bit;
+ bool singlestep;
+#ifndef CONFIG_USER_ONLY
+ bool supervisor;
+#ifdef TARGET_SPARC64
+ bool hypervisor;
+#endif
+#endif
+
uint32_t cc_op; /* current CC operation */
struct TranslationBlock *tb;
sparc_def_t *def;
@@ -283,10 +290,11 @@ static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
#define hypervisor(dc) 0
#endif
#else
-#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
#ifdef TARGET_SPARC64
-#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
+#define hypervisor(dc) (dc->hypervisor)
+#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
+#define supervisor(dc) (dc->supervisor)
#endif
#endif
@@ -2134,7 +2142,11 @@ static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
case ASI_TWINX_NL:
case ASI_NUCLEUS_QUAD_LDD:
case ASI_NUCLEUS_QUAD_LDD_L:
- mem_idx = MMU_NUCLEUS_IDX;
+ if (hypervisor(dc)) {
+ mem_idx = MMU_PHYS_IDX;
+ } else {
+ mem_idx = MMU_NUCLEUS_IDX;
+ }
break;
case ASI_AIUP: /* As if user primary */
case ASI_AIUPL: /* As if user primary LE */
@@ -2309,8 +2321,19 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
case GET_ASI_EXCP:
break;
case GET_ASI_DTWINX: /* Reserved for stda. */
+#ifndef TARGET_SPARC64
gen_exception(dc, TT_ILL_INSN);
break;
+#else
+ if (!(dc->def->features & CPU_FEATURE_HYPV)) {
+ /* Pre OpenSPARC CPUs don't have these */
+ gen_exception(dc, TT_ILL_INSN);
+ return;
+ }
+ /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
+ * are ST_BLKINIT_ ASIs */
+ /* fall through */
+#endif
case GET_ASI_DIRECT:
gen_address_mask(dc, addr);
tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
@@ -3286,7 +3309,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
rs1 = GET_FIELD_SP(insn, 14, 18);
if (IS_IMM) {
- rs2 = GET_FIELD_SP(insn, 0, 6);
+ rs2 = GET_FIELD_SP(insn, 0, 7);
if (rs1 == 0) {
tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
/* Signal that the trap value is fully constant. */
@@ -3421,6 +3444,17 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
case 0x19: /* System tick compare */
gen_store_gpr(dc, rd, cpu_stick_cmpr);
break;
+ case 0x1a: /* UltraSPARC-T1 Strand status */
+ /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
+ * this ASR as impl. dep
+ */
+ CHECK_IU_FEATURE(dc, HYPV);
+ {
+ TCGv t = gen_dest_gpr(dc, rd);
+ tcg_gen_movi_tl(t, 1UL);
+ gen_store_gpr(dc, rd, t);
+ }
+ break;
case 0x10: /* Performance Control */
case 0x11: /* Performance Instrumentation Counter */
case 0x12: /* Dispatch Control */
@@ -3445,7 +3479,8 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
rs1 = GET_FIELD(insn, 13, 17);
switch (rs1) {
case 0: // hpstate
- // gen_op_rdhpstate();
+ tcg_gen_ld_i64(cpu_dst, cpu_env,
+ offsetof(CPUSPARCState, hpstate));
break;
case 1: // htstate
// gen_op_rdhtstate();
@@ -4535,8 +4570,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
break;
case 16: // UA2005 gl
CHECK_IU_FEATURE(dc, GL);
- tcg_gen_st32_tl(cpu_tmp0, cpu_env,
- offsetof(CPUSPARCState, gl));
+ gen_helper_wrgl(cpu_env, cpu_tmp0);
break;
case 26: // UA2005 strand status
CHECK_IU_FEATURE(dc, HYPV);
@@ -4570,7 +4604,9 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
switch (rd) {
case 0: // hpstate
- // XXX gen_op_wrhpstate();
+ tcg_gen_st_i64(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ hpstate));
save_state(dc);
gen_op_next_insn();
tcg_gen_exit_tb(0);
@@ -4647,7 +4683,7 @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x2e: /* V9 popc */
- gen_helper_popc(cpu_dst, cpu_src2);
+ tcg_gen_ctpop_tl(cpu_dst, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x2f: /* V9 movr */
@@ -5710,9 +5746,15 @@ void gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
dc->fpu_enabled = tb_fpu_enabled(tb->flags);
dc->address_mask_32bit = tb_am_enabled(tb->flags);
dc->singlestep = (cs->singlestep_enabled || singlestep);
+#ifndef CONFIG_USER_ONLY
+ dc->supervisor = (tb->flags & TB_FLAG_SUPER) != 0;
+#endif
#ifdef TARGET_SPARC64
dc->fprs_dirty = 0;
dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
+#ifndef CONFIG_USER_ONLY
+ dc->hypervisor = (tb->flags & TB_FLAG_HYPER) != 0;
+#endif
#endif
num_insns = 0;
diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c
index 2d5b5469a9..71b3dd37e8 100644
--- a/target/sparc/win_helper.c
+++ b/target/sparc/win_helper.c
@@ -290,6 +290,10 @@ void helper_wrcwp(CPUSPARCState *env, target_ulong new_cwp)
static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate)
{
+ if (env->def->features & CPU_FEATURE_GL) {
+ return env->glregs + (env->gl & 7) * 8;
+ }
+
switch (pstate) {
default:
trace_win_helper_gregset_error(pstate);
@@ -305,14 +309,40 @@ static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate)
}
}
+static inline uint64_t *get_gl_gregset(CPUSPARCState *env, uint32_t gl)
+{
+ return env->glregs + (gl & 7) * 8;
+}
+
+/* Switch global register bank */
+void cpu_gl_switch_gregs(CPUSPARCState *env, uint32_t new_gl)
+{
+ uint64_t *src, *dst;
+ src = get_gl_gregset(env, new_gl);
+ dst = get_gl_gregset(env, env->gl);
+
+ if (src != dst) {
+ memcpy32(dst, env->gregs);
+ memcpy32(env->gregs, src);
+ }
+}
+
+void helper_wrgl(CPUSPARCState *env, target_ulong new_gl)
+{
+ cpu_gl_switch_gregs(env, new_gl & 7);
+ env->gl = new_gl & 7;
+}
+
void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate)
{
uint32_t pstate_regs, new_pstate_regs;
uint64_t *src, *dst;
if (env->def->features & CPU_FEATURE_GL) {
- /* PS_AG is not implemented in this case */
- new_pstate &= ~PS_AG;
+ /* PS_AG, IG and MG are not implemented in this case */
+ new_pstate &= ~(PS_AG | PS_IG | PS_MG);
+ env->pstate = new_pstate;
+ return;
}
pstate_regs = env->pstate & 0xc01;
@@ -366,6 +396,12 @@ void helper_done(CPUSPARCState *env)
env->asi = (tsptr->tstate >> 24) & 0xff;
cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f);
cpu_put_cwp64(env, tsptr->tstate & 0xff);
+ if (cpu_has_hypervisor(env)) {
+ uint32_t new_gl = (tsptr->tstate >> 40) & 7;
+ env->hpstate = env->htstate[env->tl];
+ cpu_gl_switch_gregs(env, new_gl);
+ env->gl = new_gl;
+ }
env->tl--;
trace_win_helper_done(env->tl);
@@ -387,6 +423,12 @@ void helper_retry(CPUSPARCState *env)
env->asi = (tsptr->tstate >> 24) & 0xff;
cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f);
cpu_put_cwp64(env, tsptr->tstate & 0xff);
+ if (cpu_has_hypervisor(env)) {
+ uint32_t new_gl = (tsptr->tstate >> 40) & 7;
+ env->hpstate = env->htstate[env->tl];
+ cpu_gl_switch_gregs(env, new_gl);
+ env->gl = new_gl;
+ }
env->tl--;
trace_win_helper_retry(env->tl);
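cpu_gl_switch_gregs() above swaps the live %g0-%g7 set with the bank selected by the new GL value, so the backing store for the currently active bank is stale while it is in use. A toy model of that save-then-restore swap, not tied to the CPUSPARCState layout (names invented):

/* Sketch only: global-register bank switching on a toy state. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef struct ToyState {
    uint64_t gregs[8];          /* the live globals */
    uint64_t glregs[8 * 8];     /* backing store, one set per GL value */
    uint32_t gl;
} ToyState;

static void toy_switch_gl(ToyState *s, uint32_t new_gl)
{
    uint64_t *dst = s->glregs + (s->gl & 7) * 8;    /* park current set */
    uint64_t *src = s->glregs + (new_gl & 7) * 8;   /* load new set */

    if (src != dst) {
        memcpy(dst, s->gregs, 8 * sizeof(uint64_t));
        memcpy(s->gregs, src, 8 * sizeof(uint64_t));
    }
    s->gl = new_gl & 7;
}

int main(void)
{
    ToyState s = { .gl = 0 };
    s.gregs[1] = 0x1111;            /* %g1 at GL 0 */
    toy_switch_gl(&s, 1);
    s.gregs[1] = 0x2222;            /* %g1 at GL 1 */
    toy_switch_gl(&s, 0);
    assert(s.gregs[1] == 0x1111);   /* GL 0 contents restored */
    return 0;
}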
diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
index 454793f94a..d90e38e88c 100644
--- a/target/tilegx/cpu.c
+++ b/target/tilegx/cpu.c
@@ -84,8 +84,7 @@ static void tilegx_cpu_reset(CPUState *s)
tcc->parent_reset(s);
- memset(env, 0, sizeof(CPUTLGState));
- tlb_flush(s, 1);
+ memset(env, 0, offsetof(CPUTLGState, end_reset_fields));
}
static void tilegx_cpu_realizefn(DeviceState *dev, Error **errp)
diff --git a/target/tilegx/cpu.h b/target/tilegx/cpu.h
index 1735427233..f32be49f65 100644
--- a/target/tilegx/cpu.h
+++ b/target/tilegx/cpu.h
@@ -97,6 +97,9 @@ typedef struct CPUTLGState {
uint32_t sigcode; /* Signal code */
#endif
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
CPU_COMMON
} CPUTLGState;
diff --git a/target/tilegx/helper.c b/target/tilegx/helper.c
index b4fba9cc21..4964bb9111 100644
--- a/target/tilegx/helper.c
+++ b/target/tilegx/helper.c
@@ -55,21 +55,6 @@ void helper_ext01_ics(CPUTLGState *env)
}
}
-uint64_t helper_cntlz(uint64_t arg)
-{
- return clz64(arg);
-}
-
-uint64_t helper_cnttz(uint64_t arg)
-{
- return ctz64(arg);
-}
-
-uint64_t helper_pcnt(uint64_t arg)
-{
- return ctpop64(arg);
-}
-
uint64_t helper_revbits(uint64_t arg)
{
return revbit64(arg);
diff --git a/target/tilegx/helper.h b/target/tilegx/helper.h
index 9281d0f428..16745c266f 100644
--- a/target/tilegx/helper.h
+++ b/target/tilegx/helper.h
@@ -1,8 +1,5 @@
DEF_HELPER_2(exception, noreturn, env, i32)
DEF_HELPER_1(ext01_ics, void, env)
-DEF_HELPER_FLAGS_1(cntlz, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(cnttz, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(pcnt, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(revbits, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_3(shufflebytes, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
DEF_HELPER_FLAGS_2(crc32_8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c
index 9c734eeba3..ff2ef7b63d 100644
--- a/target/tilegx/translate.c
+++ b/target/tilegx/translate.c
@@ -608,12 +608,12 @@ static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
switch (opext) {
case OE_RR_X0(CNTLZ):
case OE_RR_Y0(CNTLZ):
- gen_helper_cntlz(tdest, tsrca);
+ tcg_gen_clzi_tl(tdest, tsrca, TARGET_LONG_BITS);
mnemonic = "cntlz";
break;
case OE_RR_X0(CNTTZ):
case OE_RR_Y0(CNTTZ):
- gen_helper_cnttz(tdest, tsrca);
+ tcg_gen_ctzi_tl(tdest, tsrca, TARGET_LONG_BITS);
mnemonic = "cnttz";
break;
case OE_RR_X0(FSINGLE_PACK1):
@@ -697,7 +697,7 @@ static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
break;
case OE_RR_X0(PCNT):
case OE_RR_Y0(PCNT):
- gen_helper_pcnt(tdest, tsrca);
+ tcg_gen_ctpop_tl(tdest, tsrca);
mnemonic = "pcnt";
break;
case OE_RR_X0(REVBITS):
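The translator now emits tcg_gen_clzi_tl()/tcg_gen_ctzi_tl()/tcg_gen_ctpop_tl() instead of calling out to helpers; the immediate operand of the clz/ctz variants is the value produced for a zero input (64 here, tilegx being a 64-bit target). A one-line host model of the clz case, assuming a GCC/Clang builtin:

/* Sketch only: host model of tcg_gen_clzi_tl(dst, src, 64) as used for
 * CNTLZ above.  __builtin_clzll() is undefined for 0, which is exactly
 * what the explicit immediate covers. */
#include <stdint.h>

static uint64_t model_clzi64(uint64_t src)
{
    return src ? (uint64_t)__builtin_clzll(src) : 64;
}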
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index 785b76bd3a..08f50e2ba7 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -53,8 +53,6 @@ static void tricore_cpu_reset(CPUState *s)
tcc->parent_reset(s);
- tlb_flush(s, 1);
-
cpu_state_reset(env);
}
diff --git a/target/tricore/fpu_helper.c b/target/tricore/fpu_helper.c
index 98fe9472b1..7979bb6692 100644
--- a/target/tricore/fpu_helper.c
+++ b/target/tricore/fpu_helper.c
@@ -21,7 +21,8 @@
#include "cpu.h"
#include "exec/helper-proto.h"
-#define ADD_NAN 0x7cf00001
+#define QUIET_NAN 0x7fc00000
+#define ADD_NAN 0x7fc00001
#define DIV_NAN 0x7fc00008
#define MUL_NAN 0x7fc00002
#define FPU_FS PSW_USB_C
@@ -47,6 +48,42 @@ static inline bool f_is_denormal(float32 arg)
return float32_is_zero_or_denormal(arg) && !float32_is_zero(arg);
}
+static inline float32 f_maddsub_nan_result(float32 arg1, float32 arg2,
+ float32 arg3, float32 result,
+ uint32_t muladd_negate_c)
+{
+ uint32_t aSign, bSign, cSign;
+ uint32_t aExp, bExp, cExp;
+
+ if (float32_is_any_nan(arg1) || float32_is_any_nan(arg2) ||
+ float32_is_any_nan(arg3)) {
+ return QUIET_NAN;
+ } else if (float32_is_infinity(arg1) && float32_is_zero(arg2)) {
+ return MUL_NAN;
+ } else if (float32_is_zero(arg1) && float32_is_infinity(arg2)) {
+ return MUL_NAN;
+ } else {
+ aSign = arg1 >> 31;
+ bSign = arg2 >> 31;
+ cSign = arg3 >> 31;
+
+ aExp = (arg1 >> 23) & 0xff;
+ bExp = (arg2 >> 23) & 0xff;
+ cExp = (arg3 >> 23) & 0xff;
+
+ if (muladd_negate_c) {
+ cSign ^= 1;
+ }
+ if (((aExp == 0xff) || (bExp == 0xff)) && (cExp == 0xff)) {
+ if (aSign ^ bSign ^ cSign) {
+ return ADD_NAN;
+ }
+ }
+ }
+
+ return result;
+}
+
static void f_update_psw_flags(CPUTriCoreState *env, uint8_t flags)
{
uint8_t some_excp = 0;
@@ -159,6 +196,60 @@ uint32_t helper_fdiv(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
return (uint32_t)f_result;
}
+uint32_t helper_fmadd(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
+{
+ uint32_t flags;
+ float32 arg1 = make_float32(r1);
+ float32 arg2 = make_float32(r2);
+ float32 arg3 = make_float32(r3);
+ float32 f_result;
+
+ f_result = float32_muladd(arg1, arg2, arg3, 0, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ if (flags & float_flag_invalid) {
+ arg1 = float32_squash_input_denormal(arg1, &env->fp_status);
+ arg2 = float32_squash_input_denormal(arg2, &env->fp_status);
+ arg3 = float32_squash_input_denormal(arg3, &env->fp_status);
+ f_result = f_maddsub_nan_result(arg1, arg2, arg3, f_result, 0);
+ }
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)f_result;
+}
+
+uint32_t helper_fmsub(CPUTriCoreState *env, uint32_t r1,
+ uint32_t r2, uint32_t r3)
+{
+ uint32_t flags;
+ float32 arg1 = make_float32(r1);
+ float32 arg2 = make_float32(r2);
+ float32 arg3 = make_float32(r3);
+ float32 f_result;
+
+ f_result = float32_muladd(arg1, arg2, arg3, float_muladd_negate_product,
+ &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ if (flags & float_flag_invalid) {
+ arg1 = float32_squash_input_denormal(arg1, &env->fp_status);
+ arg2 = float32_squash_input_denormal(arg2, &env->fp_status);
+ arg3 = float32_squash_input_denormal(arg3, &env->fp_status);
+
+ f_result = f_maddsub_nan_result(arg1, arg2, arg3, f_result, 1);
+ }
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)f_result;
+}
+
uint32_t helper_fcmp(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
{
uint32_t result, flags;
@@ -215,3 +306,44 @@ uint32_t helper_itof(CPUTriCoreState *env, uint32_t arg)
}
return (uint32_t)f_result;
}
+
+uint32_t helper_ftouz(CPUTriCoreState *env, uint32_t arg)
+{
+ float32 f_arg = make_float32(arg);
+ uint32_t result;
+ int32_t flags;
+
+ result = float32_to_uint32_round_to_zero(f_arg, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags & float_flag_invalid) {
+ flags &= ~float_flag_inexact;
+ if (float32_is_any_nan(f_arg)) {
+ result = 0;
+ }
+ } else if (float32_lt_quiet(f_arg, 0, &env->fp_status)) {
+ flags = float_flag_invalid;
+ result = 0;
+ }
+
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return result;
+}
+
+void helper_updfl(CPUTriCoreState *env, uint32_t arg)
+{
+ env->FPU_FS = extract32(arg, 7, 1) & extract32(arg, 15, 1);
+ env->FPU_FI = (extract32(arg, 6, 1) & extract32(arg, 14, 1)) << 31;
+ env->FPU_FV = (extract32(arg, 5, 1) & extract32(arg, 13, 1)) << 31;
+ env->FPU_FZ = (extract32(arg, 4, 1) & extract32(arg, 12, 1)) << 31;
+ env->FPU_FU = (extract32(arg, 3, 1) & extract32(arg, 11, 1)) << 31;
+ /* clear FX and RM */
+ env->PSW &= ~(extract32(arg, 10, 1) << 26);
+ env->PSW |= (extract32(arg, 2, 1) & extract32(arg, 10, 1)) << 26;
+
+ fpu_set_state(env);
+}
diff --git a/target/tricore/helper.h b/target/tricore/helper.h
index 9333e161ab..e634d0c680 100644
--- a/target/tricore/helper.h
+++ b/target/tricore/helper.h
@@ -87,11 +87,8 @@ DEF_HELPER_FLAGS_2(min_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
DEF_HELPER_FLAGS_2(ixmin, TCG_CALL_NO_RWG_SE, i64, i64, i32)
DEF_HELPER_FLAGS_2(ixmin_u, TCG_CALL_NO_RWG_SE, i64, i64, i32)
/* count leading ... */
-DEF_HELPER_FLAGS_1(clo, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(clo_h, TCG_CALL_NO_RWG_SE, i32, i32)
-DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(clz_h, TCG_CALL_NO_RWG_SE, i32, i32)
-DEF_HELPER_FLAGS_1(cls, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(cls_h, TCG_CALL_NO_RWG_SE, i32, i32)
/* sh */
DEF_HELPER_FLAGS_2(sh, TCG_CALL_NO_RWG_SE, i32, i32, i32)
@@ -109,9 +106,13 @@ DEF_HELPER_3(fadd, i32, env, i32, i32)
DEF_HELPER_3(fsub, i32, env, i32, i32)
DEF_HELPER_3(fmul, i32, env, i32, i32)
DEF_HELPER_3(fdiv, i32, env, i32, i32)
+DEF_HELPER_4(fmadd, i32, env, i32, i32, i32)
+DEF_HELPER_4(fmsub, i32, env, i32, i32, i32)
DEF_HELPER_3(fcmp, i32, env, i32, i32)
DEF_HELPER_2(ftoi, i32, env, i32)
DEF_HELPER_2(itof, i32, env, i32)
+DEF_HELPER_2(ftouz, i32, env, i32)
+DEF_HELPER_2(updfl, void, env, i32)
/* dvinit */
DEF_HELPER_3(dvinit_b_13, i64, env, i32, i32)
DEF_HELPER_3(dvinit_b_131, i64, env, i32, i32)
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index ac02e0a36b..7af202c8c0 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -1733,11 +1733,6 @@ EXTREMA_H_B(min, <)
#undef EXTREMA_H_B
-uint32_t helper_clo(target_ulong r1)
-{
- return clo32(r1);
-}
-
uint32_t helper_clo_h(target_ulong r1)
{
uint32_t ret_hw0 = extract32(r1, 0, 16);
@@ -1756,11 +1751,6 @@ uint32_t helper_clo_h(target_ulong r1)
return ret_hw0 | (ret_hw1 << 16);
}
-uint32_t helper_clz(target_ulong r1)
-{
- return clz32(r1);
-}
-
uint32_t helper_clz_h(target_ulong r1)
{
uint32_t ret_hw0 = extract32(r1, 0, 16);
@@ -1779,11 +1769,6 @@ uint32_t helper_clz_h(target_ulong r1)
return ret_hw0 | (ret_hw1 << 16);
}
-uint32_t helper_cls(target_ulong r1)
-{
- return clrsb32(r1);
-}
-
uint32_t helper_cls_h(target_ulong r1)
{
uint32_t ret_hw0 = extract32(r1, 0, 16);
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index 36f734a662..ddd2dd07dd 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -3362,9 +3362,17 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
case OPC1_16_SBC_JEQ:
gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant, offset);
break;
+ case OPC1_16_SBC_JEQ2:
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant,
+ offset + 16);
+ break;
case OPC1_16_SBC_JNE:
gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], constant, offset);
break;
+ case OPC1_16_SBC_JNE2:
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15],
+ constant, offset + 16);
+ break;
/* SBRN-format jumps */
case OPC1_16_SBRN_JZ_T:
temp = tcg_temp_new();
@@ -4097,6 +4105,16 @@ static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode);
gen_compute_branch(ctx, op1, 0, 0, const16, address);
break;
+ case OPC1_16_SBC_JEQ2:
+ case OPC1_16_SBC_JNE2:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ address = MASK_OP_SBC_DISP4(ctx->opcode);
+ const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode);
+ gen_compute_branch(ctx, op1, 0, 0, const16, address);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
/* SBRN-format */
case OPC1_16_SBRN_JNZ_T:
case OPC1_16_SBRN_JZ_T:
@@ -6034,6 +6052,8 @@ static void decode_rr_accumulator(CPUTriCoreState *env, DisasContext *ctx)
uint32_t op2;
int r3, r2, r1;
+ TCGv temp;
+
r3 = MASK_OP_RR_D(ctx->opcode);
r2 = MASK_OP_RR_S2(ctx->opcode);
r1 = MASK_OP_RR_S1(ctx->opcode);
@@ -6224,6 +6244,20 @@ static void decode_rr_accumulator(CPUTriCoreState *env, DisasContext *ctx)
case OPC2_32_RR_MOV:
tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
break;
+ case OPC2_32_RR_MOV_64:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ temp = tcg_temp_new();
+
+ CHECK_REG_PAIR(r3);
+ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp);
+
+ tcg_temp_free(temp);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
case OPC2_32_RR_NE:
tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
cpu_gpr_d[r2]);
@@ -6367,19 +6401,20 @@ static void decode_rr_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
break;
case OPC2_32_RR_CLO:
- gen_helper_clo(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_not_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], TARGET_LONG_BITS);
break;
case OPC2_32_RR_CLO_H:
gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CLS:
- gen_helper_cls(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_clrsb_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CLS_H:
gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
break;
case OPC2_32_RR_CLZ:
- gen_helper_clz(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], TARGET_LONG_BITS);
break;
case OPC2_32_RR_CLZ_H:
gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
@@ -6698,6 +6733,12 @@ static void decode_rr_divide(CPUTriCoreState *env, DisasContext *ctx)
case OPC2_32_RR_ITOF:
gen_helper_itof(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
break;
+ case OPC2_32_RR_FTOUZ:
+ gen_helper_ftouz(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_UPDFL:
+ gen_helper_updfl(cpu_env, cpu_gpr_d[r1]);
+ break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
@@ -7093,6 +7134,14 @@ static void decode_rrr_divide(CPUTriCoreState *env, DisasContext *ctx)
case OPC2_32_RRR_SUB_F:
gen_helper_fsub(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
break;
+ case OPC2_32_RRR_MADD_F:
+ gen_helper_fmadd(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_MSUB_F:
+ gen_helper_fmsub(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r3]);
+ break;
default:
generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
}
diff --git a/target/tricore/tricore-opcodes.h b/target/tricore/tricore-opcodes.h
index df666b081f..08394b85ac 100644
--- a/target/tricore/tricore-opcodes.h
+++ b/target/tricore/tricore-opcodes.h
@@ -311,6 +311,7 @@ enum {
OPC1_16_SRR_EQ = 0x3a,
OPC1_16_SB_J = 0x3c,
OPC1_16_SBC_JEQ = 0x1e,
+ OPC1_16_SBC_JEQ2 = 0x9e,
OPC1_16_SBR_JEQ = 0x3e,
OPC1_16_SBR_JGEZ = 0xce,
OPC1_16_SBR_JGTZ = 0x4e,
@@ -318,6 +319,7 @@ enum {
OPC1_16_SBR_JLEZ = 0x8e,
OPC1_16_SBR_JLTZ = 0x0e,
OPC1_16_SBC_JNE = 0x5e,
+ OPC1_16_SBC_JNE2 = 0xde,
OPC1_16_SBR_JNE = 0x7e,
OPC1_16_SB_JNZ = 0xee,
OPC1_16_SBR_JNZ = 0xf6,
@@ -1062,6 +1064,7 @@ enum {
OPC2_32_RR_MIN_H = 0x78,
OPC2_32_RR_MIN_HU = 0x79,
OPC2_32_RR_MOV = 0x1f,
+ OPC2_32_RR_MOV_64 = 0x81,
OPC2_32_RR_NE = 0x11,
OPC2_32_RR_OR_EQ = 0x27,
OPC2_32_RR_OR_GE = 0x2b,
diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c
index c169972b59..c9b78ce68e 100644
--- a/target/unicore32/cpu.c
+++ b/target/unicore32/cpu.c
@@ -133,7 +133,7 @@ static void uc32_cpu_initfn(Object *obj)
env->regs[31] = 0x03000000;
#endif
- tlb_flush(cs, 1);
+ tlb_flush(cs);
if (tcg_enabled() && !inited) {
inited = true;
diff --git a/target/unicore32/helper.c b/target/unicore32/helper.c
index d603bde237..f9239dc7b8 100644
--- a/target/unicore32/helper.c
+++ b/target/unicore32/helper.c
@@ -32,16 +32,6 @@ UniCore32CPU *uc32_cpu_init(const char *cpu_model)
return UNICORE32_CPU(cpu_generic_init(TYPE_UNICORE32_CPU, cpu_model));
}
-uint32_t HELPER(clo)(uint32_t x)
-{
- return clo32(x);
-}
-
-uint32_t HELPER(clz)(uint32_t x)
-{
- return clz32(x);
-}
-
#ifndef CONFIG_USER_ONLY
void helper_cp0_set(CPUUniCore32State *env, uint32_t val, uint32_t creg,
uint32_t cop)
@@ -116,7 +106,7 @@ void helper_cp0_set(CPUUniCore32State *env, uint32_t val, uint32_t creg,
case 6:
if ((cop <= 6) && (cop >= 2)) {
/* invalid all tlb */
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
return;
}
break;
diff --git a/target/unicore32/helper.h b/target/unicore32/helper.h
index 941813749d..a4a5d45d1d 100644
--- a/target/unicore32/helper.h
+++ b/target/unicore32/helper.h
@@ -13,9 +13,6 @@ DEF_HELPER_3(cp0_get, i32, env, i32, i32)
DEF_HELPER_1(cp1_putc, void, i32)
#endif
-DEF_HELPER_1(clz, i32, i32)
-DEF_HELPER_1(clo, i32, i32)
-
DEF_HELPER_2(exception, void, env, i32)
DEF_HELPER_3(asr_write, void, env, i32, i32)
diff --git a/target/unicore32/translate.c b/target/unicore32/translate.c
index 514d460408..666a2016a8 100644
--- a/target/unicore32/translate.c
+++ b/target/unicore32/translate.c
@@ -1479,10 +1479,10 @@ static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
/* clz */
tmp = load_reg(s, UCOP_REG_M);
if (UCOP_SET(26)) {
- gen_helper_clo(tmp, tmp);
- } else {
- gen_helper_clz(tmp, tmp);
+ /* clo */
+ tcg_gen_not_i32(tmp, tmp);
}
+ tcg_gen_clzi_i32(tmp, tmp, 32);
store_reg(s, UCOP_REG_D, tmp);
return;
}
diff --git a/target/xtensa/helper.h b/target/xtensa/helper.h
index db3c9c5f21..cc751c98fb 100644
--- a/target/xtensa/helper.h
+++ b/target/xtensa/helper.h
@@ -3,8 +3,6 @@ DEF_HELPER_3(exception_cause, noreturn, env, i32, i32)
DEF_HELPER_4(exception_cause_vaddr, noreturn, env, i32, i32, i32)
DEF_HELPER_3(debug_exception, noreturn, env, i32, i32)
-DEF_HELPER_FLAGS_1(nsa, TCG_CALL_NO_RWG_SE, i32, i32)
-DEF_HELPER_FLAGS_1(nsau, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_2(wsr_windowbase, void, env, i32)
DEF_HELPER_4(entry, void, env, i32, i32, i32)
DEF_HELPER_2(retw, i32, env, i32)
diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c
index b456c2ec3f..af2723445d 100644
--- a/target/xtensa/op_helper.c
+++ b/target/xtensa/op_helper.c
@@ -164,19 +164,6 @@ void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
HELPER(exception)(env, EXC_DEBUG);
}
-uint32_t HELPER(nsa)(uint32_t v)
-{
- if (v & 0x80000000) {
- v = ~v;
- }
- return v ? clz32(v) - 1 : 31;
-}
-
-uint32_t HELPER(nsau)(uint32_t v)
-{
- return v ? clz32(v) : 32;
-}
-
static void copy_window_from_phys(CPUXtensaState *env,
uint32_t window, uint32_t phys, uint32_t n)
{
@@ -537,7 +524,7 @@ void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
v = (v & 0xffffff00) | 0x1;
if (v != env->sregs[RASID]) {
env->sregs[RASID] = v;
- tlb_flush(CPU(cpu), 1);
+ tlb_flush(CPU(cpu));
}
}
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index c0408a01c7..263002486c 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -1450,14 +1450,14 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
case 14: /*NSAu*/
HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
if (gen_window_check2(dc, RRR_S, RRR_T)) {
- gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
+ tcg_gen_clrsb_i32(cpu_R[RRR_T], cpu_R[RRR_S]);
}
break;
case 15: /*NSAUu*/
HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
if (gen_window_check2(dc, RRR_S, RRR_T)) {
- gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
+ tcg_gen_clzi_i32(cpu_R[RRR_T], cpu_R[RRR_S], 32);
}
break;
diff --git a/tcg-runtime.c b/tcg-runtime.c
index 9327b6f23b..4c60c96658 100644
--- a/tcg-runtime.c
+++ b/tcg-runtime.c
@@ -101,6 +101,46 @@ int64_t HELPER(mulsh_i64)(int64_t arg1, int64_t arg2)
return h;
}
+uint32_t HELPER(clz_i32)(uint32_t arg, uint32_t zero_val)
+{
+ return arg ? clz32(arg) : zero_val;
+}
+
+uint32_t HELPER(ctz_i32)(uint32_t arg, uint32_t zero_val)
+{
+ return arg ? ctz32(arg) : zero_val;
+}
+
+uint64_t HELPER(clz_i64)(uint64_t arg, uint64_t zero_val)
+{
+ return arg ? clz64(arg) : zero_val;
+}
+
+uint64_t HELPER(ctz_i64)(uint64_t arg, uint64_t zero_val)
+{
+ return arg ? ctz64(arg) : zero_val;
+}
+
+uint32_t HELPER(clrsb_i32)(uint32_t arg)
+{
+ return clrsb32(arg);
+}
+
+uint64_t HELPER(clrsb_i64)(uint64_t arg)
+{
+ return clrsb64(arg);
+}
+
+uint32_t HELPER(ctpop_i32)(uint32_t arg)
+{
+ return ctpop32(arg);
+}
+
+uint64_t HELPER(ctpop_i64)(uint64_t arg)
+{
+ return ctpop64(arg);
+}
+
void HELPER(exit_atomic)(CPUArchState *env)
{
cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
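
For reference, the new runtime helpers follow the three-operand semantics documented below for clz/ctz (a zero input yields the caller-supplied zero_val) plus plain clrsb/ctpop. A minimal standalone sketch of those semantics, using GCC builtins as a stand-in for QEMU's host-utils routines (the sketch_* names are illustrative, not QEMU code):

    /* Sketch of the clz/ctz/clrsb fallback semantics; GCC builtins stand in
     * for QEMU's clz32/ctz32/clrsb32 from host-utils. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t sketch_clz32(uint32_t v) { return v ? __builtin_clz(v) : 32; }
    static uint32_t sketch_ctz32(uint32_t v) { return v ? __builtin_ctz(v) : 32; }

    static uint32_t sketch_clrsb32(uint32_t v)
    {
        /* leading redundant sign bits: clz of (v XOR v>>1), minus one */
        return sketch_clz32(v ^ (uint32_t)((int32_t)v >> 1)) - 1;
    }

    int main(void)
    {
        uint32_t t1 = 0x00ff0000, zero_val = 32;
        printf("clz  : %u\n", t1 ? sketch_clz32(t1) : zero_val); /* 8  */
        printf("ctz  : %u\n", t1 ? sketch_ctz32(t1) : zero_val); /* 16 */
        printf("clrsb: %u\n", sketch_clrsb32(0xffff0000u));      /* 15 */
        return 0;
    }
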
diff --git a/tcg/README b/tcg/README
index ae31388c59..a9858c2f74 100644
--- a/tcg/README
+++ b/tcg/README
@@ -246,6 +246,14 @@ t0=~(t1|t2)
t0=t1|~t2
+* clz_i32/i64 t0, t1, t2
+
+t0 = t1 ? clz(t1) : t2
+
+* ctz_i32/i64 t0, t1, t2
+
+t0 = t1 ? ctz(t1) : t2
+
********* Shifts/Rotates
* shl_i32/i64 t0, t1, t2
@@ -314,11 +322,27 @@ The bitfield is described by POS/LEN, which are immediate values:
LEN - the length of the bitfield
POS - the position of the first bit, counting from the LSB
-For example, pos=8, len=4 indicates a 4-bit field at bit 8.
-This operation would be equivalent to
+For example, "deposit_i32 dest, t1, t2, 8, 4" indicates a 4-bit field
+at bit 8. This operation would be equivalent to
dest = (t1 & ~0x0f00) | ((t2 << 8) & 0x0f00)
+* extract_i32/i64 dest, t1, pos, len
+* sextract_i32/i64 dest, t1, pos, len
+
+Extract a bitfield from T1, placing the result in DEST.
+The bitfield is described by POS/LEN, which are immediate values,
+as above for deposit. For extract_*, the result will be extended
+to the left with zeros; for sextract_*, the result will be extended
+to the left with copies of the bitfield sign bit at pos + len - 1.
+
+For example, "sextract_i32 dest, t1, 8, 4" indicates a 4-bit field
+at bit 8. This operation would be equivalent to
+
+ dest = (t1 << 20) >> 28
+
+(using an arithmetic right shift).
+
* extrl_i64_i32 t0, t1
For 64-bit hosts only, extract the low 32-bits of input T1 and place it
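
To make the sextract example above concrete, a standalone check in plain C (not TCG code) that "(t1 << 20) >> 28" sign-extends the 4-bit field at bit 8, assuming an arithmetic right shift as the text notes:

    /* Check of the sextract_i32 example (pos=8, len=4) given above. */
    #include <stdint.h>
    #include <assert.h>

    static int32_t sextract_8_4(uint32_t t1)
    {
        /* left-justify the field, then arithmetic right shift */
        return (int32_t)(t1 << 20) >> 28;
    }

    int main(void)
    {
        assert(sextract_8_4(0x00000700u) == 7);    /* field 0x7 ->  7 */
        assert(sextract_8_4(0x00000f00u) == -1);   /* field 0xf -> -1 */
        assert(sextract_8_4(0xffff0affu) == -6);   /* field 0xa -> -6 */
        return 0;
    }
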
@@ -523,24 +547,29 @@ version. Aliases are specified in the input operands as for GCC.
The same register may be used for both an input and an output, even when
they are not explicitly aliased. If an op expands to multiple target
instructions then care must be taken to avoid clobbering input values.
-GCC style "early clobber" outputs are not currently supported.
+GCC style "early clobber" outputs are supported, with '&'.
A target can define specific register or constant constraints. If an
operation uses a constant input constraint which does not allow all
constants, it must also accept registers in order to have a fallback.
+The constraint 'i' is defined generically to accept any constant.
+The constraint 'r' is not defined generically, but is consistently
+used by each backend to indicate all registers.
The movi_i32 and movi_i64 operations must accept any constants.
The mov_i32 and mov_i64 operations must accept any registers of the
same type.
-The ld/st instructions must accept signed 32 bit constant offsets. It
-can be implemented by reserving a specific register to compute the
-address if the offset is too big.
+The ld/st/sti instructions must accept signed 32 bit constant offsets.
+This can be implemented by reserving a specific register in which to
+compute the address if the offset is too big.
The ld/st instructions must accept any destination (ld) or source (st)
register.
+The sti instruction may fail if it cannot store the given constant.
+
4.3) Function call assumptions
- The only supported types for parameters and return value are: 32 and
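
As an illustration of the constraint syntax just described, a hypothetical op-def entry (opcodes and operand shapes invented for the example, not taken from any backend) might combine an early-clobber output with the generic constraints:

    /* Hypothetical entries only; they show the '&', 'r' and 'i' syntax. */
    static const TCGTargetOpDef example_op_defs[] = {
        /* '&' marks an early-clobber output: it must not share a register
           with any input, because the expansion writes it before all
           inputs have been read. */
        { INDEX_op_ctz_i32, { "&r", "r", "r" } },
        /* 'r' is the backend's all-registers class, 'i' any constant. */
        { INDEX_op_shl_i32, { "r", "r", "ri" } },
    };
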
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index a1d101f891..1a5ea23844 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -62,7 +62,12 @@ typedef enum {
#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
+#define TCG_TARGET_HAS_clz_i32 1
+#define TCG_TARGET_HAS_ctz_i32 1
+#define TCG_TARGET_HAS_ctpop_i32 0
#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_extract_i32 1
+#define TCG_TARGET_HAS_sextract_i32 1
#define TCG_TARGET_HAS_movcond_i32 1
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
@@ -92,7 +97,12 @@ typedef enum {
#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
+#define TCG_TARGET_HAS_clz_i64 1
+#define TCG_TARGET_HAS_ctz_i64 1
+#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_extract_i64 1
+#define TCG_TARGET_HAS_sextract_i64 1
#define TCG_TARGET_HAS_movcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index 1939d3528f..6d227a5a6a 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -115,12 +115,10 @@ static inline void patch_reloc(tcg_insn_unit *code_ptr, int type,
#define TCG_CT_CONST_MONE 0x800
/* parse target specific constraints */
-static int target_parse_constraint(TCGArgConstraint *ct,
- const char **pct_str)
+static const char *target_parse_constraint(TCGArgConstraint *ct,
+ const char *ct_str, TCGType type)
{
- const char *ct_str = *pct_str;
-
- switch (ct_str[0]) {
+ switch (*ct_str++) {
case 'r':
ct->ct |= TCG_CT_REG;
tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1);
@@ -150,12 +148,9 @@ static int target_parse_constraint(TCGArgConstraint *ct,
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
- return -1;
+ return NULL;
}
-
- ct_str++;
- *pct_str = ct_str;
- return 0;
+ return ct_str;
}
static inline bool is_aimm(uint64_t val)
@@ -344,8 +339,12 @@ typedef enum {
/* Conditional select instructions. */
I3506_CSEL = 0x1a800000,
I3506_CSINC = 0x1a800400,
+ I3506_CSINV = 0x5a800000,
+ I3506_CSNEG = 0x5a800400,
/* Data-processing (1 source) instructions. */
+ I3507_CLZ = 0x5ac01000,
+ I3507_RBIT = 0x5ac00000,
I3507_REV16 = 0x5ac00400,
I3507_REV32 = 0x5ac00800,
I3507_REV64 = 0x5ac00c00,
@@ -581,11 +580,9 @@ static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext,
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
tcg_target_long value)
{
- AArch64Insn insn;
int i, wantinv, shift;
tcg_target_long svalue = value;
tcg_target_long ivalue = ~value;
- tcg_target_long imask;
/* For 32-bit values, discard potential garbage in value. For 64-bit
values within [2**31, 2**32-1], we can create smaller sequences by
@@ -631,42 +628,35 @@ static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
/* Would it take fewer insns to begin with MOVN? For the value and its
inverse, count the number of 16-bit lanes that are 0. */
- for (i = wantinv = imask = 0; i < 64; i += 16) {
+ for (i = wantinv = 0; i < 64; i += 16) {
tcg_target_long mask = 0xffffull << i;
- if ((value & mask) == 0) {
- wantinv -= 1;
- }
- if ((ivalue & mask) == 0) {
- wantinv += 1;
- imask |= mask;
- }
- }
-
- /* If we had more 0xffff than 0x0000, invert VALUE and use MOVN. */
- insn = I3405_MOVZ;
- if (wantinv > 0) {
- value = ivalue;
- insn = I3405_MOVN;
+ wantinv -= ((value & mask) == 0);
+ wantinv += ((ivalue & mask) == 0);
}
- /* Find the lowest lane that is not 0x0000. */
- shift = ctz64(value) & (63 & -16);
- tcg_out_insn_3405(s, insn, type, rd, value >> shift, shift);
-
- if (wantinv > 0) {
- /* Re-invert the value, so MOVK sees non-inverted bits. */
- value = ~value;
- /* Clear out all the 0xffff lanes. */
- value ^= imask;
- }
- /* Clear out the lane that we just set. */
- value &= ~(0xffffUL << shift);
-
- /* Iterate until all lanes have been set, and thus cleared from VALUE. */
- while (value) {
+ if (wantinv <= 0) {
+ /* Find the lowest lane that is not 0x0000. */
shift = ctz64(value) & (63 & -16);
- tcg_out_insn(s, 3405, MOVK, type, rd, value >> shift, shift);
+ tcg_out_insn(s, 3405, MOVZ, type, rd, value >> shift, shift);
+ /* Clear out the lane that we just set. */
value &= ~(0xffffUL << shift);
+ /* Iterate until all non-zero lanes have been processed. */
+ while (value) {
+ shift = ctz64(value) & (63 & -16);
+ tcg_out_insn(s, 3405, MOVK, type, rd, value >> shift, shift);
+ value &= ~(0xffffUL << shift);
+ }
+ } else {
+ /* Like above, but with the inverted value and MOVN to start. */
+ shift = ctz64(ivalue) & (63 & -16);
+ tcg_out_insn(s, 3405, MOVN, type, rd, ivalue >> shift, shift);
+ ivalue &= ~(0xffffUL << shift);
+ while (ivalue) {
+ shift = ctz64(ivalue) & (63 & -16);
+ /* Provide MOVK with the non-inverted value. */
+ tcg_out_insn(s, 3405, MOVK, type, rd, ~(ivalue >> shift), shift);
+ ivalue &= ~(0xffffUL << shift);
+ }
}
}
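
Worked example of the lane-counting heuristic above: for value 0xffffffff1234ffff none of the four 16-bit lanes is zero, but three lanes of the inverse 0x00000000edcb0000 are, so wantinv ends up at +3 and the MOVN path wins; a single MOVN #0xedcb, LSL #16 already materialises the whole constant, where the MOVZ path would need a MOVZ plus three MOVKs. A standalone sketch of just the counting step (plain C, not the backend code):

    /* Sketch of the MOVZ-vs-MOVN lane count for one sample constant. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t value  = 0xffffffff1234ffffULL;
        uint64_t ivalue = ~value;               /* 0x00000000edcb0000 */
        int wantinv = 0;

        for (int i = 0; i < 64; i += 16) {
            uint64_t mask = 0xffffULL << i;
            wantinv -= ((value  & mask) == 0);  /* zero lanes favour MOVZ */
            wantinv += ((ivalue & mask) == 0);  /* zero lanes favour MOVN */
        }
        printf("wantinv = %d -> %s\n", wantinv, wantinv > 0 ? "MOVN" : "MOVZ");
        return 0;
    }
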
@@ -965,6 +955,15 @@ static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl,
insn = I3401_SUBSI;
bl = -bl;
}
+ if (unlikely(al == TCG_REG_XZR)) {
+ /* ??? We want to allow al to be zero for the benefit of
+ negation via subtraction. However, that leaves open the
+ possibility of adding 0+const in the low part, and the
+ immediate add instructions encode XSP not XZR. Don't try
+ anything more elaborate here than loading another zero. */
+ al = TCG_REG_TMP;
+ tcg_out_movi(s, ext, al, 0);
+ }
tcg_out_insn_3401(s, insn, ext, rl, al, bl);
} else {
tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl);
@@ -998,6 +997,37 @@ static inline void tcg_out_mb(TCGContext *s, TCGArg a0)
tcg_out32(s, sync[a0 & TCG_MO_ALL]);
}
+static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
+ TCGReg a0, TCGArg b, bool const_b, bool is_ctz)
+{
+ TCGReg a1 = a0;
+ if (is_ctz) {
+ a1 = TCG_REG_TMP;
+ tcg_out_insn(s, 3507, RBIT, ext, a1, a0);
+ }
+ if (const_b && b == (ext ? 64 : 32)) {
+ tcg_out_insn(s, 3507, CLZ, ext, d, a1);
+ } else {
+ AArch64Insn sel = I3506_CSEL;
+
+ tcg_out_cmp(s, ext, a0, 0, 1);
+ tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP, a1);
+
+ if (const_b) {
+ if (b == -1) {
+ b = TCG_REG_XZR;
+ sel = I3506_CSINV;
+ } else if (b == 0) {
+ b = TCG_REG_XZR;
+ } else {
+ tcg_out_movi(s, ext, d, b);
+ b = d;
+ }
+ }
+ tcg_out_insn_3506(s, sel, ext, d, TCG_REG_TMP, b, TCG_COND_NE);
+ }
+}
+
#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
* TCGMemOpIdx oi, uintptr_t ra)
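
The CTZ expansion in tcg_out_cltz above depends on the identity ctz(x) == clz(bit_reverse(x)), which is what the extra RBIT provides; the CSEL at the end then substitutes the caller's "b" operand when the (unreversed) input was zero. A standalone check of the identity, illustrative only, with a naive software bit reverse:

    /* Check that ctz(x) == clz(bit_reverse(x)) for non-zero x. */
    #include <stdint.h>
    #include <assert.h>

    static uint32_t bit_reverse32(uint32_t x)
    {
        uint32_t r = 0;
        for (int i = 0; i < 32; i++) {
            r = (r << 1) | ((x >> i) & 1);
        }
        return r;
    }

    int main(void)
    {
        for (uint32_t x = 1; x < 100000; x++) {
            assert(__builtin_ctz(x) == __builtin_clz(bit_reverse32(x)));
        }
        return 0;
    }
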
@@ -1564,6 +1594,15 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
break;
+ case INDEX_op_clz_i64:
+ case INDEX_op_clz_i32:
+ tcg_out_cltz(s, ext, a0, a1, a2, c2, false);
+ break;
+ case INDEX_op_ctz_i64:
+ case INDEX_op_ctz_i32:
+ tcg_out_cltz(s, ext, a0, a1, a2, c2, true);
+ break;
+
case INDEX_op_brcond_i32:
a1 = (int32_t)a1;
/* FALLTHRU */
@@ -1640,6 +1679,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]);
break;
+ case INDEX_op_extract_i64:
+ case INDEX_op_extract_i32:
+ tcg_out_ubfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
+ break;
+
+ case INDEX_op_sextract_i64:
+ case INDEX_op_sextract_i32:
+ tcg_out_sbfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
+ break;
+
case INDEX_op_add2_i32:
tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3),
(int32_t)args[4], args[5], const_args[4],
@@ -1745,11 +1794,15 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
{ INDEX_op_sar_i32, { "r", "r", "ri" } },
{ INDEX_op_rotl_i32, { "r", "r", "ri" } },
{ INDEX_op_rotr_i32, { "r", "r", "ri" } },
+ { INDEX_op_clz_i32, { "r", "r", "rAL" } },
+ { INDEX_op_ctz_i32, { "r", "r", "rAL" } },
{ INDEX_op_shl_i64, { "r", "r", "ri" } },
{ INDEX_op_shr_i64, { "r", "r", "ri" } },
{ INDEX_op_sar_i64, { "r", "r", "ri" } },
{ INDEX_op_rotl_i64, { "r", "r", "ri" } },
{ INDEX_op_rotr_i64, { "r", "r", "ri" } },
+ { INDEX_op_clz_i64, { "r", "r", "rAL" } },
+ { INDEX_op_ctz_i64, { "r", "r", "rAL" } },
{ INDEX_op_brcond_i32, { "r", "rA" } },
{ INDEX_op_brcond_i64, { "r", "rA" } },
@@ -1785,6 +1838,10 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
{ INDEX_op_deposit_i32, { "r", "0", "rZ" } },
{ INDEX_op_deposit_i64, { "r", "0", "rZ" } },
+ { INDEX_op_extract_i32, { "r", "r" } },
+ { INDEX_op_extract_i64, { "r", "r" } },
+ { INDEX_op_sextract_i32, { "r", "r" } },
+ { INDEX_op_sextract_i64, { "r", "r" } },
{ INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
{ INDEX_op_add2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } },
@@ -1798,6 +1855,18 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
{ -1 },
};
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+ int i, n = ARRAY_SIZE(aarch64_op_defs);
+
+ for (i = 0; i < n; ++i) {
+ if (aarch64_op_defs[i].op == op) {
+ return &aarch64_op_defs[i];
+ }
+ }
+ return NULL;
+}
+
static void tcg_target_init(TCGContext *s)
{
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
@@ -1820,8 +1889,6 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */
-
- tcg_add_target_add_op_defs(aarch64_op_defs);
}
/* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). */
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index a0e1acfa77..09a19c6f35 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -26,6 +26,37 @@
#ifndef ARM_TCG_TARGET_H
#define ARM_TCG_TARGET_H
+/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. */
+#ifndef __ARM_ARCH
+# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
+ || defined(__ARM_ARCH_7EM__)
+# define __ARM_ARCH 7
+# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
+ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__)
+# define __ARM_ARCH 6
+# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \
+ || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \
+ || defined(__ARM_ARCH_5TEJ__)
+# define __ARM_ARCH 5
+# else
+# define __ARM_ARCH 4
+# endif
+#endif
+
+extern int arm_arch;
+
+#if defined(__ARM_ARCH_5T__) \
+ || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
+# define use_armv5t_instructions 1
+#else
+# define use_armv5t_instructions use_armv6_instructions
+#endif
+
+#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6)
+#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
+
#undef TCG_TARGET_STACK_GROWSUP
#define TCG_TARGET_INSN_UNIT_SIZE 4
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16
@@ -79,7 +110,12 @@ extern bool use_idiv_instructions;
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_clz_i32 use_armv5t_instructions
+#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
+#define TCG_TARGET_HAS_ctpop_i32 0
+#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
+#define TCG_TARGET_HAS_extract_i32 use_armv7_instructions
+#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions
#define TCG_TARGET_HAS_movcond_i32 1
#define TCG_TARGET_HAS_mulu2_i32 1
#define TCG_TARGET_HAS_muls2_i32 1
@@ -88,9 +124,6 @@ extern bool use_idiv_instructions;
#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
#define TCG_TARGET_HAS_rem_i32 0
-extern bool tcg_target_deposit_valid(int ofs, int len);
-#define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid
-
enum {
TCG_AREG0 = TCG_REG_R6,
};
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index ffa0d40660..e75a6d4943 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -25,36 +25,7 @@
#include "elf.h"
#include "tcg-be-ldst.h"
-/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. */
-#ifndef __ARM_ARCH
-# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
- || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
- || defined(__ARM_ARCH_7EM__)
-# define __ARM_ARCH 7
-# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
- || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
- || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__)
-# define __ARM_ARCH 6
-# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \
- || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \
- || defined(__ARM_ARCH_5TEJ__)
-# define __ARM_ARCH 5
-# else
-# define __ARM_ARCH 4
-# endif
-#endif
-
-static int arm_arch = __ARM_ARCH;
-
-#if defined(__ARM_ARCH_5T__) \
- || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
-# define use_armv5t_instructions 1
-#else
-# define use_armv5t_instructions use_armv6_instructions
-#endif
-
-#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6)
-#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
+int arm_arch = __ARM_ARCH;
#ifndef use_idiv_instructions
bool use_idiv_instructions;
@@ -143,12 +114,10 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
#define TCG_CT_CONST_ZERO 0x800
/* parse target specific constraints */
-static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+static const char *target_parse_constraint(TCGArgConstraint *ct,
+ const char *ct_str, TCGType type)
{
- const char *ct_str;
-
- ct_str = *pct_str;
- switch (ct_str[0]) {
+ switch (*ct_str++) {
case 'I':
ct->ct |= TCG_CT_CONST_ARM;
break;
@@ -201,12 +170,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
break;
default:
- return -1;
+ return NULL;
}
- ct_str++;
- *pct_str = ct_str;
-
- return 0;
+ return ct_str;
}
static inline uint32_t rotl(uint32_t val, int n)
@@ -290,6 +256,9 @@ typedef enum {
ARITH_BIC = 0xe << 21,
ARITH_MVN = 0xf << 21,
+ INSN_CLZ = 0x016f0f10,
+ INSN_RBIT = 0x06ff0f30,
+
INSN_LDR_IMM = 0x04100000,
INSN_LDR_REG = 0x06100000,
INSN_STR_IMM = 0x04000000,
@@ -730,16 +699,6 @@ static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
}
}
-bool tcg_target_deposit_valid(int ofs, int len)
-{
- /* ??? Without bfi, we could improve over generic code by combining
- the right-shift from a non-zero ofs with the orr. We do run into
- problems when rd == rs, and the mask generated from ofs+len doesn't
- fit into an immediate. We would have to be careful not to pessimize
- wrt the optimizations performed on the expanded code. */
- return use_armv7_instructions;
-}
-
static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
TCGArg a1, int ofs, int len, bool const_a1)
{
@@ -752,6 +711,22 @@ static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
| (ofs << 7) | ((ofs + len - 1) << 16));
}
+static inline void tcg_out_extract(TCGContext *s, int cond, TCGReg rd,
+ TCGArg a1, int ofs, int len)
+{
+ /* ubfx */
+ tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | a1
+ | (ofs << 7) | ((len - 1) << 16));
+}
+
+static inline void tcg_out_sextract(TCGContext *s, int cond, TCGReg rd,
+ TCGArg a1, int ofs, int len)
+{
+ /* sbfx */
+ tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | a1
+ | (ofs << 7) | ((len - 1) << 16));
+}
+
/* Note that this routine is used for both LDR and LDRH formats, so we do
not wish to include an immediate shift at this point. */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
@@ -1857,6 +1832,28 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
break;
+ case INDEX_op_ctz_i32:
+ tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
+ a1 = TCG_REG_TMP;
+ goto do_clz;
+
+ case INDEX_op_clz_i32:
+ a1 = args[1];
+ do_clz:
+ a0 = args[0];
+ a2 = args[2];
+ c = const_args[2];
+ if (c && a2 == 32) {
+ tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
+ break;
+ }
+ tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
+ tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
+ if (c || a0 != a2) {
+ tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
+ }
+ break;
+
case INDEX_op_brcond_i32:
tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
args[0], args[1], const_args[1]);
@@ -1933,6 +1930,12 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_deposit(s, COND_AL, args[0], args[2],
args[3], args[4], const_args[2]);
break;
+ case INDEX_op_extract_i32:
+ tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
+ break;
+ case INDEX_op_sextract_i32:
+ tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
+ break;
case INDEX_op_div_i32:
tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
@@ -1985,6 +1988,8 @@ static const TCGTargetOpDef arm_op_defs[] = {
{ INDEX_op_sar_i32, { "r", "r", "ri" } },
{ INDEX_op_rotl_i32, { "r", "r", "ri" } },
{ INDEX_op_rotr_i32, { "r", "r", "ri" } },
+ { INDEX_op_clz_i32, { "r", "r", "rIK" } },
+ { INDEX_op_ctz_i32, { "r", "r", "rIK" } },
{ INDEX_op_brcond_i32, { "r", "rIN" } },
{ INDEX_op_setcond_i32, { "r", "r", "rIN" } },
@@ -2015,6 +2020,8 @@ static const TCGTargetOpDef arm_op_defs[] = {
{ INDEX_op_ext16u_i32, { "r", "r" } },
{ INDEX_op_deposit_i32, { "r", "0", "rZ" } },
+ { INDEX_op_extract_i32, { "r", "r" } },
+ { INDEX_op_sextract_i32, { "r", "r" } },
{ INDEX_op_div_i32, { "r", "r", "r" } },
{ INDEX_op_divu_i32, { "r", "r", "r" } },
@@ -2023,6 +2030,18 @@ static const TCGTargetOpDef arm_op_defs[] = {
{ -1 },
};
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+ int i, n = ARRAY_SIZE(arm_op_defs);
+
+ for (i = 0; i < n; ++i) {
+ if (arm_op_defs[i].op == op) {
+ return &arm_op_defs[i];
+ }
+ }
+ return NULL;
+}
+
static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
@@ -2053,8 +2072,6 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
-
- tcg_add_target_add_op_defs(arm_op_defs);
}
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index 524cfc61fd..21d96ec35c 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -76,6 +76,7 @@ typedef enum {
#endif
extern bool have_bmi1;
+extern bool have_popcnt;
/* optional instructions */
#define TCG_TARGET_HAS_div2_i32 1
@@ -93,7 +94,12 @@ extern bool have_bmi1;
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
+#define TCG_TARGET_HAS_clz_i32 1
+#define TCG_TARGET_HAS_ctz_i32 1
+#define TCG_TARGET_HAS_ctpop_i32 have_popcnt
#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_extract_i32 1
+#define TCG_TARGET_HAS_sextract_i32 1
#define TCG_TARGET_HAS_movcond_i32 1
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
@@ -123,7 +129,12 @@ extern bool have_bmi1;
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
+#define TCG_TARGET_HAS_clz_i64 1
+#define TCG_TARGET_HAS_ctz_i64 1
+#define TCG_TARGET_HAS_ctpop_i64 have_popcnt
#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_extract_i64 1
+#define TCG_TARGET_HAS_sextract_i64 0
#define TCG_TARGET_HAS_movcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
@@ -138,6 +149,12 @@ extern bool have_bmi1;
((ofs) == 0 && (len) == 16))
#define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid
+/* Check for the possibility of high-byte extraction and, for 64-bit,
+ zero-extending 32-bit right-shift. */
+#define TCG_TARGET_extract_i32_valid(ofs, len) ((ofs) == 8 && (len) == 8)
+#define TCG_TARGET_extract_i64_valid(ofs, len) \
+ (((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
+
#if TCG_TARGET_REG_BITS == 64
# define TCG_AREG0 TCG_REG_R14
#else
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index eeb1777bbb..5918008296 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -92,6 +92,7 @@ static const int tcg_target_call_oarg_regs[] = {
#define TCG_CT_CONST_S32 0x100
#define TCG_CT_CONST_U32 0x200
#define TCG_CT_CONST_I32 0x400
+#define TCG_CT_CONST_WSZ 0x800
/* Registers used with L constraint, which are the first argument
registers on x86_64, and two random call clobbered registers on
@@ -129,15 +130,21 @@ static bool have_movbe;
# define have_movbe 0
#endif
-/* We need this symbol in tcg-target.h, and we can't properly conditionalize
+/* We need these symbols in tcg-target.h, and we can't properly conditionalize
it there. Therefore we always define the variable. */
bool have_bmi1;
+bool have_popcnt;
#if defined(CONFIG_CPUID_H) && defined(bit_BMI2)
static bool have_bmi2;
#else
# define have_bmi2 0
#endif
+#if defined(CONFIG_CPUID_H) && defined(bit_LZCNT)
+static bool have_lzcnt;
+#else
+# define have_lzcnt 0
+#endif
static tcg_insn_unit *tb_ret_addr;
@@ -166,12 +173,10 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
}
/* parse target specific constraints */
-static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+static const char *target_parse_constraint(TCGArgConstraint *ct,
+ const char *ct_str, TCGType type)
{
- const char *ct_str;
-
- ct_str = *pct_str;
- switch(ct_str[0]) {
+ switch(*ct_str++) {
case 'a':
ct->ct |= TCG_CT_REG;
tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
@@ -181,7 +186,6 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
break;
case 'c':
- case_c:
ct->ct |= TCG_CT_REG;
tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
break;
@@ -210,7 +214,6 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_set32(ct->u.regs, 0, 0xf);
break;
case 'r':
- case_r:
ct->ct |= TCG_CT_REG;
if (TCG_TARGET_REG_BITS == 64) {
tcg_regset_set32(ct->u.regs, 0, 0xffff);
@@ -218,13 +221,10 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_set32(ct->u.regs, 0, 0xff);
}
break;
- case 'C':
- /* With SHRX et al, we need not use ECX as shift count register. */
- if (have_bmi2) {
- goto case_r;
- } else {
- goto case_c;
- }
+ case 'W':
+ /* With TZCNT/LZCNT, we can have operand-size as an input. */
+ ct->ct |= TCG_CT_CONST_WSZ;
+ break;
/* qemu_ld/st address constraint */
case 'L':
@@ -239,21 +239,19 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
break;
case 'e':
- ct->ct |= TCG_CT_CONST_S32;
+ ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_S32);
break;
case 'Z':
- ct->ct |= TCG_CT_CONST_U32;
+ ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_U32);
break;
case 'I':
- ct->ct |= TCG_CT_CONST_I32;
+ ct->ct |= (type == TCG_TYPE_I32 ? TCG_CT_CONST : TCG_CT_CONST_I32);
break;
default:
- return -1;
+ return NULL;
}
- ct_str++;
- *pct_str = ct_str;
- return 0;
+ return ct_str;
}
/* test if a constant matches the constraint */
@@ -273,6 +271,9 @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
return 1;
}
+ if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
+ return 1;
+ }
return 0;
}
@@ -306,6 +307,8 @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
#define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
#define OPC_ANDN (0xf2 | P_EXT38)
#define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3))
+#define OPC_BSF (0xbc | P_EXT)
+#define OPC_BSR (0xbd | P_EXT)
#define OPC_BSWAP (0xc8 | P_EXT)
#define OPC_CALL_Jz (0xe8)
#define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */
@@ -320,6 +323,7 @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
#define OPC_JMP_long (0xe9)
#define OPC_JMP_short (0xeb)
#define OPC_LEA (0x8d)
+#define OPC_LZCNT (0xbd | P_EXT | P_SIMDF3)
#define OPC_MOVB_EvGv (0x88) /* stores, more or less */
#define OPC_MOVL_EvGv (0x89) /* stores, more or less */
#define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
@@ -334,6 +338,7 @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
#define OPC_MOVZBL (0xb6 | P_EXT)
#define OPC_MOVZWL (0xb7 | P_EXT)
#define OPC_POP_r32 (0x58)
+#define OPC_POPCNT (0xb8 | P_EXT | P_SIMDF3)
#define OPC_PUSH_r32 (0x50)
#define OPC_PUSH_Iv (0x68)
#define OPC_PUSH_Ib (0x6a)
@@ -346,6 +351,7 @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
#define OPC_TESTL (0x85)
+#define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3)
#define OPC_XCHG_ax_r32 (0x90)
#define OPC_GRP3_Ev (0xf7)
@@ -431,6 +437,11 @@ static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x)
if (opc & P_ADDR32) {
tcg_out8(s, 0x67);
}
+ if (opc & P_SIMDF3) {
+ tcg_out8(s, 0xf3);
+ } else if (opc & P_SIMDF2) {
+ tcg_out8(s, 0xf2);
+ }
rex = 0;
rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */
@@ -465,6 +476,11 @@ static void tcg_out_opc(TCGContext *s, int opc)
if (opc & P_DATA16) {
tcg_out8(s, 0x66);
}
+ if (opc & P_SIMDF3) {
+ tcg_out8(s, 0xf3);
+ } else if (opc & P_SIMDF2) {
+ tcg_out8(s, 0xf2);
+ }
if (opc & (P_EXT | P_EXT38)) {
tcg_out8(s, 0x0f);
if (opc & P_EXT38) {
@@ -1093,13 +1109,11 @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
}
#endif
-static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
- TCGArg c1, TCGArg c2, int const_c2,
- TCGArg v1)
+static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
+ TCGReg dest, TCGReg v1)
{
- tcg_out_cmp(s, c1, c2, const_c2, 0);
if (have_cmov) {
- tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
+ tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
} else {
TCGLabel *over = gen_new_label();
tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
@@ -1108,16 +1122,68 @@ static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
}
}
+static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
+ TCGReg c1, TCGArg c2, int const_c2,
+ TCGReg v1)
+{
+ tcg_out_cmp(s, c1, c2, const_c2, 0);
+ tcg_out_cmov(s, cond, 0, dest, v1);
+}
+
#if TCG_TARGET_REG_BITS == 64
-static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
- TCGArg c1, TCGArg c2, int const_c2,
- TCGArg v1)
+static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
+ TCGReg c1, TCGArg c2, int const_c2,
+ TCGReg v1)
{
tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
- tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1);
+ tcg_out_cmov(s, cond, P_REXW, dest, v1);
}
#endif
+static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
+ TCGArg arg2, bool const_a2)
+{
+ if (have_bmi1) {
+ tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
+ if (const_a2) {
+ tcg_debug_assert(arg2 == (rexw ? 64 : 32));
+ } else {
+ tcg_debug_assert(dest != arg2);
+ tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
+ }
+ } else {
+ tcg_debug_assert(dest != arg2);
+ tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
+ tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
+ }
+}
+
+static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
+ TCGArg arg2, bool const_a2)
+{
+ if (have_lzcnt) {
+ tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
+ if (const_a2) {
+ tcg_debug_assert(arg2 == (rexw ? 64 : 32));
+ } else {
+ tcg_debug_assert(dest != arg2);
+ tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
+ }
+ } else {
+ tcg_debug_assert(!const_a2);
+ tcg_debug_assert(dest != arg1);
+ tcg_debug_assert(dest != arg2);
+
+ /* Recall that the output of BSR is the index not the count. */
+ tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
+ tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
+
+ /* Since we have destroyed the flags from BSR, we have to re-test. */
+ tcg_out_cmp(s, arg1, 0, 1, rexw);
+ tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
+ }
+}
+
static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
{
intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
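
The non-LZCNT path of tcg_out_clz above relies on BSR returning the bit index of the most significant set bit; for a non-zero 32-bit input, clz(x) = 31 - index, and since the index never exceeds 31 the subtraction cannot borrow, so it can be done with the cheaper XOR against 31. A standalone check (plain C; bsr32 is a naive stand-in for the instruction):

    /* Check the "BSR result XOR 31 == clz" rewrite used above. */
    #include <stdint.h>
    #include <assert.h>

    static int bsr32(uint32_t x)        /* index of highest set bit, x != 0 */
    {
        int i = 31;
        while (!(x & (1u << i))) {
            i--;
        }
        return i;
    }

    int main(void)
    {
        uint32_t tests[] = { 1, 2, 0x80, 0x12345678, 0x80000000u, 0xffffffffu };
        for (int i = 0; i < 6; i++) {
            assert((bsr32(tests[i]) ^ 31) == __builtin_clz(tests[i]));
        }
        return 0;
    }
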
@@ -1795,7 +1861,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg *args, const int *const_args)
{
- int c, vexop, rexw = 0;
+ TCGArg a0, a1, a2;
+ int c, const_a2, vexop, rexw = 0;
#if TCG_TARGET_REG_BITS == 64
# define OP_32_64(x) \
@@ -1807,9 +1874,15 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case glue(glue(INDEX_op_, x), _i32)
#endif
- switch(opc) {
+ /* Hoist the loads of the most common arguments. */
+ a0 = args[0];
+ a1 = args[1];
+ a2 = args[2];
+ const_a2 = const_args[2];
+
+ switch (opc) {
case INDEX_op_exit_tb:
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]);
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
tcg_out_jmp(s, tb_ret_addr);
break;
case INDEX_op_goto_tb:
@@ -1824,57 +1897,53 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_nopn(s, gap - 1);
}
tcg_out8(s, OPC_JMP_long); /* jmp im */
- s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
+ s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
tcg_out32(s, 0);
} else {
/* indirect jump method */
tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
- (intptr_t)(s->tb_jmp_target_addr + args[0]));
+ (intptr_t)(s->tb_jmp_target_addr + a0));
}
- s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
+ s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
break;
case INDEX_op_br:
- tcg_out_jxx(s, JCC_JMP, arg_label(args[0]), 0);
+ tcg_out_jxx(s, JCC_JMP, arg_label(a0), 0);
break;
OP_32_64(ld8u):
/* Note that we can ignore REXW for the zero-extend to 64-bit. */
- tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]);
+ tcg_out_modrm_offset(s, OPC_MOVZBL, a0, a1, a2);
break;
OP_32_64(ld8s):
- tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]);
+ tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, a0, a1, a2);
break;
OP_32_64(ld16u):
/* Note that we can ignore REXW for the zero-extend to 64-bit. */
- tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]);
+ tcg_out_modrm_offset(s, OPC_MOVZWL, a0, a1, a2);
break;
OP_32_64(ld16s):
- tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]);
+ tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, a0, a1, a2);
break;
#if TCG_TARGET_REG_BITS == 64
case INDEX_op_ld32u_i64:
#endif
case INDEX_op_ld_i32:
- tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
+ tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
break;
OP_32_64(st8):
if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVB_EvIz,
- 0, args[1], args[2]);
- tcg_out8(s, args[0]);
+ tcg_out_modrm_offset(s, OPC_MOVB_EvIz, 0, a1, a2);
+ tcg_out8(s, a0);
} else {
- tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R,
- args[0], args[1], args[2]);
+ tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, a0, a1, a2);
}
break;
OP_32_64(st16):
if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16,
- 0, args[1], args[2]);
- tcg_out16(s, args[0]);
+ tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_DATA16, 0, a1, a2);
+ tcg_out16(s, a0);
} else {
- tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16,
- args[0], args[1], args[2]);
+ tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, a0, a1, a2);
}
break;
#if TCG_TARGET_REG_BITS == 64
@@ -1882,19 +1951,18 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
#endif
case INDEX_op_st_i32:
if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, args[1], args[2]);
- tcg_out32(s, args[0]);
+ tcg_out_modrm_offset(s, OPC_MOVL_EvIz, 0, a1, a2);
+ tcg_out32(s, a0);
} else {
- tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
+ tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
}
break;
OP_32_64(add):
/* For 3-operand addition, use LEA. */
- if (args[0] != args[1]) {
- TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0;
-
- if (const_args[2]) {
+ if (a0 != a1) {
+ TCGArg c3 = 0;
+ if (const_a2) {
c3 = a2, a2 = -1;
} else if (a0 == a2) {
/* Watch out for dest = src + dest, since we've removed
@@ -1921,36 +1989,35 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
c = ARITH_XOR;
goto gen_arith;
gen_arith:
- if (const_args[2]) {
- tgen_arithi(s, c + rexw, args[0], args[2], 0);
+ if (const_a2) {
+ tgen_arithi(s, c + rexw, a0, a2, 0);
} else {
- tgen_arithr(s, c + rexw, args[0], args[2]);
+ tgen_arithr(s, c + rexw, a0, a2);
}
break;
OP_32_64(andc):
- if (const_args[2]) {
- tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32,
- args[0], args[1]);
- tgen_arithi(s, ARITH_AND + rexw, args[0], ~args[2], 0);
+ if (const_a2) {
+ tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
+ tgen_arithi(s, ARITH_AND + rexw, a0, ~a2, 0);
} else {
- tcg_out_vex_modrm(s, OPC_ANDN + rexw, args[0], args[2], args[1]);
+ tcg_out_vex_modrm(s, OPC_ANDN + rexw, a0, a2, a1);
}
break;
OP_32_64(mul):
- if (const_args[2]) {
+ if (const_a2) {
int32_t val;
- val = args[2];
+ val = a2;
if (val == (int8_t)val) {
- tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]);
+ tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, a0, a0);
tcg_out8(s, val);
} else {
- tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]);
+ tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, a0, a0);
tcg_out32(s, val);
}
} else {
- tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]);
+ tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, a0, a2);
}
break;
@@ -1962,6 +2029,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
OP_32_64(shl):
+ /* For small constant 3-operand shift, use LEA. */
+ if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
+ if (a2 - 1 == 0) {
+ /* shl $1,a1,a0 -> lea (a1,a1),a0 */
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
+ } else {
+ /* shl $n,a1,a0 -> lea 0(,a1,n),a0 */
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
+ }
+ break;
+ }
c = SHIFT_SHL;
vexop = OPC_SHLX;
goto gen_shift_maybe_vex;
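
The LEA folding above works because the x86 scaled-index addressing mode computes index*scale for scales 2, 4 and 8, which covers left shifts by 1, 2 and 3 without touching the source register; e.g. "shl $2,a1,a0" becomes "lea 0(,a1,4),a0". A trivial standalone reminder of the equivalence (register names in the comments are illustrative):

    /* Left shift by 1..3 equals multiply by 2/4/8, i.e. one scaled LEA. */
    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t a1 = 0x1234;
        assert((a1 << 1) == a1 * 2);   /* lea (a1,a1), a0  */
        assert((a1 << 2) == a1 * 4);   /* lea 0(,a1,4), a0 */
        assert((a1 << 3) == a1 * 8);   /* lea 0(,a1,8), a0 */
        return 0;
    }
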
@@ -1980,57 +2058,67 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
c = SHIFT_ROR;
goto gen_shift;
gen_shift_maybe_vex:
- if (have_bmi2 && !const_args[2]) {
- tcg_out_vex_modrm(s, vexop + rexw, args[0], args[2], args[1]);
- break;
+ if (have_bmi2) {
+ if (!const_a2) {
+ tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
+ break;
+ }
+ tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
}
/* FALLTHRU */
gen_shift:
- if (const_args[2]) {
- tcg_out_shifti(s, c + rexw, args[0], args[2]);
+ if (const_a2) {
+ tcg_out_shifti(s, c + rexw, a0, a2);
} else {
- tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]);
+ tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, a0);
}
break;
+ OP_32_64(ctz):
+ tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
+ break;
+ OP_32_64(clz):
+ tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
+ break;
+ OP_32_64(ctpop):
+ tcg_out_modrm(s, OPC_POPCNT + rexw, a0, a1);
+ break;
+
case INDEX_op_brcond_i32:
- tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1],
- arg_label(args[3]), 0);
+ tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
break;
case INDEX_op_setcond_i32:
- tcg_out_setcond32(s, args[3], args[0], args[1],
- args[2], const_args[2]);
+ tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2);
break;
case INDEX_op_movcond_i32:
- tcg_out_movcond32(s, args[5], args[0], args[1],
- args[2], const_args[2], args[3]);
+ tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]);
break;
OP_32_64(bswap16):
- tcg_out_rolw_8(s, args[0]);
+ tcg_out_rolw_8(s, a0);
break;
OP_32_64(bswap32):
- tcg_out_bswap32(s, args[0]);
+ tcg_out_bswap32(s, a0);
break;
OP_32_64(neg):
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]);
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, a0);
break;
OP_32_64(not):
- tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]);
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, a0);
break;
OP_32_64(ext8s):
- tcg_out_ext8s(s, args[0], args[1], rexw);
+ tcg_out_ext8s(s, a0, a1, rexw);
break;
OP_32_64(ext16s):
- tcg_out_ext16s(s, args[0], args[1], rexw);
+ tcg_out_ext16s(s, a0, a1, rexw);
break;
OP_32_64(ext8u):
- tcg_out_ext8u(s, args[0], args[1]);
+ tcg_out_ext8u(s, a0, a1);
break;
OP_32_64(ext16u):
- tcg_out_ext16u(s, args[0], args[1]);
+ tcg_out_ext16u(s, a0, a1);
break;
case INDEX_op_qemu_ld_i32:
@@ -2054,26 +2142,26 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
OP_32_64(add2):
if (const_args[4]) {
- tgen_arithi(s, ARITH_ADD + rexw, args[0], args[4], 1);
+ tgen_arithi(s, ARITH_ADD + rexw, a0, args[4], 1);
} else {
- tgen_arithr(s, ARITH_ADD + rexw, args[0], args[4]);
+ tgen_arithr(s, ARITH_ADD + rexw, a0, args[4]);
}
if (const_args[5]) {
- tgen_arithi(s, ARITH_ADC + rexw, args[1], args[5], 1);
+ tgen_arithi(s, ARITH_ADC + rexw, a1, args[5], 1);
} else {
- tgen_arithr(s, ARITH_ADC + rexw, args[1], args[5]);
+ tgen_arithr(s, ARITH_ADC + rexw, a1, args[5]);
}
break;
OP_32_64(sub2):
if (const_args[4]) {
- tgen_arithi(s, ARITH_SUB + rexw, args[0], args[4], 1);
+ tgen_arithi(s, ARITH_SUB + rexw, a0, args[4], 1);
} else {
- tgen_arithr(s, ARITH_SUB + rexw, args[0], args[4]);
+ tgen_arithr(s, ARITH_SUB + rexw, a0, args[4]);
}
if (const_args[5]) {
- tgen_arithi(s, ARITH_SBB + rexw, args[1], args[5], 1);
+ tgen_arithi(s, ARITH_SBB + rexw, a1, args[5], 1);
} else {
- tgen_arithr(s, ARITH_SBB + rexw, args[1], args[5]);
+ tgen_arithr(s, ARITH_SBB + rexw, a1, args[5]);
}
break;
@@ -2086,65 +2174,94 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
#else /* TCG_TARGET_REG_BITS == 64 */
case INDEX_op_ld32s_i64:
- tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]);
+ tcg_out_modrm_offset(s, OPC_MOVSLQ, a0, a1, a2);
break;
case INDEX_op_ld_i64:
- tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
+ tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
break;
case INDEX_op_st_i64:
if (const_args[0]) {
- tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW,
- 0, args[1], args[2]);
- tcg_out32(s, args[0]);
+ tcg_out_modrm_offset(s, OPC_MOVL_EvIz | P_REXW, 0, a1, a2);
+ tcg_out32(s, a0);
} else {
- tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
+ tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
}
break;
case INDEX_op_brcond_i64:
- tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1],
- arg_label(args[3]), 0);
+ tcg_out_brcond64(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
break;
case INDEX_op_setcond_i64:
- tcg_out_setcond64(s, args[3], args[0], args[1],
- args[2], const_args[2]);
+ tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2);
break;
case INDEX_op_movcond_i64:
- tcg_out_movcond64(s, args[5], args[0], args[1],
- args[2], const_args[2], args[3]);
+ tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]);
break;
case INDEX_op_bswap64_i64:
- tcg_out_bswap64(s, args[0]);
+ tcg_out_bswap64(s, a0);
break;
case INDEX_op_extu_i32_i64:
case INDEX_op_ext32u_i64:
- tcg_out_ext32u(s, args[0], args[1]);
+ tcg_out_ext32u(s, a0, a1);
break;
case INDEX_op_ext_i32_i64:
case INDEX_op_ext32s_i64:
- tcg_out_ext32s(s, args[0], args[1]);
+ tcg_out_ext32s(s, a0, a1);
break;
#endif
OP_32_64(deposit):
if (args[3] == 0 && args[4] == 8) {
/* load bits 0..7 */
- tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM,
- args[2], args[0]);
+ tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
} else if (args[3] == 8 && args[4] == 8) {
/* load bits 8..15 */
- tcg_out_modrm(s, OPC_MOVB_EvGv, args[2], args[0] + 4);
+ tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
} else if (args[3] == 0 && args[4] == 16) {
/* load bits 0..15 */
- tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, args[2], args[0]);
+ tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
} else {
tcg_abort();
}
break;
+ case INDEX_op_extract_i64:
+ if (a2 + args[3] == 32) {
+ /* This is a 32-bit zero-extending right shift. */
+ tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
+ tcg_out_shifti(s, SHIFT_SHR, a0, a2);
+ break;
+ }
+ /* FALLTHRU */
+ case INDEX_op_extract_i32:
+ /* On the off-chance that we can use the high-byte registers.
+ Otherwise we emit the same ext16 + shift pattern that we
+ would have gotten from the normal tcg-op.c expansion. */
+ tcg_debug_assert(a2 == 8 && args[3] == 8);
+ if (a1 < 4 && a0 < 8) {
+ tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
+ } else {
+ tcg_out_ext16u(s, a0, a1);
+ tcg_out_shifti(s, SHIFT_SHR, a0, 8);
+ }
+ break;
+
+ case INDEX_op_sextract_i32:
+ /* We don't implement sextract_i64, as we cannot sign-extend to
+ 64-bits without using the REX prefix that explicitly excludes
+ access to the high-byte registers. */
+ tcg_debug_assert(a2 == 8 && args[3] == 8);
+ if (a1 < 4 && a0 < 8) {
+ tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
+ } else {
+ tcg_out_ext16s(s, a0, a1, 0);
+ tcg_out_shifti(s, SHIFT_SAR, a0, 8);
+ }
+ break;
+
case INDEX_op_mb:
- tcg_out_mb(s, args[0]);
+ tcg_out_mb(s, a0);
break;
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
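
A plain-C sketch (helper names invented here) of the two extract special cases handled above: when pos + len == 32 on a 64-bit operand, the extract degenerates into a zero-extending 32-bit right shift; and a pos=8, len=8 field is exactly what the x86 high-byte registers such as %ah expose, so MOVZBL/MOVSBL on them give the zero- and sign-extended results directly:

    /* Sketch of the extract/sextract special cases; names are illustrative. */
    #include <stdint.h>
    #include <assert.h>

    static uint64_t extract_hi32(uint64_t t1, unsigned pos)
    {
        return (uint32_t)t1 >> pos;    /* pos + len == 32: 32-bit shift */
    }

    static uint32_t extract_8_8(uint32_t t1)
    {
        return (t1 >> 8) & 0xff;       /* what movzbl of a high-byte reg reads */
    }

    static int32_t sextract_8_8(uint32_t t1)
    {
        return (int8_t)(t1 >> 8);      /* what movsbl of a high-byte reg reads */
    }

    int main(void)
    {
        assert(extract_hi32(0x1122334455667788ULL, 16) == 0x5566);
        assert(extract_8_8(0x00c08000u) == 0x80);
        assert(sextract_8_8(0x00c08000u) == -128);
        return 0;
    }
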
@@ -2158,139 +2275,231 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
#undef OP_32_64
}
-static const TCGTargetOpDef x86_op_defs[] = {
- { INDEX_op_exit_tb, { } },
- { INDEX_op_goto_tb, { } },
- { INDEX_op_br, { } },
- { INDEX_op_ld8u_i32, { "r", "r" } },
- { INDEX_op_ld8s_i32, { "r", "r" } },
- { INDEX_op_ld16u_i32, { "r", "r" } },
- { INDEX_op_ld16s_i32, { "r", "r" } },
- { INDEX_op_ld_i32, { "r", "r" } },
- { INDEX_op_st8_i32, { "qi", "r" } },
- { INDEX_op_st16_i32, { "ri", "r" } },
- { INDEX_op_st_i32, { "ri", "r" } },
-
- { INDEX_op_add_i32, { "r", "r", "ri" } },
- { INDEX_op_sub_i32, { "r", "0", "ri" } },
- { INDEX_op_mul_i32, { "r", "0", "ri" } },
- { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
- { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
- { INDEX_op_and_i32, { "r", "0", "ri" } },
- { INDEX_op_or_i32, { "r", "0", "ri" } },
- { INDEX_op_xor_i32, { "r", "0", "ri" } },
- { INDEX_op_andc_i32, { "r", "r", "ri" } },
-
- { INDEX_op_shl_i32, { "r", "0", "Ci" } },
- { INDEX_op_shr_i32, { "r", "0", "Ci" } },
- { INDEX_op_sar_i32, { "r", "0", "Ci" } },
- { INDEX_op_rotl_i32, { "r", "0", "ci" } },
- { INDEX_op_rotr_i32, { "r", "0", "ci" } },
-
- { INDEX_op_brcond_i32, { "r", "ri" } },
-
- { INDEX_op_bswap16_i32, { "r", "0" } },
- { INDEX_op_bswap32_i32, { "r", "0" } },
-
- { INDEX_op_neg_i32, { "r", "0" } },
-
- { INDEX_op_not_i32, { "r", "0" } },
-
- { INDEX_op_ext8s_i32, { "r", "q" } },
- { INDEX_op_ext16s_i32, { "r", "r" } },
- { INDEX_op_ext8u_i32, { "r", "q" } },
- { INDEX_op_ext16u_i32, { "r", "r" } },
-
- { INDEX_op_setcond_i32, { "q", "r", "ri" } },
-
- { INDEX_op_deposit_i32, { "Q", "0", "Q" } },
- { INDEX_op_movcond_i32, { "r", "r", "ri", "r", "0" } },
-
- { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
- { INDEX_op_muls2_i32, { "a", "d", "a", "r" } },
- { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
- { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
-
- { INDEX_op_mb, { } },
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+ static const TCGTargetOpDef ri_r = { .args_ct_str = { "ri", "r" } };
+ static const TCGTargetOpDef re_r = { .args_ct_str = { "re", "r" } };
+ static const TCGTargetOpDef qi_r = { .args_ct_str = { "qi", "r" } };
+ static const TCGTargetOpDef r_r = { .args_ct_str = { "r", "r" } };
+ static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } };
+ static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } };
+ static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
+ static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
+ static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
+ static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
+ static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
+ static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
+ static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
+ static const TCGTargetOpDef r_L_L = { .args_ct_str = { "r", "L", "L" } };
+ static const TCGTargetOpDef r_r_L = { .args_ct_str = { "r", "r", "L" } };
+ static const TCGTargetOpDef L_L_L = { .args_ct_str = { "L", "L", "L" } };
+ static const TCGTargetOpDef r_r_L_L
+ = { .args_ct_str = { "r", "r", "L", "L" } };
+ static const TCGTargetOpDef L_L_L_L
+ = { .args_ct_str = { "L", "L", "L", "L" } };
+
+ switch (op) {
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld_i64:
+ return &r_r;
-#if TCG_TARGET_REG_BITS == 32
- { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },
- { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } },
-#else
- { INDEX_op_ld8u_i64, { "r", "r" } },
- { INDEX_op_ld8s_i64, { "r", "r" } },
- { INDEX_op_ld16u_i64, { "r", "r" } },
- { INDEX_op_ld16s_i64, { "r", "r" } },
- { INDEX_op_ld32u_i64, { "r", "r" } },
- { INDEX_op_ld32s_i64, { "r", "r" } },
- { INDEX_op_ld_i64, { "r", "r" } },
- { INDEX_op_st8_i64, { "ri", "r" } },
- { INDEX_op_st16_i64, { "ri", "r" } },
- { INDEX_op_st32_i64, { "ri", "r" } },
- { INDEX_op_st_i64, { "re", "r" } },
-
- { INDEX_op_add_i64, { "r", "r", "re" } },
- { INDEX_op_mul_i64, { "r", "0", "re" } },
- { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } },
- { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } },
- { INDEX_op_sub_i64, { "r", "0", "re" } },
- { INDEX_op_and_i64, { "r", "0", "reZ" } },
- { INDEX_op_or_i64, { "r", "0", "re" } },
- { INDEX_op_xor_i64, { "r", "0", "re" } },
- { INDEX_op_andc_i64, { "r", "r", "rI" } },
-
- { INDEX_op_shl_i64, { "r", "0", "Ci" } },
- { INDEX_op_shr_i64, { "r", "0", "Ci" } },
- { INDEX_op_sar_i64, { "r", "0", "Ci" } },
- { INDEX_op_rotl_i64, { "r", "0", "ci" } },
- { INDEX_op_rotr_i64, { "r", "0", "ci" } },
-
- { INDEX_op_brcond_i64, { "r", "re" } },
- { INDEX_op_setcond_i64, { "r", "r", "re" } },
-
- { INDEX_op_bswap16_i64, { "r", "0" } },
- { INDEX_op_bswap32_i64, { "r", "0" } },
- { INDEX_op_bswap64_i64, { "r", "0" } },
- { INDEX_op_neg_i64, { "r", "0" } },
- { INDEX_op_not_i64, { "r", "0" } },
-
- { INDEX_op_ext8s_i64, { "r", "r" } },
- { INDEX_op_ext16s_i64, { "r", "r" } },
- { INDEX_op_ext32s_i64, { "r", "r" } },
- { INDEX_op_ext8u_i64, { "r", "r" } },
- { INDEX_op_ext16u_i64, { "r", "r" } },
- { INDEX_op_ext32u_i64, { "r", "r" } },
-
- { INDEX_op_ext_i32_i64, { "r", "r" } },
- { INDEX_op_extu_i32_i64, { "r", "r" } },
-
- { INDEX_op_deposit_i64, { "Q", "0", "Q" } },
- { INDEX_op_movcond_i64, { "r", "r", "re", "r", "0" } },
-
- { INDEX_op_mulu2_i64, { "a", "d", "a", "r" } },
- { INDEX_op_muls2_i64, { "a", "d", "a", "r" } },
- { INDEX_op_add2_i64, { "r", "r", "0", "1", "re", "re" } },
- { INDEX_op_sub2_i64, { "r", "r", "0", "1", "re", "re" } },
-#endif
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ return &qi_r;
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st_i32:
+ case INDEX_op_st32_i64:
+ return &ri_r;
+ case INDEX_op_st_i64:
+ return &re_r;
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ return &r_r_re;
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ case INDEX_op_mul_i32:
+ case INDEX_op_mul_i64:
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ return &r_0_re;
+
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ {
+ static const TCGTargetOpDef and
+ = { .args_ct_str = { "r", "0", "reZ" } };
+ return &and;
+ }
+ break;
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
+ {
+ static const TCGTargetOpDef andc
+ = { .args_ct_str = { "r", "r", "rI" } };
+ return &andc;
+ }
+ break;
-#if TCG_TARGET_REG_BITS == 64
- { INDEX_op_qemu_ld_i32, { "r", "L" } },
- { INDEX_op_qemu_st_i32, { "L", "L" } },
- { INDEX_op_qemu_ld_i64, { "r", "L" } },
- { INDEX_op_qemu_st_i64, { "L", "L" } },
-#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
- { INDEX_op_qemu_ld_i32, { "r", "L" } },
- { INDEX_op_qemu_st_i32, { "L", "L" } },
- { INDEX_op_qemu_ld_i64, { "r", "r", "L" } },
- { INDEX_op_qemu_st_i64, { "L", "L", "L" } },
-#else
- { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
- { INDEX_op_qemu_st_i32, { "L", "L", "L" } },
- { INDEX_op_qemu_ld_i64, { "r", "r", "L", "L" } },
- { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } },
-#endif
- { -1 },
-};
+ case INDEX_op_shl_i32:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i32:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i32:
+ case INDEX_op_sar_i64:
+ return have_bmi2 ? &r_r_ri : &r_0_ci;
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_rotr_i64:
+ return &r_0_ci;
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ return &r_re;
+
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ case INDEX_op_neg_i32:
+ case INDEX_op_neg_i64:
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ return &r_0;
+
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ return &r_q;
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext16u_i64:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_extract_i32:
+ case INDEX_op_extract_i64:
+ case INDEX_op_sextract_i32:
+ case INDEX_op_ctpop_i32:
+ case INDEX_op_ctpop_i64:
+ return &r_r;
+
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+ {
+ static const TCGTargetOpDef dep
+ = { .args_ct_str = { "Q", "0", "Q" } };
+ return &dep;
+ }
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ {
+ static const TCGTargetOpDef setc
+ = { .args_ct_str = { "q", "r", "re" } };
+ return &setc;
+ }
+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
+ {
+ static const TCGTargetOpDef movc
+ = { .args_ct_str = { "r", "r", "re", "r", "0" } };
+ return &movc;
+ }
+ case INDEX_op_div2_i32:
+ case INDEX_op_div2_i64:
+ case INDEX_op_divu2_i32:
+ case INDEX_op_divu2_i64:
+ {
+ static const TCGTargetOpDef div2
+ = { .args_ct_str = { "a", "d", "0", "1", "r" } };
+ return &div2;
+ }
+ case INDEX_op_mulu2_i32:
+ case INDEX_op_mulu2_i64:
+ case INDEX_op_muls2_i32:
+ case INDEX_op_muls2_i64:
+ {
+ static const TCGTargetOpDef mul2
+ = { .args_ct_str = { "a", "d", "a", "r" } };
+ return &mul2;
+ }
+ case INDEX_op_add2_i32:
+ case INDEX_op_add2_i64:
+ case INDEX_op_sub2_i32:
+ case INDEX_op_sub2_i64:
+ {
+ static const TCGTargetOpDef arith2
+ = { .args_ct_str = { "r", "r", "0", "1", "re", "re" } };
+ return &arith2;
+ }
+ case INDEX_op_ctz_i32:
+ case INDEX_op_ctz_i64:
+ {
+ static const TCGTargetOpDef ctz[2] = {
+ { .args_ct_str = { "&r", "r", "r" } },
+ { .args_ct_str = { "&r", "r", "rW" } },
+ };
+ return &ctz[have_bmi1];
+ }
+ case INDEX_op_clz_i32:
+ case INDEX_op_clz_i64:
+ {
+ static const TCGTargetOpDef clz[2] = {
+ { .args_ct_str = { "&r", "r", "r" } },
+ { .args_ct_str = { "&r", "r", "rW" } },
+ };
+ return &clz[have_lzcnt];
+ }
+
+ case INDEX_op_qemu_ld_i32:
+ return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
+ case INDEX_op_qemu_st_i32:
+ return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L : &L_L_L;
+ case INDEX_op_qemu_ld_i64:
+ return (TCG_TARGET_REG_BITS == 64 ? &r_L
+ : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
+ : &r_r_L_L);
+ case INDEX_op_qemu_st_i64:
+ return (TCG_TARGET_REG_BITS == 64 ? &L_L
+ : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &L_L_L
+ : &L_L_L_L);
+
+ case INDEX_op_brcond2_i32:
+ {
+ static const TCGTargetOpDef b2
+ = { .args_ct_str = { "r", "r", "ri", "ri" } };
+ return &b2;
+ }
+ case INDEX_op_setcond2_i32:
+ {
+ static const TCGTargetOpDef s2
+ = { .args_ct_str = { "r", "r", "r", "ri", "ri" } };
+ return &s2;
+ }
+
+ default:
+ break;
+ }
+ return NULL;
+}
static int tcg_target_callee_save_regs[] = {
#if TCG_TARGET_REG_BITS == 64
@@ -2395,6 +2604,9 @@ static void tcg_target_init(TCGContext *s)
need to probe for it. */
have_movbe = (c & bit_MOVBE) != 0;
#endif
+#ifdef bit_POPCNT
+ have_popcnt = (c & bit_POPCNT) != 0;
+#endif
}
if (max >= 7) {
@@ -2409,6 +2621,15 @@ static void tcg_target_init(TCGContext *s)
}
#endif
+#ifndef have_lzcnt
+ max = __get_cpuid_max(0x8000000, 0);
+ if (max >= 1) {
+ __cpuid(0x80000001, a, b, c, d);
+ /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs. */
+ have_lzcnt = (c & bit_LZCNT) != 0;
+ }
+#endif
+
if (TCG_TARGET_REG_BITS == 64) {
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
@@ -2433,8 +2654,6 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_clear(s->reserved_regs);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
-
- tcg_add_target_add_op_defs(x86_op_defs);
}
typedef struct {
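For reference while reading the constraint tables above and below: the letters in each args_ct_str[] entry name an operand's register class or the immediates it accepts. The following is an editor's sketch, not part of the patch; ExampleOpDef is an illustrative stand-in for the real TCGTargetOpDef from tcg.h, and the glosses paraphrase the i386 target_parse_constraint() cases rather than quoting them.

/* Editor's sketch (hypothetical names): decoding the constraint letters.
 *   "r"  any general-purpose register
 *   "q"  i386: a register whose low byte is addressable
 *   "L"  roughly "r" minus the registers the softmmu slow path needs
 *   "0"  must share a register with output operand 0
 *   "i"  any immediate; "e" (i386) a sign-extended 32-bit immediate
 *   "&"  the output may not overlap any input register
 */
typedef struct ExampleOpDef {
    const char *args_ct_str[5];   /* one constraint string per operand */
} ExampleOpDef;

/* Mirrors the add_i64 case above: destination and first source in any
   register, second source a register or signed 32-bit immediate. */
static const ExampleOpDef example_add_i64 = {
    .args_ct_str = { "r", "r", "re" }
};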
diff --git a/tcg/ia64/tcg-target.h b/tcg/ia64/tcg-target.h
index 6dddb7f772..42aea03a8b 100644
--- a/tcg/ia64/tcg-target.h
+++ b/tcg/ia64/tcg-target.h
@@ -140,6 +140,12 @@ typedef enum {
#define TCG_TARGET_HAS_nand_i32 1
#define TCG_TARGET_HAS_nand_i64 1
#define TCG_TARGET_HAS_nor_i32 1
+#define TCG_TARGET_HAS_clz_i32 0
+#define TCG_TARGET_HAS_clz_i64 0
+#define TCG_TARGET_HAS_ctz_i32 0
+#define TCG_TARGET_HAS_ctz_i64 0
+#define TCG_TARGET_HAS_ctpop_i32 0
+#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_nor_i64 1
#define TCG_TARGET_HAS_orc_i32 1
#define TCG_TARGET_HAS_orc_i64 1
@@ -149,6 +155,10 @@ typedef enum {
#define TCG_TARGET_HAS_movcond_i64 1
#define TCG_TARGET_HAS_deposit_i32 1
#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_extract_i32 0
+#define TCG_TARGET_HAS_extract_i64 0
+#define TCG_TARGET_HAS_sextract_i32 0
+#define TCG_TARGET_HAS_sextract_i64 0
#define TCG_TARGET_HAS_add2_i32 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i32 0
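The new macros above are all defined to 0: the ia64 backend opts out of the clz/ctz/ctpop and extract/sextract opcodes, and the TCG middle end then provides them some other way (an inline expansion or a helper call). As a rough model of what a generic unsigned-extract expansion computes, in plain C rather than TCG opcodes and with an invented function name:

#include <stdint.h>

/* Illustrative model only: an unsigned bitfield extract reduces to two
   shifts when the backend has no native extract.
   Preconditions: 0 < len && ofs + len <= 32. */
static inline uint32_t extract32_model(uint32_t x, unsigned ofs, unsigned len)
{
    return (x << (32 - len - ofs)) >> (32 - len);
}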
diff --git a/tcg/ia64/tcg-target.inc.c b/tcg/ia64/tcg-target.inc.c
index b04d716c3d..bf9a97d75c 100644
--- a/tcg/ia64/tcg-target.inc.c
+++ b/tcg/ia64/tcg-target.inc.c
@@ -721,12 +721,10 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
*/
/* parse target specific constraints */
-static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+static const char *target_parse_constraint(TCGArgConstraint *ct,
+ const char *ct_str, TCGType type)
{
- const char *ct_str;
-
- ct_str = *pct_str;
- switch(ct_str[0]) {
+ switch(*ct_str++) {
case 'r':
ct->ct |= TCG_CT_REG;
tcg_regset_set(ct->u.regs, 0xffffffffffffffffull);
@@ -750,11 +748,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
- return -1;
+ return NULL;
}
- ct_str++;
- *pct_str = ct_str;
- return 0;
+ return ct_str;
}
/* test if a constant matches the constraint */
@@ -2352,6 +2348,18 @@ static const TCGTargetOpDef ia64_op_defs[] = {
{ -1 },
};
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+ int i, n = ARRAY_SIZE(ia64_op_defs);
+
+ for (i = 0; i < n; ++i) {
+ if (ia64_op_defs[i].op == op) {
+ return &ia64_op_defs[i];
+ }
+ }
+ return NULL;
+}
+
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
@@ -2471,6 +2479,4 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R5);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R6);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R7);
-
- tcg_add_target_add_op_defs(ia64_op_defs);
}
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index 3aeac87614..f46d64a3a7 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -27,6 +27,14 @@
#ifndef MIPS_TCG_TARGET_H
#define MIPS_TCG_TARGET_H
+#if _MIPS_SIM == _ABIO32
+# define TCG_TARGET_REG_BITS 32
+#elif _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64
+# define TCG_TARGET_REG_BITS 64
+#else
+# error "Unknown ABI"
+#endif
+
#define TCG_TARGET_INSN_UNIT_SIZE 4
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16
#define TCG_TARGET_NB_REGS 32
@@ -70,9 +78,13 @@ typedef enum {
} TCGReg;
/* used for function call generation */
-#define TCG_TARGET_STACK_ALIGN 8
-#define TCG_TARGET_CALL_STACK_OFFSET 16
-#define TCG_TARGET_CALL_ALIGN_ARGS 1
+#define TCG_TARGET_STACK_ALIGN 16
+#if _MIPS_SIM == _ABIO32
+# define TCG_TARGET_CALL_STACK_OFFSET 16
+#else
+# define TCG_TARGET_CALL_STACK_OFFSET 0
+#endif
+#define TCG_TARGET_CALL_ALIGN_ARGS 1
/* MOVN/MOVZ instructions detection */
#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \
@@ -117,21 +129,71 @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
#define TCG_TARGET_HAS_muluh_i32 1
#define TCG_TARGET_HAS_mulsh_i32 1
+#define TCG_TARGET_HAS_bswap32_i32 1
+
+#if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 1
+#define TCG_TARGET_HAS_extrh_i64_i32 1
+#define TCG_TARGET_HAS_div_i64 1
+#define TCG_TARGET_HAS_rem_i64 1
+#define TCG_TARGET_HAS_not_i64 1
+#define TCG_TARGET_HAS_nor_i64 1
+#define TCG_TARGET_HAS_andc_i64 0
+#define TCG_TARGET_HAS_orc_i64 0
+#define TCG_TARGET_HAS_eqv_i64 0
+#define TCG_TARGET_HAS_nand_i64 0
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0
+#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
+#define TCG_TARGET_HAS_muls2_i64 (!use_mips32r6_instructions)
+#define TCG_TARGET_HAS_muluh_i64 1
+#define TCG_TARGET_HAS_mulsh_i64 1
+#define TCG_TARGET_HAS_ext32s_i64 1
+#define TCG_TARGET_HAS_ext32u_i64 1
+#endif
/* optional instructions detected at runtime */
#define TCG_TARGET_HAS_movcond_i32 use_movnz_instructions
#define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions
-#define TCG_TARGET_HAS_bswap32_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
+#define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions
+#define TCG_TARGET_HAS_sextract_i32 0
#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
+#define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions
+#define TCG_TARGET_HAS_ctz_i32 0
+#define TCG_TARGET_HAS_ctpop_i32 0
+
+#if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_movcond_i64 use_movnz_instructions
+#define TCG_TARGET_HAS_bswap16_i64 use_mips32r2_instructions
+#define TCG_TARGET_HAS_bswap32_i64 use_mips32r2_instructions
+#define TCG_TARGET_HAS_bswap64_i64 use_mips32r2_instructions
+#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions
+#define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions
+#define TCG_TARGET_HAS_sextract_i64 0
+#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions
+#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions
+#define TCG_TARGET_HAS_rot_i64 use_mips32r2_instructions
+#define TCG_TARGET_HAS_clz_i64 use_mips32r2_instructions
+#define TCG_TARGET_HAS_ctz_i64 0
+#define TCG_TARGET_HAS_ctpop_i64 0
+#endif
/* optional instructions automatically implemented */
#define TCG_TARGET_HAS_neg_i32 0 /* sub rd, zero, rt */
#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */
#define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */
+#if TCG_TARGET_REG_BITS == 64
+#define TCG_TARGET_HAS_neg_i64 0 /* sub rd, zero, rt */
+#define TCG_TARGET_HAS_ext8u_i64 0 /* andi rt, rs, 0xff */
+#define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */
+#endif
+
#ifdef __OpenBSD__
#include <machine/sysarch.h>
#else
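In the tcg-target.inc.c hunk that follows, the MIPS opcode enum is rewritten from hexadecimal to octal constants, presumably because octal digits line up more naturally with the 6-bit opcode/function fields and the shifted 5-bit sub-fields of MIPS encodings. Two spot checks (standalone C11, not part of the patch) showing that the rewrite is value-preserving:

#include <assert.h>

/* 013 octal == 11 == 0x0B: SLTIU's major opcode is unchanged. */
static_assert((013 << 26) == (0x0B << 26), "SLTIU encoding preserved");

/* 02040 octal == 1056 == 0x420: SEB's SPECIAL3 function code is unchanged. */
static_assert(02040 == 0x420, "SEB encoding preserved");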
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index abce6026f8..01ac7b2c81 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -32,8 +32,16 @@
# define MIPS_BE 0
#endif
-#define LO_OFF (MIPS_BE * 4)
-#define HI_OFF (4 - LO_OFF)
+#if TCG_TARGET_REG_BITS == 32
+# define LO_OFF (MIPS_BE * 4)
+# define HI_OFF (4 - LO_OFF)
+#else
+/* To assert at compile-time that these values are never used
+ for TCG_TARGET_REG_BITS == 64. */
+/* extern */ int link_error(void);
+# define LO_OFF link_error()
+# define HI_OFF link_error()
+#endif
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
@@ -74,6 +82,8 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
#define TCG_TMP0 TCG_REG_AT
#define TCG_TMP1 TCG_REG_T9
+#define TCG_TMP2 TCG_REG_T8
+#define TCG_TMP3 TCG_REG_T7
/* check if we really need so many registers :P */
static const int tcg_target_reg_alloc_order[] = {
@@ -89,10 +99,6 @@ static const int tcg_target_reg_alloc_order[] = {
TCG_REG_S8,
/* Call clobbered registers. */
- TCG_REG_T0,
- TCG_REG_T1,
- TCG_REG_T2,
- TCG_REG_T3,
TCG_REG_T4,
TCG_REG_T5,
TCG_REG_T6,
@@ -103,17 +109,27 @@ static const int tcg_target_reg_alloc_order[] = {
TCG_REG_V0,
/* Argument registers, opposite order of allocation. */
+ TCG_REG_T3,
+ TCG_REG_T2,
+ TCG_REG_T1,
+ TCG_REG_T0,
TCG_REG_A3,
TCG_REG_A2,
TCG_REG_A1,
TCG_REG_A0,
};
-static const TCGReg tcg_target_call_iarg_regs[4] = {
+static const TCGReg tcg_target_call_iarg_regs[] = {
TCG_REG_A0,
TCG_REG_A1,
TCG_REG_A2,
- TCG_REG_A3
+ TCG_REG_A3,
+#if _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64
+ TCG_REG_T0,
+ TCG_REG_T1,
+ TCG_REG_T2,
+ TCG_REG_T3,
+#endif
};
static const TCGReg tcg_target_call_oarg_regs[2] = {
@@ -122,6 +138,9 @@ static const TCGReg tcg_target_call_oarg_regs[2] = {
};
static tcg_insn_unit *tb_ret_addr;
+static tcg_insn_unit *bswap32_addr;
+static tcg_insn_unit *bswap32u_addr;
+static tcg_insn_unit *bswap64_addr;
static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target)
{
@@ -160,6 +179,7 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
#define TCG_CT_CONST_S16 0x400 /* Signed 16-bit: -32768 - 32767 */
#define TCG_CT_CONST_P2M1 0x800 /* Power of 2 minus 1. */
#define TCG_CT_CONST_N16 0x1000 /* "Negatable" 16-bit: -32767 - 32767 */
+#define TCG_CT_CONST_WSZ 0x2000 /* word size */
static inline bool is_p2m1(tcg_target_long val)
{
@@ -167,27 +187,20 @@ static inline bool is_p2m1(tcg_target_long val)
}
/* parse target specific constraints */
-static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+static const char *target_parse_constraint(TCGArgConstraint *ct,
+ const char *ct_str, TCGType type)
{
- const char *ct_str;
-
- ct_str = *pct_str;
- switch(ct_str[0]) {
+ switch(*ct_str++) {
case 'r':
ct->ct |= TCG_CT_REG;
tcg_regset_set(ct->u.regs, 0xffffffff);
break;
- case 'L': /* qemu_ld output arg constraint */
- ct->ct |= TCG_CT_REG;
- tcg_regset_set(ct->u.regs, 0xffffffff);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_V0);
- break;
- case 'l': /* qemu_ld input arg constraint */
+ case 'L': /* qemu_ld input arg constraint */
ct->ct |= TCG_CT_REG;
tcg_regset_set(ct->u.regs, 0xffffffff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
- if (TARGET_LONG_BITS == 64) {
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
}
#endif
@@ -197,11 +210,11 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_set(ct->u.regs, 0xffffffff);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A0);
#if defined(CONFIG_SOFTMMU)
- if (TARGET_LONG_BITS == 32) {
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
- } else {
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A2);
tcg_regset_reset_reg(ct->u.regs, TCG_REG_A3);
+ } else {
+ tcg_regset_reset_reg(ct->u.regs, TCG_REG_A1);
}
#endif
break;
@@ -217,6 +230,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
case 'N':
ct->ct |= TCG_CT_CONST_N16;
break;
+ case 'W':
+ ct->ct |= TCG_CT_CONST_WSZ;
+ break;
case 'Z':
/* We are cheating a bit here, using the fact that the register
ZERO is also the register number 0. Hence there is no need
@@ -224,11 +240,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
- return -1;
+ return NULL;
}
- ct_str++;
- *pct_str = ct_str;
- return 0;
+ return ct_str;
}
/* test if a constant matches the constraint */
@@ -250,87 +264,131 @@ static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
} else if ((ct & TCG_CT_CONST_P2M1)
&& use_mips32r2_instructions && is_p2m1(val)) {
return 1;
+ } else if ((ct & TCG_CT_CONST_WSZ)
+ && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
+ return 1;
}
return 0;
}
/* instruction opcodes */
typedef enum {
- OPC_J = 0x02 << 26,
- OPC_JAL = 0x03 << 26,
- OPC_BEQ = 0x04 << 26,
- OPC_BNE = 0x05 << 26,
- OPC_BLEZ = 0x06 << 26,
- OPC_BGTZ = 0x07 << 26,
- OPC_ADDIU = 0x09 << 26,
- OPC_SLTI = 0x0A << 26,
- OPC_SLTIU = 0x0B << 26,
- OPC_ANDI = 0x0C << 26,
- OPC_ORI = 0x0D << 26,
- OPC_XORI = 0x0E << 26,
- OPC_LUI = 0x0F << 26,
- OPC_LB = 0x20 << 26,
- OPC_LH = 0x21 << 26,
- OPC_LW = 0x23 << 26,
- OPC_LBU = 0x24 << 26,
- OPC_LHU = 0x25 << 26,
- OPC_LWU = 0x27 << 26,
- OPC_SB = 0x28 << 26,
- OPC_SH = 0x29 << 26,
- OPC_SW = 0x2B << 26,
-
- OPC_SPECIAL = 0x00 << 26,
- OPC_SLL = OPC_SPECIAL | 0x00,
- OPC_SRL = OPC_SPECIAL | 0x02,
- OPC_ROTR = OPC_SPECIAL | (0x01 << 21) | 0x02,
- OPC_SRA = OPC_SPECIAL | 0x03,
- OPC_SLLV = OPC_SPECIAL | 0x04,
- OPC_SRLV = OPC_SPECIAL | 0x06,
- OPC_ROTRV = OPC_SPECIAL | (0x01 << 6) | 0x06,
- OPC_SRAV = OPC_SPECIAL | 0x07,
- OPC_JR_R5 = OPC_SPECIAL | 0x08,
- OPC_JALR = OPC_SPECIAL | 0x09,
- OPC_MOVZ = OPC_SPECIAL | 0x0A,
- OPC_MOVN = OPC_SPECIAL | 0x0B,
- OPC_SYNC = OPC_SPECIAL | 0x0F,
- OPC_MFHI = OPC_SPECIAL | 0x10,
- OPC_MFLO = OPC_SPECIAL | 0x12,
- OPC_MULT = OPC_SPECIAL | 0x18,
- OPC_MUL_R6 = OPC_SPECIAL | (0x02 << 6) | 0x18,
- OPC_MUH = OPC_SPECIAL | (0x03 << 6) | 0x18,
- OPC_MULTU = OPC_SPECIAL | 0x19,
- OPC_MULU = OPC_SPECIAL | (0x02 << 6) | 0x19,
- OPC_MUHU = OPC_SPECIAL | (0x03 << 6) | 0x19,
- OPC_DIV = OPC_SPECIAL | 0x1A,
- OPC_DIV_R6 = OPC_SPECIAL | (0x02 << 6) | 0x1A,
- OPC_MOD = OPC_SPECIAL | (0x03 << 6) | 0x1A,
- OPC_DIVU = OPC_SPECIAL | 0x1B,
- OPC_DIVU_R6 = OPC_SPECIAL | (0x02 << 6) | 0x1B,
- OPC_MODU = OPC_SPECIAL | (0x03 << 6) | 0x1B,
- OPC_ADDU = OPC_SPECIAL | 0x21,
- OPC_SUBU = OPC_SPECIAL | 0x23,
- OPC_AND = OPC_SPECIAL | 0x24,
- OPC_OR = OPC_SPECIAL | 0x25,
- OPC_XOR = OPC_SPECIAL | 0x26,
- OPC_NOR = OPC_SPECIAL | 0x27,
- OPC_SLT = OPC_SPECIAL | 0x2A,
- OPC_SLTU = OPC_SPECIAL | 0x2B,
- OPC_SELEQZ = OPC_SPECIAL | 0x35,
- OPC_SELNEZ = OPC_SPECIAL | 0x37,
-
- OPC_REGIMM = 0x01 << 26,
- OPC_BLTZ = OPC_REGIMM | (0x00 << 16),
- OPC_BGEZ = OPC_REGIMM | (0x01 << 16),
-
- OPC_SPECIAL2 = 0x1c << 26,
- OPC_MUL_R5 = OPC_SPECIAL2 | 0x002,
-
- OPC_SPECIAL3 = 0x1f << 26,
- OPC_EXT = OPC_SPECIAL3 | 0x000,
- OPC_INS = OPC_SPECIAL3 | 0x004,
- OPC_WSBH = OPC_SPECIAL3 | 0x0a0,
- OPC_SEB = OPC_SPECIAL3 | 0x420,
- OPC_SEH = OPC_SPECIAL3 | 0x620,
+ OPC_J = 002 << 26,
+ OPC_JAL = 003 << 26,
+ OPC_BEQ = 004 << 26,
+ OPC_BNE = 005 << 26,
+ OPC_BLEZ = 006 << 26,
+ OPC_BGTZ = 007 << 26,
+ OPC_ADDIU = 011 << 26,
+ OPC_SLTI = 012 << 26,
+ OPC_SLTIU = 013 << 26,
+ OPC_ANDI = 014 << 26,
+ OPC_ORI = 015 << 26,
+ OPC_XORI = 016 << 26,
+ OPC_LUI = 017 << 26,
+ OPC_DADDIU = 031 << 26,
+ OPC_LB = 040 << 26,
+ OPC_LH = 041 << 26,
+ OPC_LW = 043 << 26,
+ OPC_LBU = 044 << 26,
+ OPC_LHU = 045 << 26,
+ OPC_LWU = 047 << 26,
+ OPC_SB = 050 << 26,
+ OPC_SH = 051 << 26,
+ OPC_SW = 053 << 26,
+ OPC_LD = 067 << 26,
+ OPC_SD = 077 << 26,
+
+ OPC_SPECIAL = 000 << 26,
+ OPC_SLL = OPC_SPECIAL | 000,
+ OPC_SRL = OPC_SPECIAL | 002,
+ OPC_ROTR = OPC_SPECIAL | 002 | (1 << 21),
+ OPC_SRA = OPC_SPECIAL | 003,
+ OPC_SLLV = OPC_SPECIAL | 004,
+ OPC_SRLV = OPC_SPECIAL | 006,
+ OPC_ROTRV = OPC_SPECIAL | 006 | 0100,
+ OPC_SRAV = OPC_SPECIAL | 007,
+ OPC_JR_R5 = OPC_SPECIAL | 010,
+ OPC_JALR = OPC_SPECIAL | 011,
+ OPC_MOVZ = OPC_SPECIAL | 012,
+ OPC_MOVN = OPC_SPECIAL | 013,
+ OPC_SYNC = OPC_SPECIAL | 017,
+ OPC_MFHI = OPC_SPECIAL | 020,
+ OPC_MFLO = OPC_SPECIAL | 022,
+ OPC_DSLLV = OPC_SPECIAL | 024,
+ OPC_DSRLV = OPC_SPECIAL | 026,
+ OPC_DROTRV = OPC_SPECIAL | 026 | 0100,
+ OPC_DSRAV = OPC_SPECIAL | 027,
+ OPC_MULT = OPC_SPECIAL | 030,
+ OPC_MUL_R6 = OPC_SPECIAL | 030 | 0200,
+ OPC_MUH = OPC_SPECIAL | 030 | 0300,
+ OPC_MULTU = OPC_SPECIAL | 031,
+ OPC_MULU = OPC_SPECIAL | 031 | 0200,
+ OPC_MUHU = OPC_SPECIAL | 031 | 0300,
+ OPC_DIV = OPC_SPECIAL | 032,
+ OPC_DIV_R6 = OPC_SPECIAL | 032 | 0200,
+ OPC_MOD = OPC_SPECIAL | 032 | 0300,
+ OPC_DIVU = OPC_SPECIAL | 033,
+ OPC_DIVU_R6 = OPC_SPECIAL | 033 | 0200,
+ OPC_MODU = OPC_SPECIAL | 033 | 0300,
+ OPC_DMULT = OPC_SPECIAL | 034,
+ OPC_DMUL = OPC_SPECIAL | 034 | 0200,
+ OPC_DMUH = OPC_SPECIAL | 034 | 0300,
+ OPC_DMULTU = OPC_SPECIAL | 035,
+ OPC_DMULU = OPC_SPECIAL | 035 | 0200,
+ OPC_DMUHU = OPC_SPECIAL | 035 | 0300,
+ OPC_DDIV = OPC_SPECIAL | 036,
+ OPC_DDIV_R6 = OPC_SPECIAL | 036 | 0200,
+ OPC_DMOD = OPC_SPECIAL | 036 | 0300,
+ OPC_DDIVU = OPC_SPECIAL | 037,
+ OPC_DDIVU_R6 = OPC_SPECIAL | 037 | 0200,
+ OPC_DMODU = OPC_SPECIAL | 037 | 0300,
+ OPC_ADDU = OPC_SPECIAL | 041,
+ OPC_SUBU = OPC_SPECIAL | 043,
+ OPC_AND = OPC_SPECIAL | 044,
+ OPC_OR = OPC_SPECIAL | 045,
+ OPC_XOR = OPC_SPECIAL | 046,
+ OPC_NOR = OPC_SPECIAL | 047,
+ OPC_SLT = OPC_SPECIAL | 052,
+ OPC_SLTU = OPC_SPECIAL | 053,
+ OPC_DADDU = OPC_SPECIAL | 055,
+ OPC_DSUBU = OPC_SPECIAL | 057,
+ OPC_SELEQZ = OPC_SPECIAL | 065,
+ OPC_SELNEZ = OPC_SPECIAL | 067,
+ OPC_DSLL = OPC_SPECIAL | 070,
+ OPC_DSRL = OPC_SPECIAL | 072,
+ OPC_DROTR = OPC_SPECIAL | 072 | (1 << 21),
+ OPC_DSRA = OPC_SPECIAL | 073,
+ OPC_DSLL32 = OPC_SPECIAL | 074,
+ OPC_DSRL32 = OPC_SPECIAL | 076,
+ OPC_DROTR32 = OPC_SPECIAL | 076 | (1 << 21),
+ OPC_DSRA32 = OPC_SPECIAL | 077,
+ OPC_CLZ_R6 = OPC_SPECIAL | 0120,
+ OPC_DCLZ_R6 = OPC_SPECIAL | 0122,
+
+ OPC_REGIMM = 001 << 26,
+ OPC_BLTZ = OPC_REGIMM | (000 << 16),
+ OPC_BGEZ = OPC_REGIMM | (001 << 16),
+
+ OPC_SPECIAL2 = 034 << 26,
+ OPC_MUL_R5 = OPC_SPECIAL2 | 002,
+ OPC_CLZ = OPC_SPECIAL2 | 040,
+ OPC_DCLZ = OPC_SPECIAL2 | 044,
+
+ OPC_SPECIAL3 = 037 << 26,
+ OPC_EXT = OPC_SPECIAL3 | 000,
+ OPC_DEXTM = OPC_SPECIAL3 | 001,
+ OPC_DEXTU = OPC_SPECIAL3 | 002,
+ OPC_DEXT = OPC_SPECIAL3 | 003,
+ OPC_INS = OPC_SPECIAL3 | 004,
+ OPC_DINSM = OPC_SPECIAL3 | 005,
+ OPC_DINSU = OPC_SPECIAL3 | 006,
+ OPC_DINS = OPC_SPECIAL3 | 007,
+ OPC_WSBH = OPC_SPECIAL3 | 00240,
+ OPC_DSBH = OPC_SPECIAL3 | 00244,
+ OPC_DSHD = OPC_SPECIAL3 | 00544,
+ OPC_SEB = OPC_SPECIAL3 | 02040,
+ OPC_SEH = OPC_SPECIAL3 | 03040,
/* MIPS r6 doesn't have JR, JALR should be used instead */
OPC_JR = use_mips32r6_instructions ? OPC_JALR : OPC_JR_R5,
@@ -348,6 +406,12 @@ typedef enum {
OPC_SYNC_ACQUIRE = OPC_SYNC | 0x11 << 5,
OPC_SYNC_RELEASE = OPC_SYNC | 0x12 << 5,
OPC_SYNC_RMB = OPC_SYNC | 0x13 << 5,
+
+ /* Aliases for convenience. */
+ ALIAS_PADD = sizeof(void *) == 4 ? OPC_ADDU : OPC_DADDU,
+ ALIAS_PADDI = sizeof(void *) == 4 ? OPC_ADDIU : OPC_DADDIU,
+ ALIAS_TSRL = TARGET_LONG_BITS == 32 || TCG_TARGET_REG_BITS == 32
+ ? OPC_SRL : OPC_DSRL,
} MIPSInsn;
/*
@@ -396,6 +460,21 @@ static inline void tcg_out_opc_bf(TCGContext *s, MIPSInsn opc, TCGReg rt,
tcg_out32(s, inst);
}
+static inline void tcg_out_opc_bf64(TCGContext *s, MIPSInsn opc, MIPSInsn opm,
+ MIPSInsn oph, TCGReg rt, TCGReg rs,
+ int msb, int lsb)
+{
+ if (lsb >= 32) {
+ opc = oph;
+ msb -= 32;
+ lsb -= 32;
+ } else if (msb >= 32) {
+ opc = opm;
+ msb -= 32;
+ }
+ tcg_out_opc_bf(s, opc, rt, rs, msb, lsb);
+}
+
/*
* Type branch
*/
@@ -426,6 +505,18 @@ static inline void tcg_out_opc_sa(TCGContext *s, MIPSInsn opc,
}
+static void tcg_out_opc_sa64(TCGContext *s, MIPSInsn opc1, MIPSInsn opc2,
+ TCGReg rd, TCGReg rt, TCGArg sa)
+{
+ int32_t inst;
+
+ inst = (sa & 32 ? opc2 : opc1);
+ inst |= (rt & 0x1F) << 16;
+ inst |= (rd & 0x1F) << 11;
+ inst |= (sa & 0x1F) << 6;
+ tcg_out32(s, inst);
+}
+
/*
* Type jump.
* Returns true if the branch was in range and the insn was emitted.
@@ -454,28 +545,59 @@ static inline void tcg_out_nop(TCGContext *s)
tcg_out32(s, 0);
}
+static inline void tcg_out_dsll(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
+{
+ tcg_out_opc_sa64(s, OPC_DSLL, OPC_DSLL32, rd, rt, sa);
+}
+
+static inline void tcg_out_dsrl(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
+{
+ tcg_out_opc_sa64(s, OPC_DSRL, OPC_DSRL32, rd, rt, sa);
+}
+
+static inline void tcg_out_dsra(TCGContext *s, TCGReg rd, TCGReg rt, TCGArg sa)
+{
+ tcg_out_opc_sa64(s, OPC_DSRA, OPC_DSRA32, rd, rt, sa);
+}
+
static inline void tcg_out_mov(TCGContext *s, TCGType type,
TCGReg ret, TCGReg arg)
{
/* Simple reg-reg move, optimising out the 'do nothing' case */
if (ret != arg) {
- tcg_out_opc_reg(s, OPC_ADDU, ret, arg, TCG_REG_ZERO);
+ tcg_out_opc_reg(s, OPC_OR, ret, arg, TCG_REG_ZERO);
}
}
-static inline void tcg_out_movi(TCGContext *s, TCGType type,
- TCGReg reg, tcg_target_long arg)
+static void tcg_out_movi(TCGContext *s, TCGType type,
+ TCGReg ret, tcg_target_long arg)
{
+ if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
+ arg = (int32_t)arg;
+ }
if (arg == (int16_t)arg) {
- tcg_out_opc_imm(s, OPC_ADDIU, reg, TCG_REG_ZERO, arg);
- } else if (arg == (uint16_t)arg) {
- tcg_out_opc_imm(s, OPC_ORI, reg, TCG_REG_ZERO, arg);
+ tcg_out_opc_imm(s, OPC_ADDIU, ret, TCG_REG_ZERO, arg);
+ return;
+ }
+ if (arg == (uint16_t)arg) {
+ tcg_out_opc_imm(s, OPC_ORI, ret, TCG_REG_ZERO, arg);
+ return;
+ }
+ if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
+ tcg_out_opc_imm(s, OPC_LUI, ret, TCG_REG_ZERO, arg >> 16);
} else {
- tcg_out_opc_imm(s, OPC_LUI, reg, TCG_REG_ZERO, arg >> 16);
- if (arg & 0xffff) {
- tcg_out_opc_imm(s, OPC_ORI, reg, reg, arg & 0xffff);
+ tcg_out_movi(s, TCG_TYPE_I32, ret, arg >> 31 >> 1);
+ if (arg & 0xffff0000ull) {
+ tcg_out_dsll(s, ret, ret, 16);
+ tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg >> 16);
+ tcg_out_dsll(s, ret, ret, 16);
+ } else {
+ tcg_out_dsll(s, ret, ret, 32);
}
}
+ if (arg & 0xffff) {
+ tcg_out_opc_imm(s, OPC_ORI, ret, ret, arg & 0xffff);
+ }
}
static inline void tcg_out_bswap16(TCGContext *s, TCGReg ret, TCGReg arg)
@@ -513,29 +635,49 @@ static inline void tcg_out_bswap16s(TCGContext *s, TCGReg ret, TCGReg arg)
}
}
-static inline void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
+static void tcg_out_bswap_subr(TCGContext *s, tcg_insn_unit *sub)
+{
+ bool ok = tcg_out_opc_jmp(s, OPC_JAL, sub);
+ tcg_debug_assert(ok);
+}
+
+static void tcg_out_bswap32(TCGContext *s, TCGReg ret, TCGReg arg)
{
if (use_mips32r2_instructions) {
tcg_out_opc_reg(s, OPC_WSBH, ret, 0, arg);
tcg_out_opc_sa(s, OPC_ROTR, ret, ret, 16);
} else {
- /* ret and arg must be different and can't be register at */
- if (ret == arg || ret == TCG_TMP0 || arg == TCG_TMP0) {
- tcg_abort();
- }
-
- tcg_out_opc_sa(s, OPC_SLL, ret, arg, 24);
-
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 24);
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
+ tcg_out_bswap_subr(s, bswap32_addr);
+ /* delay slot -- never omit the insn, like tcg_out_mov might. */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
+ tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
+ }
+}
- tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, arg, 0xff00);
- tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8);
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
+static void tcg_out_bswap32u(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
+ tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
+ tcg_out_dsrl(s, ret, ret, 32);
+ } else {
+ tcg_out_bswap_subr(s, bswap32u_addr);
+ /* delay slot -- never omit the insn, like tcg_out_mov might. */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
+ tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
+ }
+}
- tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, arg, 8);
- tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, TCG_TMP0, 0xff00);
- tcg_out_opc_reg(s, OPC_OR, ret, ret, TCG_TMP0);
+static void tcg_out_bswap64(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_DSBH, ret, 0, arg);
+ tcg_out_opc_reg(s, OPC_DSHD, ret, 0, ret);
+ } else {
+ tcg_out_bswap_subr(s, bswap64_addr);
+ /* delay slot -- never omit the insn, like tcg_out_mov might. */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP0, arg, TCG_REG_ZERO);
+ tcg_out_mov(s, TCG_TYPE_I32, ret, TCG_TMP3);
}
}
@@ -559,6 +701,16 @@ static inline void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
}
}
+static inline void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
+{
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0);
+ } else {
+ tcg_out_dsll(s, ret, arg, 32);
+ tcg_out_dsrl(s, ret, ret, 32);
+ }
+}
+
static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
TCGReg addr, intptr_t ofs)
{
@@ -566,7 +718,7 @@ static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
if (ofs != lo) {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo);
if (addr != TCG_REG_ZERO) {
- tcg_out_opc_reg(s, OPC_ADDU, TCG_TMP0, TCG_TMP0, addr);
+ tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP0, TCG_TMP0, addr);
}
addr = TCG_TMP0;
}
@@ -576,13 +728,21 @@ static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data,
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
TCGReg arg1, intptr_t arg2)
{
- tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
+ MIPSInsn opc = OPC_LD;
+ if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
+ opc = OPC_LW;
+ }
+ tcg_out_ldst(s, opc, arg, arg1, arg2);
}
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
TCGReg arg1, intptr_t arg2)
{
- tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
+ MIPSInsn opc = OPC_SD;
+ if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
+ opc = OPC_SW;
+ }
+ tcg_out_ldst(s, opc, arg, arg1, arg2);
}
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
@@ -595,16 +755,6 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
return false;
}
-static inline void tcg_out_addi(TCGContext *s, TCGReg reg, TCGArg val)
-{
- if (val == (int16_t)val) {
- tcg_out_opc_imm(s, OPC_ADDIU, reg, reg, val);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, val);
- tcg_out_opc_reg(s, OPC_ADDU, reg, reg, TCG_TMP0);
- }
-}
-
static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh, TCGReg al,
TCGReg ah, TCGArg bl, TCGArg bh, bool cbl,
bool cbh, bool is_sub)
@@ -969,6 +1119,10 @@ static void * const qemu_ld_helpers[16] = {
[MO_BESW] = helper_be_ldsw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
[MO_BEQ] = helper_be_ldq_mmu,
+#if TCG_TARGET_REG_BITS == 64
+ [MO_LESL] = helper_le_ldsl_mmu,
+ [MO_BESL] = helper_be_ldsl_mmu,
+#endif
};
static void * const qemu_st_helpers[16] = {
@@ -996,6 +1150,9 @@ static int tcg_out_call_iarg_reg(TCGContext *s, int i, TCGReg arg)
if (i < ARRAY_SIZE(tcg_target_call_iarg_regs)) {
tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[i], arg);
} else {
+ /* For N32 and N64, the initial offset is different. But there
+ we also have 8 argument registers, so we don't run out here. */
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
tcg_out_st(s, TCG_TYPE_REG, arg, TCG_REG_SP, 4 * i);
}
return i + 1;
@@ -1037,6 +1194,7 @@ static int tcg_out_call_iarg_imm(TCGContext *s, int i, TCGArg arg)
static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
{
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
i = (i + 1) & ~1;
i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? ah : al));
i = tcg_out_call_iarg_reg(s, i, (MIPS_BE ? al : ah));
@@ -1044,7 +1202,7 @@ static int tcg_out_call_iarg_reg2(TCGContext *s, int i, TCGReg al, TCGReg ah)
}
/* Perform the tlb comparison operation. The complete host address is
- placed in BASE. Clobbers AT, T0, A0. */
+ placed in BASE. Clobbers TMP0, TMP1, TMP2, A0. */
static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
TCGReg addrh, TCGMemOpIdx oi,
tcg_insn_unit *label_ptr[2], bool is_load)
@@ -1052,6 +1210,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
TCGMemOp opc = get_memop(oi);
unsigned s_bits = opc & MO_SIZE;
unsigned a_bits = get_alignment_bits(opc);
+ target_ulong mask;
int mem_index = get_mmuidx(oi);
int cmp_off
= (is_load
@@ -1059,11 +1218,11 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
: offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
- tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addrl,
+ tcg_out_opc_sa(s, ALIAS_TSRL, TCG_REG_A0, addrl,
TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0,
(CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
- tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);
+ tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, TCG_REG_A0, TCG_AREG0);
/* Compensate for very large offsets. */
if (add_off >= 0x8000) {
@@ -1073,51 +1232,63 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
tlb_table[NB_MMU_MODES - 1][1])
> 0x7ff0 + 0x7fff);
- tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
+ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, TCG_REG_A0, 0x7ff0);
cmp_off -= 0x7ff0;
add_off -= 0x7ff0;
}
- /* Load the (low half) tlb comparator. */
- tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0,
- cmp_off + (TARGET_LONG_BITS == 64 ? LO_OFF : 0));
-
/* We don't currently support unaligned accesses.
We could do so with mips32r6. */
if (a_bits < s_bits) {
a_bits = s_bits;
}
- /* Mask the page bits, keeping the alignment bits to compare against.
- In between on 32-bit targets, load the tlb addend for the fast path. */
- tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1,
- TARGET_PAGE_MASK | ((1 << a_bits) - 1));
- if (TARGET_LONG_BITS == 32) {
- tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
+
+ mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
+
+ /* Load the (low half) tlb comparator. Mask the page bits, keeping the
+ alignment bits to compare against. */
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A0, cmp_off + LO_OFF);
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask);
+ } else {
+ tcg_out_ldst(s,
+ (TARGET_LONG_BITS == 64 ? OPC_LD
+ : TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW),
+ TCG_TMP0, TCG_REG_A0, cmp_off);
+ tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask);
+ /* No second compare is required here;
+ load the tlb addend for the fast path. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_REG_A0, add_off);
}
tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
+ /* Zero extend a 32-bit guest address for a 64-bit host. */
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ tcg_out_ext32u(s, base, addrl);
+ addrl = base;
+ }
+
label_ptr[0] = s->code_ptr;
tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
/* Load and test the high half tlb comparator. */
- if (TARGET_LONG_BITS == 64) {
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
/* delay slot */
- tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, TCG_REG_A0, cmp_off + HI_OFF);
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A0, cmp_off + HI_OFF);
- /* Load the tlb addend for the fast path. We can't do it earlier with
- 64-bit targets or we'll clobber a0 before reading the high half tlb
- comparator. */
- tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, add_off);
+ /* Load the tlb addend for the fast path. */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_REG_A0, add_off);
label_ptr[1] = s->code_ptr;
tcg_out_opc_br(s, OPC_BNE, addrh, TCG_TMP0);
}
/* delay slot */
- tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl);
+ tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl);
}
static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
+ TCGType ext,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addrhi,
void *raddr, tcg_insn_unit *label_ptr[2])
@@ -1126,13 +1297,14 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
label->is_ld = is_ld;
label->oi = oi;
+ label->type = ext;
label->datalo_reg = datalo;
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
label->addrhi_reg = addrhi;
label->raddr = raddr;
label->label_ptr[0] = label_ptr[0];
- if (TARGET_LONG_BITS == 64) {
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
label->label_ptr[1] = label_ptr[1];
}
}
@@ -1146,12 +1318,12 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
/* resolve label address */
reloc_pc16(l->label_ptr[0], s->code_ptr);
- if (TARGET_LONG_BITS == 64) {
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
reloc_pc16(l->label_ptr[1], s->code_ptr);
}
i = 1;
- if (TARGET_LONG_BITS == 64) {
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
} else {
i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
@@ -1163,7 +1335,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
v0 = l->datalo_reg;
- if ((opc & MO_SIZE) == MO_64) {
+ if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
/* We eliminated V0 from the possible output registers, so it
cannot be clobbered here. So we must move V1 first. */
if (MIPS_BE) {
@@ -1177,7 +1349,12 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
reloc_pc16(s->code_ptr, l->raddr);
tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO);
/* delay slot */
- tcg_out_mov(s, TCG_TYPE_REG, v0, TCG_REG_V0);
+ if (TCG_TARGET_REG_BITS == 64 && l->type == TCG_TYPE_I32) {
+ /* we always sign-extend 32-bit loads */
+ tcg_out_opc_sa(s, OPC_SLL, v0, TCG_REG_V0, 0);
+ } else {
+ tcg_out_opc_reg(s, OPC_OR, v0, TCG_REG_V0, TCG_REG_ZERO);
+ }
}
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
@@ -1189,12 +1366,12 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
/* resolve label address */
reloc_pc16(l->label_ptr[0], s->code_ptr);
- if (TARGET_LONG_BITS == 64) {
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
reloc_pc16(l->label_ptr[1], s->code_ptr);
}
i = 1;
- if (TARGET_LONG_BITS == 64) {
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
i = tcg_out_call_iarg_reg2(s, i, l->addrlo_reg, l->addrhi_reg);
} else {
i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
@@ -1210,7 +1387,11 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
break;
case MO_64:
- i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
+ if (TCG_TARGET_REG_BITS == 32) {
+ i = tcg_out_call_iarg_reg2(s, i, l->datalo_reg, l->datahi_reg);
+ } else {
+ i = tcg_out_call_iarg_reg(s, i, l->datalo_reg);
+ }
break;
default:
tcg_abort();
@@ -1227,46 +1408,104 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
}
#endif
-static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
- TCGReg base, TCGMemOp opc)
+static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
+ TCGReg base, TCGMemOp opc, bool is_64)
{
switch (opc & (MO_SSIZE | MO_BSWAP)) {
case MO_UB:
- tcg_out_opc_imm(s, OPC_LBU, datalo, base, 0);
+ tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
break;
case MO_SB:
- tcg_out_opc_imm(s, OPC_LB, datalo, base, 0);
+ tcg_out_opc_imm(s, OPC_LB, lo, base, 0);
break;
case MO_UW | MO_BSWAP:
tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
- tcg_out_bswap16(s, datalo, TCG_TMP1);
+ tcg_out_bswap16(s, lo, TCG_TMP1);
break;
case MO_UW:
- tcg_out_opc_imm(s, OPC_LHU, datalo, base, 0);
+ tcg_out_opc_imm(s, OPC_LHU, lo, base, 0);
break;
case MO_SW | MO_BSWAP:
tcg_out_opc_imm(s, OPC_LHU, TCG_TMP1, base, 0);
- tcg_out_bswap16s(s, datalo, TCG_TMP1);
+ tcg_out_bswap16s(s, lo, TCG_TMP1);
break;
case MO_SW:
- tcg_out_opc_imm(s, OPC_LH, datalo, base, 0);
+ tcg_out_opc_imm(s, OPC_LH, lo, base, 0);
break;
case MO_UL | MO_BSWAP:
- tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 0);
- tcg_out_bswap32(s, datalo, TCG_TMP1);
+ if (TCG_TARGET_REG_BITS == 64 && is_64) {
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
+ tcg_out_bswap32u(s, lo, lo);
+ } else {
+ tcg_out_bswap_subr(s, bswap32u_addr);
+ /* delay slot */
+ tcg_out_opc_imm(s, OPC_LWU, TCG_TMP0, base, 0);
+ tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
+ }
+ break;
+ }
+ /* FALLTHRU */
+ case MO_SL | MO_BSWAP:
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
+ tcg_out_bswap32(s, lo, lo);
+ } else {
+ tcg_out_bswap_subr(s, bswap32_addr);
+ /* delay slot */
+ tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
+ tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_TMP3);
+ }
break;
case MO_UL:
- tcg_out_opc_imm(s, OPC_LW, datalo, base, 0);
+ if (TCG_TARGET_REG_BITS == 64 && is_64) {
+ tcg_out_opc_imm(s, OPC_LWU, lo, base, 0);
+ break;
+ }
+ /* FALLTHRU */
+ case MO_SL:
+ tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
break;
case MO_Q | MO_BSWAP:
- tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, HI_OFF);
- tcg_out_bswap32(s, datalo, TCG_TMP1);
- tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, LO_OFF);
- tcg_out_bswap32(s, datahi, TCG_TMP1);
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
+ tcg_out_bswap64(s, lo, lo);
+ } else {
+ tcg_out_bswap_subr(s, bswap64_addr);
+ /* delay slot */
+ tcg_out_opc_imm(s, OPC_LD, TCG_TMP0, base, 0);
+ tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
+ }
+ } else if (use_mips32r2_instructions) {
+ tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
+ tcg_out_opc_imm(s, OPC_LW, TCG_TMP1, base, 4);
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0);
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1);
+ tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16);
+ tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16);
+ } else {
+ tcg_out_bswap_subr(s, bswap32_addr);
+ /* delay slot */
+ tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 0);
+ tcg_out_opc_imm(s, OPC_LW, TCG_TMP0, base, 4);
+ tcg_out_bswap_subr(s, bswap32_addr);
+ /* delay slot */
+ tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3);
+ tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3);
+ }
break;
case MO_Q:
- tcg_out_opc_imm(s, OPC_LW, datalo, base, LO_OFF);
- tcg_out_opc_imm(s, OPC_LW, datahi, base, HI_OFF);
+ /* Prefer to load from offset 0 first, but allow for overlap. */
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
+ } else if (MIPS_BE ? hi != base : lo == base) {
+ tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF);
+ tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF);
+ } else {
+ tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF);
+ tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF);
+ }
break;
default:
tcg_abort();
@@ -1282,69 +1521,94 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[2];
#endif
- /* Note that we've eliminated V0 from the output registers,
- so we won't overwrite the base register during loading. */
- TCGReg base = TCG_REG_V0;
+ TCGReg base = TCG_REG_A0;
data_regl = *args++;
- data_regh = (is_64 ? *args++ : 0);
+ data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
addr_regl = *args++;
- addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
+ addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
oi = *args++;
opc = get_memop(oi);
#if defined(CONFIG_SOFTMMU)
tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
- add_qemu_ldst_label(s, 1, oi, data_regl, data_regh, addr_regl, addr_regh,
+ tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+ add_qemu_ldst_label(s, 1, oi,
+ (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+ data_regl, data_regh, addr_regl, addr_regh,
s->code_ptr, label_ptr);
#else
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ tcg_out_ext32u(s, base, addr_regl);
+ addr_regl = base;
+ }
if (guest_base == 0 && data_regl != addr_regl) {
base = addr_regl;
} else if (guest_base == (int16_t)guest_base) {
- tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
+ tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
} else {
tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
- tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
+ tcg_out_opc_reg(s, ALIAS_PADD, base, base, addr_regl);
}
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
+ tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}
-static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
+static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
TCGReg base, TCGMemOp opc)
{
+ /* Don't clutter the code below with checks to avoid bswapping ZERO. */
+ if ((lo | hi) == 0) {
+ opc &= ~MO_BSWAP;
+ }
+
switch (opc & (MO_SIZE | MO_BSWAP)) {
case MO_8:
- tcg_out_opc_imm(s, OPC_SB, datalo, base, 0);
+ tcg_out_opc_imm(s, OPC_SB, lo, base, 0);
break;
case MO_16 | MO_BSWAP:
- tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, datalo, 0xffff);
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, lo, 0xffff);
tcg_out_bswap16(s, TCG_TMP1, TCG_TMP1);
- datalo = TCG_TMP1;
+ lo = TCG_TMP1;
/* FALLTHRU */
case MO_16:
- tcg_out_opc_imm(s, OPC_SH, datalo, base, 0);
+ tcg_out_opc_imm(s, OPC_SH, lo, base, 0);
break;
case MO_32 | MO_BSWAP:
- tcg_out_bswap32(s, TCG_TMP1, datalo);
- datalo = TCG_TMP1;
+ tcg_out_bswap32(s, TCG_TMP3, lo);
+ lo = TCG_TMP3;
/* FALLTHRU */
case MO_32:
- tcg_out_opc_imm(s, OPC_SW, datalo, base, 0);
+ tcg_out_opc_imm(s, OPC_SW, lo, base, 0);
break;
case MO_64 | MO_BSWAP:
- tcg_out_bswap32(s, TCG_TMP1, datalo);
- tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, HI_OFF);
- tcg_out_bswap32(s, TCG_TMP1, datahi);
- tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, LO_OFF);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_bswap64(s, TCG_TMP3, lo);
+ tcg_out_opc_imm(s, OPC_SD, TCG_TMP3, base, 0);
+ } else if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? lo : hi);
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? hi : lo);
+ tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16);
+ tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16);
+ tcg_out_opc_imm(s, OPC_SW, TCG_TMP0, base, 0);
+ tcg_out_opc_imm(s, OPC_SW, TCG_TMP1, base, 4);
+ } else {
+ tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi);
+ tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 0);
+ tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo);
+ tcg_out_opc_imm(s, OPC_SW, TCG_TMP3, base, 4);
+ }
break;
case MO_64:
- tcg_out_opc_imm(s, OPC_SW, datalo, base, LO_OFF);
- tcg_out_opc_imm(s, OPC_SW, datahi, base, HI_OFF);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_opc_imm(s, OPC_SD, lo, base, 0);
+ } else {
+ tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? hi : lo, base, 0);
+ tcg_out_opc_imm(s, OPC_SW, MIPS_BE ? lo : hi, base, 4);
+ }
break;
default:
@@ -1355,39 +1619,41 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
TCGReg addr_regl, addr_regh __attribute__((unused));
- TCGReg data_regl, data_regh, base;
+ TCGReg data_regl, data_regh;
TCGMemOpIdx oi;
TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[2];
#endif
+ TCGReg base = TCG_REG_A0;
data_regl = *args++;
- data_regh = (is_64 ? *args++ : 0);
+ data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
addr_regl = *args++;
- addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
+ addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
oi = *args++;
opc = get_memop(oi);
#if defined(CONFIG_SOFTMMU)
- /* Note that we eliminated the helper's address argument,
- so we can reuse that for the base. */
- base = (TARGET_LONG_BITS == 32 ? TCG_REG_A1 : TCG_REG_A2);
tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
- add_qemu_ldst_label(s, 0, oi, data_regl, data_regh, addr_regl, addr_regh,
+ add_qemu_ldst_label(s, 0, oi,
+ (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+ data_regl, data_regh, addr_regl, addr_regh,
s->code_ptr, label_ptr);
#else
+ base = TCG_REG_A0;
+ if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
+ tcg_out_ext32u(s, base, addr_regl);
+ addr_regl = base;
+ }
if (guest_base == 0) {
base = addr_regl;
+ } else if (guest_base == (int16_t)guest_base) {
+ tcg_out_opc_imm(s, ALIAS_PADDI, base, addr_regl, guest_base);
} else {
- base = TCG_REG_A0;
- if (guest_base == (int16_t)guest_base) {
- tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
- tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl);
- }
+ tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base);
+ tcg_out_opc_reg(s, ALIAS_PADD, base, base, addr_regl);
}
tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
@@ -1409,6 +1675,33 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
tcg_out32(s, sync[a0 & TCG_MO_ALL]);
}
+static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6,
+ int width, TCGReg a0, TCGReg a1, TCGArg a2)
+{
+ if (use_mips32r6_instructions) {
+ if (a2 == width) {
+ tcg_out_opc_reg(s, opcv6, a0, a1, 0);
+ } else {
+ tcg_out_opc_reg(s, opcv6, TCG_TMP0, a1, 0);
+ tcg_out_movcond(s, TCG_COND_EQ, a0, a1, 0, a2, TCG_TMP0);
+ }
+ } else {
+ if (a2 == width) {
+ tcg_out_opc_reg(s, opcv2, a0, a1, a1);
+ } else if (a0 == a2) {
+ tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
+ tcg_out_opc_reg(s, OPC_MOVN, a0, TCG_TMP0, a1);
+ } else if (a0 != a1) {
+ tcg_out_opc_reg(s, opcv2, a0, a1, a1);
+ tcg_out_opc_reg(s, OPC_MOVZ, a0, a2, a1);
+ } else {
+ tcg_out_opc_reg(s, opcv2, TCG_TMP0, a1, a1);
+ tcg_out_opc_reg(s, OPC_MOVZ, TCG_TMP0, a2, a1);
+ tcg_out_mov(s, TCG_TYPE_REG, a0, TCG_TMP0);
+ }
+ }
+}
+
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg *args, const int *const_args)
{
@@ -1426,6 +1719,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
{
TCGReg b0 = TCG_REG_ZERO;
+ a0 = (intptr_t)a0;
if (a0 & ~0xffff) {
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
b0 = TCG_REG_V0;
@@ -1459,28 +1753,45 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
i1 = OPC_LBU;
goto do_ldst;
case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
i1 = OPC_LB;
goto do_ldst;
case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
i1 = OPC_LHU;
goto do_ldst;
case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
i1 = OPC_LH;
goto do_ldst;
case INDEX_op_ld_i32:
+ case INDEX_op_ld32s_i64:
i1 = OPC_LW;
goto do_ldst;
+ case INDEX_op_ld32u_i64:
+ i1 = OPC_LWU;
+ goto do_ldst;
+ case INDEX_op_ld_i64:
+ i1 = OPC_LD;
+ goto do_ldst;
case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
i1 = OPC_SB;
goto do_ldst;
case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
i1 = OPC_SH;
goto do_ldst;
case INDEX_op_st_i32:
+ case INDEX_op_st32_i64:
i1 = OPC_SW;
+ goto do_ldst;
+ case INDEX_op_st_i64:
+ i1 = OPC_SD;
do_ldst:
tcg_out_ldst(s, i1, a0, a1, a2);
break;
@@ -1488,10 +1799,15 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_add_i32:
i1 = OPC_ADDU, i2 = OPC_ADDIU;
goto do_binary;
+ case INDEX_op_add_i64:
+ i1 = OPC_DADDU, i2 = OPC_DADDIU;
+ goto do_binary;
case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
i1 = OPC_OR, i2 = OPC_ORI;
goto do_binary;
case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
i1 = OPC_XOR, i2 = OPC_XORI;
do_binary:
if (c2) {
@@ -1503,12 +1819,16 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_sub_i32:
+ i1 = OPC_SUBU, i2 = OPC_ADDIU;
+ goto do_subtract;
+ case INDEX_op_sub_i64:
+ i1 = OPC_DSUBU, i2 = OPC_DADDIU;
+ do_subtract:
if (c2) {
- tcg_out_opc_imm(s, OPC_ADDIU, a0, a1, -a2);
+ tcg_out_opc_imm(s, i2, a0, a1, -a2);
break;
}
- i1 = OPC_SUBU;
- goto do_binary;
+ goto do_binaryv;
case INDEX_op_and_i32:
if (c2 && a2 != (uint16_t)a2) {
int msb = ctz32(~a2) - 1;
@@ -1519,7 +1839,18 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
}
i1 = OPC_AND, i2 = OPC_ANDI;
goto do_binary;
+ case INDEX_op_and_i64:
+ if (c2 && a2 != (uint16_t)a2) {
+ int msb = ctz64(~a2) - 1;
+ tcg_debug_assert(use_mips32r2_instructions);
+ tcg_debug_assert(is_p2m1(a2));
+ tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1, msb, 0);
+ break;
+ }
+ i1 = OPC_AND, i2 = OPC_ANDI;
+ goto do_binary;
case INDEX_op_nor_i32:
+ case INDEX_op_nor_i64:
i1 = OPC_NOR;
goto do_binaryv;
@@ -1571,6 +1902,55 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
}
i1 = OPC_DIVU, i2 = OPC_MFHI;
+ goto do_hilo1;
+ case INDEX_op_mul_i64:
+ if (use_mips32r6_instructions) {
+ tcg_out_opc_reg(s, OPC_DMUL, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DMULT, i2 = OPC_MFLO;
+ goto do_hilo1;
+ case INDEX_op_mulsh_i64:
+ if (use_mips32r6_instructions) {
+ tcg_out_opc_reg(s, OPC_DMUH, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DMULT, i2 = OPC_MFHI;
+ goto do_hilo1;
+ case INDEX_op_muluh_i64:
+ if (use_mips32r6_instructions) {
+ tcg_out_opc_reg(s, OPC_DMUHU, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DMULTU, i2 = OPC_MFHI;
+ goto do_hilo1;
+ case INDEX_op_div_i64:
+ if (use_mips32r6_instructions) {
+ tcg_out_opc_reg(s, OPC_DDIV_R6, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DDIV, i2 = OPC_MFLO;
+ goto do_hilo1;
+ case INDEX_op_divu_i64:
+ if (use_mips32r6_instructions) {
+ tcg_out_opc_reg(s, OPC_DDIVU_R6, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DDIVU, i2 = OPC_MFLO;
+ goto do_hilo1;
+ case INDEX_op_rem_i64:
+ if (use_mips32r6_instructions) {
+ tcg_out_opc_reg(s, OPC_DMOD, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DDIV, i2 = OPC_MFHI;
+ goto do_hilo1;
+ case INDEX_op_remu_i64:
+ if (use_mips32r6_instructions) {
+ tcg_out_opc_reg(s, OPC_DMODU, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DDIVU, i2 = OPC_MFHI;
do_hilo1:
tcg_out_opc_reg(s, i1, 0, a1, a2);
tcg_out_opc_reg(s, i2, a0, 0, 0);
@@ -1581,6 +1961,12 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
goto do_hilo2;
case INDEX_op_mulu2_i32:
i1 = OPC_MULTU;
+ goto do_hilo2;
+ case INDEX_op_muls2_i64:
+ i1 = OPC_DMULT;
+ goto do_hilo2;
+ case INDEX_op_mulu2_i64:
+ i1 = OPC_DMULTU;
do_hilo2:
tcg_out_opc_reg(s, i1, 0, a2, args[3]);
tcg_out_opc_reg(s, OPC_MFLO, a0, 0, 0);
@@ -1588,20 +1974,46 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
i1 = OPC_NOR;
goto do_unary;
case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
i1 = OPC_WSBH;
goto do_unary;
case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
i1 = OPC_SEB;
goto do_unary;
case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
i1 = OPC_SEH;
do_unary:
tcg_out_opc_reg(s, i1, a0, TCG_REG_ZERO, a1);
break;
+ case INDEX_op_bswap32_i32:
+ tcg_out_bswap32(s, a0, a1);
+ break;
+ case INDEX_op_bswap32_i64:
+ tcg_out_bswap32u(s, a0, a1);
+ break;
+ case INDEX_op_bswap64_i64:
+ tcg_out_bswap64(s, a0, a1);
+ break;
+ case INDEX_op_extrh_i64_i32:
+ tcg_out_dsra(s, a0, a1, 32);
+ break;
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_extrl_i64_i32:
+ tcg_out_opc_sa(s, OPC_SLL, a0, a1, 0);
+ break;
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_extu_i32_i64:
+ tcg_out_ext32u(s, a0, a1);
+ break;
+
case INDEX_op_sar_i32:
i1 = OPC_SRAV, i2 = OPC_SRA;
goto do_shift;
@@ -1616,9 +2028,10 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
do_shift:
if (c2) {
tcg_out_opc_sa(s, i2, a0, a1, a2);
- } else {
- tcg_out_opc_reg(s, i1, a0, a2, a1);
+ break;
}
+ do_shiftv:
+ tcg_out_opc_reg(s, i1, a0, a2, a1);
break;
case INDEX_op_rotl_i32:
if (c2) {
@@ -1628,17 +2041,67 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_opc_reg(s, OPC_ROTRV, a0, TCG_TMP0, a1);
}
break;
+ case INDEX_op_sar_i64:
+ if (c2) {
+ tcg_out_dsra(s, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DSRAV;
+ goto do_shiftv;
+ case INDEX_op_shl_i64:
+ if (c2) {
+ tcg_out_dsll(s, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DSLLV;
+ goto do_shiftv;
+ case INDEX_op_shr_i64:
+ if (c2) {
+ tcg_out_dsrl(s, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DSRLV;
+ goto do_shiftv;
+ case INDEX_op_rotr_i64:
+ if (c2) {
+ tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, a2);
+ break;
+ }
+ i1 = OPC_DROTRV;
+ goto do_shiftv;
+ case INDEX_op_rotl_i64:
+ if (c2) {
+ tcg_out_opc_sa64(s, OPC_DROTR, OPC_DROTR32, a0, a1, 64 - a2);
+ } else {
+ tcg_out_opc_reg(s, OPC_DSUBU, TCG_TMP0, TCG_REG_ZERO, a2);
+ tcg_out_opc_reg(s, OPC_DROTRV, a0, TCG_TMP0, a1);
+ }
+ break;
- case INDEX_op_bswap32_i32:
- tcg_out_opc_reg(s, OPC_WSBH, a0, 0, a1);
- tcg_out_opc_sa(s, OPC_ROTR, a0, a0, 16);
+ case INDEX_op_clz_i32:
+ tcg_out_clz(s, OPC_CLZ, OPC_CLZ_R6, 32, a0, a1, a2);
+ break;
+ case INDEX_op_clz_i64:
+ tcg_out_clz(s, OPC_DCLZ, OPC_DCLZ_R6, 64, a0, a1, a2);
break;
case INDEX_op_deposit_i32:
tcg_out_opc_bf(s, OPC_INS, a0, a2, args[3] + args[4] - 1, args[3]);
break;
+ case INDEX_op_deposit_i64:
+ tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2,
+ args[3] + args[4] - 1, args[3]);
+ break;
+ case INDEX_op_extract_i32:
+ tcg_out_opc_bf(s, OPC_EXT, a0, a1, a2 + args[3] - 1, a2);
+ break;
+ case INDEX_op_extract_i64:
+ tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1,
+ a2 + args[3] - 1, a2);
+ break;
case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
break;
case INDEX_op_brcond2_i32:
@@ -1646,10 +2109,12 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
tcg_out_movcond(s, args[5], a0, a1, a2, args[3], args[4]);
break;
case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
tcg_out_setcond(s, args[3], a0, a1, a2);
break;
case INDEX_op_setcond2_i32:
@@ -1682,7 +2147,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_mb(s, a0);
break;
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
+ case INDEX_op_mov_i64:
case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
+ case INDEX_op_movi_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
default:
tcg_abort();
@@ -1728,6 +2195,7 @@ static const TCGTargetOpDef mips_op_defs[] = {
{ INDEX_op_sar_i32, { "r", "rZ", "ri" } },
{ INDEX_op_rotr_i32, { "r", "rZ", "ri" } },
{ INDEX_op_rotl_i32, { "r", "rZ", "ri" } },
+ { INDEX_op_clz_i32, { "r", "r", "rWZ" } },
{ INDEX_op_bswap16_i32, { "r", "r" } },
{ INDEX_op_bswap32_i32, { "r", "r" } },
@@ -1736,6 +2204,7 @@ static const TCGTargetOpDef mips_op_defs[] = {
{ INDEX_op_ext16s_i32, { "r", "rZ" } },
{ INDEX_op_deposit_i32, { "r", "0", "rZ" } },
+ { INDEX_op_extract_i32, { "r", "r" } },
{ INDEX_op_brcond_i32, { "rZ", "rZ" } },
#if use_mips32r6_instructions
@@ -1744,21 +2213,91 @@ static const TCGTargetOpDef mips_op_defs[] = {
{ INDEX_op_movcond_i32, { "r", "rZ", "rZ", "rZ", "0" } },
#endif
{ INDEX_op_setcond_i32, { "r", "rZ", "rZ" } },
- { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
+#if TCG_TARGET_REG_BITS == 32
{ INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
{ INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rN", "rN" } },
+ { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rZ", "rZ" } },
{ INDEX_op_brcond2_i32, { "rZ", "rZ", "rZ", "rZ" } },
+#endif
-#if TARGET_LONG_BITS == 32
- { INDEX_op_qemu_ld_i32, { "L", "lZ" } },
+#if TCG_TARGET_REG_BITS == 64
+ { INDEX_op_ld8u_i64, { "r", "r" } },
+ { INDEX_op_ld8s_i64, { "r", "r" } },
+ { INDEX_op_ld16u_i64, { "r", "r" } },
+ { INDEX_op_ld16s_i64, { "r", "r" } },
+ { INDEX_op_ld32s_i64, { "r", "r" } },
+ { INDEX_op_ld32u_i64, { "r", "r" } },
+ { INDEX_op_ld_i64, { "r", "r" } },
+ { INDEX_op_st8_i64, { "rZ", "r" } },
+ { INDEX_op_st16_i64, { "rZ", "r" } },
+ { INDEX_op_st32_i64, { "rZ", "r" } },
+ { INDEX_op_st_i64, { "rZ", "r" } },
+
+ { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
+ { INDEX_op_mul_i64, { "r", "rZ", "rZ" } },
+#if !use_mips32r6_instructions
+ { INDEX_op_muls2_i64, { "r", "r", "rZ", "rZ" } },
+ { INDEX_op_mulu2_i64, { "r", "r", "rZ", "rZ" } },
+#endif
+ { INDEX_op_mulsh_i64, { "r", "rZ", "rZ" } },
+ { INDEX_op_muluh_i64, { "r", "rZ", "rZ" } },
+ { INDEX_op_div_i64, { "r", "rZ", "rZ" } },
+ { INDEX_op_divu_i64, { "r", "rZ", "rZ" } },
+ { INDEX_op_rem_i64, { "r", "rZ", "rZ" } },
+ { INDEX_op_remu_i64, { "r", "rZ", "rZ" } },
+ { INDEX_op_sub_i64, { "r", "rZ", "rN" } },
+
+ { INDEX_op_and_i64, { "r", "rZ", "rIK" } },
+ { INDEX_op_nor_i64, { "r", "rZ", "rZ" } },
+ { INDEX_op_not_i64, { "r", "rZ" } },
+ { INDEX_op_or_i64, { "r", "rZ", "rI" } },
+ { INDEX_op_xor_i64, { "r", "rZ", "rI" } },
+
+ { INDEX_op_shl_i64, { "r", "rZ", "ri" } },
+ { INDEX_op_shr_i64, { "r", "rZ", "ri" } },
+ { INDEX_op_sar_i64, { "r", "rZ", "ri" } },
+ { INDEX_op_rotr_i64, { "r", "rZ", "ri" } },
+ { INDEX_op_rotl_i64, { "r", "rZ", "ri" } },
+ { INDEX_op_clz_i64, { "r", "r", "rWZ" } },
+
+ { INDEX_op_bswap16_i64, { "r", "r" } },
+ { INDEX_op_bswap32_i64, { "r", "r" } },
+ { INDEX_op_bswap64_i64, { "r", "r" } },
+
+ { INDEX_op_ext8s_i64, { "r", "rZ" } },
+ { INDEX_op_ext16s_i64, { "r", "rZ" } },
+ { INDEX_op_ext32s_i64, { "r", "rZ" } },
+ { INDEX_op_ext32u_i64, { "r", "rZ" } },
+ { INDEX_op_ext_i32_i64, { "r", "rZ" } },
+ { INDEX_op_extu_i32_i64, { "r", "rZ" } },
+ { INDEX_op_extrl_i64_i32, { "r", "rZ" } },
+ { INDEX_op_extrh_i64_i32, { "r", "rZ" } },
+
+ { INDEX_op_deposit_i64, { "r", "0", "rZ" } },
+ { INDEX_op_extract_i64, { "r", "r" } },
+
+ { INDEX_op_brcond_i64, { "rZ", "rZ" } },
+#if use_mips32r6_instructions
+ { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rZ", "rZ" } },
+#else
+ { INDEX_op_movcond_i64, { "r", "rZ", "rZ", "rZ", "0" } },
+#endif
+ { INDEX_op_setcond_i64, { "r", "rZ", "rZ" } },
+
+ { INDEX_op_qemu_ld_i32, { "r", "LZ" } },
{ INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
- { INDEX_op_qemu_ld_i64, { "L", "L", "lZ" } },
+ { INDEX_op_qemu_ld_i64, { "r", "LZ" } },
+ { INDEX_op_qemu_st_i64, { "SZ", "SZ" } },
+#elif TARGET_LONG_BITS == 32
+ { INDEX_op_qemu_ld_i32, { "r", "LZ" } },
+ { INDEX_op_qemu_st_i32, { "SZ", "SZ" } },
+ { INDEX_op_qemu_ld_i64, { "r", "r", "LZ" } },
{ INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ" } },
#else
- { INDEX_op_qemu_ld_i32, { "L", "lZ", "lZ" } },
+ { INDEX_op_qemu_ld_i32, { "r", "LZ", "LZ" } },
{ INDEX_op_qemu_st_i32, { "SZ", "SZ", "SZ" } },
- { INDEX_op_qemu_ld_i64, { "L", "L", "lZ", "lZ" } },
+ { INDEX_op_qemu_ld_i64, { "r", "r", "LZ", "LZ" } },
{ INDEX_op_qemu_st_i64, { "SZ", "SZ", "SZ", "SZ" } },
#endif
@@ -1766,6 +2305,18 @@ static const TCGTargetOpDef mips_op_defs[] = {
{ -1 },
};
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+ int i, n = ARRAY_SIZE(mips_op_defs);
+
+ for (i = 0; i < n; ++i) {
+ if (mips_op_defs[i].op == op) {
+ return &mips_op_defs[i];
+ }
+ }
+ return NULL;
+}
+
static int tcg_target_callee_save_regs[] = {
TCG_REG_S0, /* used for the global env (TCG_AREG0) */
TCG_REG_S1,
@@ -1858,47 +2409,186 @@ static void tcg_target_detect_isa(void)
sigaction(SIGILL, &sa_old, NULL);
}
+static tcg_insn_unit *align_code_ptr(TCGContext *s)
+{
+ uintptr_t p = (uintptr_t)s->code_ptr;
+ if (p & 15) {
+ p = (p + 15) & -16;
+ s->code_ptr = (void *)p;
+ }
+ return s->code_ptr;
+}
+
+/* Stack frame parameters. */
+#define REG_SIZE (TCG_TARGET_REG_BITS / 8)
+#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
+#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
+
+#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
+ + TCG_TARGET_STACK_ALIGN - 1) \
+ & -TCG_TARGET_STACK_ALIGN)
+#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
+
+/* We're expecting to be able to use an immediate for frame allocation. */
+QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7fff);
+
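As a sanity check of the frame layout above, the arithmetic with assumed values for the generic constants (the real ones come from tcg.h and tcg-target.h, so treat these numbers as assumptions) looks like this:

/*
 * Illustrative numbers only (assumed: MIPS64 host, 10 callee-saved regs,
 * TCG_STATIC_CALL_ARGS_SIZE = 128, CPU_TEMP_BUF_NLONGS = 128,
 * TCG_TARGET_STACK_ALIGN = 16):
 *   REG_SIZE   = 8            SAVE_SIZE = 10 * 8 = 80
 *   TEMP_SIZE  = 128 * 8      = 1024
 *   FRAME_SIZE = (128 + 1024 + 80 + 15) & -16 = 1232
 *   SAVE_OFS   = 128 + 1024   = 1152
 * 1232 fits comfortably in the 16-bit signed immediate that the
 * QEMU_BUILD_BUG_ON above guards against overflowing.
 */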
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
- int i, frame_size;
+ int i;
- /* reserve some stack space, also for TCG temps. */
- frame_size = ARRAY_SIZE(tcg_target_callee_save_regs) * 4
- + TCG_STATIC_CALL_ARGS_SIZE
- + CPU_TEMP_BUF_NLONGS * sizeof(long);
- frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
- ~(TCG_TARGET_STACK_ALIGN - 1);
- tcg_set_frame(s, TCG_REG_SP, ARRAY_SIZE(tcg_target_callee_save_regs) * 4
- + TCG_STATIC_CALL_ARGS_SIZE,
- CPU_TEMP_BUF_NLONGS * sizeof(long));
+ tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
/* TB prologue */
- tcg_out_addi(s, TCG_REG_SP, -frame_size);
- for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
- tcg_out_st(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
- TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
+ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
+ for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
+ tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
+ TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
}
/* Call generated code */
tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0);
+ /* delay slot */
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
- tb_ret_addr = s->code_ptr;
/* TB epilogue */
- for(i = 0 ; i < ARRAY_SIZE(tcg_target_callee_save_regs) ; i++) {
- tcg_out_ld(s, TCG_TYPE_I32, tcg_target_callee_save_regs[i],
- TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE + i * 4);
+ tb_ret_addr = s->code_ptr;
+ for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
+ tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
+ TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
}
tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
- tcg_out_addi(s, TCG_REG_SP, frame_size);
+ /* delay slot */
+ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
+
+ if (use_mips32r2_instructions) {
+ return;
+ }
+
+ /* Bswap subroutines: Input in TCG_TMP0, output in TCG_TMP3;
+ clobbers TCG_TMP1, TCG_TMP2. */
+
+ /*
+ * bswap32 -- 32-bit swap (signed result for mips64). a0 = abcd.
+ */
+ bswap32_addr = align_code_ptr(s);
+ /* t3 = (ssss)d000 */
+ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP3, TCG_TMP0, 24);
+ /* t1 = 000a */
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 24);
+ /* t2 = 00c0 */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
+ /* t3 = d00a */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+ /* t1 = 0abc */
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8);
+ /* t2 = 0c00 */
+ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8);
+ /* t1 = 00b0 */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
+ /* t3 = dc0a */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
+ tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
+ /* t3 = dcba -- delay slot */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
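For orientation, the emitted sequence computes the same shuffle as this rough C sketch; the t1/t2/t3 names follow the comments above, and the MIPS64 sign-extension of the 32-bit result is ignored here:

#include <stdint.h>

/* Rough equivalent of the generated bswap32 subroutine; input "abcd". */
static uint32_t bswap32_sketch(uint32_t in)
{
    uint32_t t3 = in << 24;              /* d000 */
    uint32_t t1 = in >> 24;              /* 000a */
    uint32_t t2 = in & 0xff00;           /* 00c0 */
    t3 |= t1;                            /* d00a */
    t1 = (in >> 8) & 0xff00;             /* 00b0 */
    t2 <<= 8;                            /* 0c00 */
    t3 |= t2;                            /* dc0a */
    return t3 | t1;                      /* dcba */
}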
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ return;
+ }
+
+ /*
+ * bswap32u -- unsigned 32-bit swap. a0 = ....abcd.
+ */
+ bswap32u_addr = align_code_ptr(s);
+ /* t1 = (0000)000d */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP0, 0xff);
+ /* t3 = 000a */
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, TCG_TMP0, 24);
+ /* t1 = (0000)d000 */
+ tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24);
+ /* t2 = 00c0 */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
+ /* t3 = d00a */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+ /* t1 = 0abc */
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP1, TCG_TMP0, 8);
+ /* t2 = 0c00 */
+ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP2, TCG_TMP2, 8);
+ /* t1 = 00b0 */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
+ /* t3 = dc0a */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
+ tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
+ /* t3 = dcba -- delay slot */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+
+ /*
+ * bswap64 -- 64-bit swap. a0 = abcdefgh
+ */
+ bswap64_addr = align_code_ptr(s);
+ /* t3 = h0000000 */
+ tcg_out_dsll(s, TCG_TMP3, TCG_TMP0, 56);
+ /* t1 = 0000000a */
+ tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 56);
+
+ /* t2 = 000000g0 */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP0, 0xff00);
+ /* t3 = h000000a */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+ /* t1 = 00000abc */
+ tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 40);
+ /* t2 = 0g000000 */
+ tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40);
+ /* t1 = 000000b0 */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
+
+ /* t3 = hg00000a */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
+ /* t2 = 0000abcd */
+ tcg_out_dsrl(s, TCG_TMP2, TCG_TMP0, 32);
+ /* t3 = hg0000ba */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+
+ /* t1 = 000000c0 */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP2, 0xff00);
+ /* t2 = 0000000d */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP2, 0x00ff);
+ /* t1 = 00000c00 */
+ tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 8);
+ /* t2 = 0000d000 */
+ tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 24);
+
+ /* t3 = hg000cba */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
+ /* t1 = 00abcdef */
+ tcg_out_dsrl(s, TCG_TMP1, TCG_TMP0, 16);
+ /* t3 = hg00dcba */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
+
+ /* t2 = 0000000f */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP2, TCG_TMP1, 0x00ff);
+ /* t1 = 000000e0 */
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP1, TCG_TMP1, 0xff00);
+ /* t2 = 00f00000 */
+ tcg_out_dsll(s, TCG_TMP2, TCG_TMP2, 40);
+ /* t1 = 000e0000 */
+ tcg_out_dsll(s, TCG_TMP1, TCG_TMP1, 24);
+
+ /* t3 = hgf0dcba */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP2);
+ tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0);
+ /* t3 = hgfedcba -- delay slot */
+ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
}
static void tcg_target_init(TCGContext *s)
{
tcg_target_detect_isa();
tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I32], 0xffffffff);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_regset_set(tcg_target_available_regs[TCG_TYPE_I64], 0xffffffff);
+ }
tcg_regset_set(tcg_target_call_clobber_regs,
(1 << TCG_REG_V0) |
(1 << TCG_REG_V1) |
@@ -1923,11 +2613,11 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_K1); /* kernel use only */
tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* internal use */
tcg_regset_set_reg(s->reserved_regs, TCG_TMP1); /* internal use */
+ tcg_regset_set_reg(s->reserved_regs, TCG_TMP2); /* internal use */
+ tcg_regset_set_reg(s->reserved_regs, TCG_TMP3); /* internal use */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */
-
- tcg_add_target_add_op_defs(mips_op_defs);
}
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
@@ -1935,3 +2625,47 @@ void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
flush_icache_range(jmp_addr, jmp_addr + 4);
}
+
+typedef struct {
+ DebugFrameHeader h;
+ uint8_t fde_def_cfa[4];
+ uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
+} DebugFrame;
+
+#define ELF_HOST_MACHINE EM_MIPS
+/* GDB doesn't appear to require proper setting of ELF_HOST_FLAGS,
+ which is good because they're really quite complicated for MIPS. */
+
+static const DebugFrame debug_frame = {
+ .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
+ .h.cie.id = -1,
+ .h.cie.version = 1,
+ .h.cie.code_align = 1,
+ .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
+ .h.cie.return_column = TCG_REG_RA,
+
+ /* Total FDE size does not include the "len" member. */
+ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
+
+ .fde_def_cfa = {
+ 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
+ (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
+ (FRAME_SIZE >> 7)
+ },
+ .fde_reg_ofs = {
+ 0x80 + 16, 9, /* DW_CFA_offset, s0, -72 */
+ 0x80 + 17, 8, /* DW_CFA_offset, s1, -64 */
+ 0x80 + 18, 7, /* DW_CFA_offset, s2, -56 */
+ 0x80 + 19, 6, /* DW_CFA_offset, s3, -48 */
+ 0x80 + 20, 5, /* DW_CFA_offset, s4, -40 */
+ 0x80 + 21, 4, /* DW_CFA_offset, s5, -32 */
+ 0x80 + 22, 3, /* DW_CFA_offset, s6, -24 */
+ 0x80 + 30, 2, /* DW_CFA_offset, s8, -16 */
+ 0x80 + 31, 1, /* DW_CFA_offset, ra, -8 */
+ }
+};
+
+void tcg_register_jit(void *buf, size_t buf_size)
+{
+ tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
+}
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 0f1349086b..adfc56ce62 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -296,6 +296,24 @@ static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
CASE_OP_32_64(nor):
return ~(x | y);
+ case INDEX_op_clz_i32:
+ return (uint32_t)x ? clz32(x) : y;
+
+ case INDEX_op_clz_i64:
+ return x ? clz64(x) : y;
+
+ case INDEX_op_ctz_i32:
+ return (uint32_t)x ? ctz32(x) : y;
+
+ case INDEX_op_ctz_i64:
+ return x ? ctz64(x) : y;
+
+ case INDEX_op_ctpop_i32:
+ return ctpop32(x);
+
+ case INDEX_op_ctpop_i64:
+ return ctpop64(x);
+
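A few concrete values for the new folding cases, using the host-utils helpers the file already calls:

/*
 * clz32(0x00f00000)   ==  8     ctz32(0x00f00000) == 20
 * ctpop32(0x00f00000) ==  4
 * For x == 0 the TCG clz/ctz opcodes are defined to yield the second
 * operand, which is why the cases above return y rather than calling
 * the helpers.
 */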
CASE_OP_32_64(ext8s):
return (int8_t)x;
@@ -878,11 +896,41 @@ void tcg_optimize(TCGContext *s)
temps[args[2]].mask);
break;
+ CASE_OP_32_64(extract):
+ mask = extract64(temps[args[1]].mask, args[2], args[3]);
+ if (args[2] == 0) {
+ affected = temps[args[1]].mask & ~mask;
+ }
+ break;
+ CASE_OP_32_64(sextract):
+ mask = sextract64(temps[args[1]].mask, args[2], args[3]);
+ if (args[2] == 0 && (tcg_target_long)mask >= 0) {
+ affected = temps[args[1]].mask & ~mask;
+ }
+ break;
+
CASE_OP_32_64(or):
CASE_OP_32_64(xor):
mask = temps[args[1]].mask | temps[args[2]].mask;
break;
+ case INDEX_op_clz_i32:
+ case INDEX_op_ctz_i32:
+ mask = temps[args[2]].mask | 31;
+ break;
+
+ case INDEX_op_clz_i64:
+ case INDEX_op_ctz_i64:
+ mask = temps[args[2]].mask | 63;
+ break;
+
+ case INDEX_op_ctpop_i32:
+ mask = 32 | 31;
+ break;
+ case INDEX_op_ctpop_i64:
+ mask = 64 | 63;
+ break;
+
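A short note on the mask bookkeeping above, where temps[].mask records bits that may still be nonzero:

/*
 * ctpop_i32 yields a value in [0, 32]; every such value fits in
 * 32 | 31 == 0x3f, so that constant is a safe "may be nonzero" mask.
 * clz/ctz results also lie in [0, 32] (or [0, 64]), but on a zero
 * input they pass through args[2], hence its mask is OR-ed in too.
 */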
CASE_OP_32_64(setcond):
case INDEX_op_setcond2_i32:
mask = 1;
@@ -996,6 +1044,7 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(ext8u):
CASE_OP_32_64(ext16s):
CASE_OP_32_64(ext16u):
+ CASE_OP_32_64(ctpop):
case INDEX_op_ext32s_i64:
case INDEX_op_ext32u_i64:
case INDEX_op_ext_i32_i64:
@@ -1039,6 +1088,20 @@ void tcg_optimize(TCGContext *s)
}
goto do_default;
+ CASE_OP_32_64(clz):
+ CASE_OP_32_64(ctz):
+ if (temp_is_const(args[1])) {
+ TCGArg v = temps[args[1]].val;
+ if (v != 0) {
+ tmp = do_constant_folding(opc, v, 0);
+ tcg_opt_gen_movi(s, op, args, args[0], tmp);
+ } else {
+ tcg_opt_gen_mov(s, op, args, args[0], args[2]);
+ }
+ break;
+ }
+ goto do_default;
+
CASE_OP_32_64(deposit):
if (temp_is_const(args[1]) && temp_is_const(args[2])) {
tmp = deposit64(temps[args[1]].val, args[3], args[4],
@@ -1048,6 +1111,22 @@ void tcg_optimize(TCGContext *s)
}
goto do_default;
+ CASE_OP_32_64(extract):
+ if (temp_is_const(args[1])) {
+ tmp = extract64(temps[args[1]].val, args[2], args[3]);
+ tcg_opt_gen_movi(s, op, args, args[0], tmp);
+ break;
+ }
+ goto do_default;
+
+ CASE_OP_32_64(sextract):
+ if (temp_is_const(args[1])) {
+ tmp = sextract64(temps[args[1]].val, args[2], args[3]);
+ tcg_opt_gen_movi(s, op, args, args[0], tmp);
+ break;
+ }
+ goto do_default;
+
CASE_OP_32_64(setcond):
tmp = do_constant_folding_cond(opc, args[1], args[2], args[3]);
if (tmp != 2) {
@@ -1076,6 +1155,21 @@ void tcg_optimize(TCGContext *s)
tcg_opt_gen_mov(s, op, args, args[0], args[4-tmp]);
break;
}
+ if (temp_is_const(args[3]) && temp_is_const(args[4])) {
+ tcg_target_ulong tv = temps[args[3]].val;
+ tcg_target_ulong fv = temps[args[4]].val;
+ TCGCond cond = args[5];
+ if (fv == 1 && tv == 0) {
+ cond = tcg_invert_cond(cond);
+ } else if (!(tv == 1 && fv == 0)) {
+ goto do_default;
+ }
+ args[3] = cond;
+ op->opc = opc = (opc == INDEX_op_movcond_i32
+ ? INDEX_op_setcond_i32
+ : INDEX_op_setcond_i64);
+ nb_iargs = 2;
+ }
goto do_default;
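The transformation above, written out as TCG opcodes with hypothetical operands:

/*
 * movcond_i32 d, c1, c2, 1, 0, cond  becomes  setcond_i32 d, c1, c2, cond
 * movcond_i32 d, c1, c2, 0, 1, cond  becomes  setcond_i32 d, c1, c2, !cond
 * (illustration only; any other constant value pair is left as a movcond).
 */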
case INDEX_op_add2_i32:
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index dd032f286b..abd8b3d6cd 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -49,6 +49,9 @@ typedef enum {
TCG_AREG0 = TCG_REG_R27
} TCGReg;
+extern bool have_isa_2_06;
+extern bool have_isa_3_00;
+
/* optional instructions automatically implemented */
#define TCG_TARGET_HAS_ext8u_i32 0 /* andi */
#define TCG_TARGET_HAS_ext16u_i32 0
@@ -68,7 +71,12 @@ typedef enum {
#define TCG_TARGET_HAS_eqv_i32 1
#define TCG_TARGET_HAS_nand_i32 1
#define TCG_TARGET_HAS_nor_i32 1
+#define TCG_TARGET_HAS_clz_i32 1
+#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00
+#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_extract_i32 1
+#define TCG_TARGET_HAS_sextract_i32 0
#define TCG_TARGET_HAS_movcond_i32 1
#define TCG_TARGET_HAS_mulu2_i32 0
#define TCG_TARGET_HAS_muls2_i32 0
@@ -99,7 +107,12 @@ typedef enum {
#define TCG_TARGET_HAS_eqv_i64 1
#define TCG_TARGET_HAS_nand_i64 1
#define TCG_TARGET_HAS_nor_i64 1
+#define TCG_TARGET_HAS_clz_i64 1
+#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00
+#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_extract_i64 1
+#define TCG_TARGET_HAS_sextract_i64 0
#define TCG_TARGET_HAS_movcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index a3262cfb0c..64f67d2c77 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -77,11 +77,15 @@
#define TCG_CT_CONST_U32 0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000
+#define TCG_CT_CONST_WSZ 0x4000
static tcg_insn_unit *tb_ret_addr;
#include "elf.h"
-static bool have_isa_2_06;
+
+bool have_isa_2_06;
+bool have_isa_3_00;
+
#define HAVE_ISA_2_06 have_isa_2_06
#define HAVE_ISEL have_isa_2_06
@@ -259,12 +263,10 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
}
/* parse target specific constraints */
-static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+static const char *target_parse_constraint(TCGArgConstraint *ct,
+ const char *ct_str, TCGType type)
{
- const char *ct_str;
-
- ct_str = *pct_str;
- switch (ct_str[0]) {
+ switch (*ct_str++) {
case 'A': case 'B': case 'C': case 'D':
ct->ct |= TCG_CT_REG;
tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
@@ -307,15 +309,16 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
case 'U':
ct->ct |= TCG_CT_CONST_U32;
break;
+ case 'W':
+ ct->ct |= TCG_CT_CONST_WSZ;
+ break;
case 'Z':
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
- return -1;
+ return NULL;
}
- ct_str++;
- *pct_str = ct_str;
- return 0;
+ return ct_str;
}
/* test if a constant matches the constraint */
@@ -345,6 +348,9 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
return 1;
} else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
return 1;
+ } else if ((ct & TCG_CT_CONST_WSZ)
+ && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
+ return 1;
}
return 0;
}
@@ -449,6 +455,10 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
#define NOR XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
+#define CNTTZW XO31(538)
+#define CNTTZD XO31(570)
+#define CNTPOPW XO31(378)
+#define CNTPOPD XO31(506)
#define ANDC XO31( 60)
#define ORC XO31(412)
#define EQV XO31(284)
@@ -1170,6 +1180,32 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
}
}
+static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
+ TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
+{
+ if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
+ tcg_out32(s, opc | RA(a0) | RS(a1));
+ } else {
+ tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
+ /* Note that the only other valid constant for a2 is 0. */
+ if (HAVE_ISEL) {
+ tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
+ tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
+ } else if (!const_a2 && a0 == a2) {
+ tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
+ tcg_out32(s, opc | RA(a0) | RS(a1));
+ } else {
+ tcg_out32(s, opc | RA(a0) | RS(a1));
+ tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
+ if (const_a2) {
+ tcg_out_movi(s, type, a0, 0);
+ } else {
+ tcg_out_mov(s, type, a0, a2);
+ }
+ }
+ }
+}
+
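A hedged note on why the constant-width case above needs no compare (ISA behaviour recalled from the Power manuals, so verify before relying on it):

/*
 * cntlzw/cntlzd (and the ISA 3.0 cnttzw/cnttzd) already return the
 * operand width for a zero input, so clz(x, 32) / clz(x, 64) collapses
 * to a single instruction; that is exactly what the new "W" constraint
 * below matches.  Any other a2 needs the compare-and-select path.
 */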
static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
const int *const_args)
{
@@ -2107,6 +2143,30 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
break;
+ case INDEX_op_clz_i32:
+ tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
+ args[2], const_args[2]);
+ break;
+ case INDEX_op_ctz_i32:
+ tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
+ args[2], const_args[2]);
+ break;
+ case INDEX_op_ctpop_i32:
+ tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
+ break;
+
+ case INDEX_op_clz_i64:
+ tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
+ args[2], const_args[2]);
+ break;
+ case INDEX_op_ctz_i64:
+ tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
+ args[2], const_args[2]);
+ break;
+ case INDEX_op_ctpop_i64:
+ tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
+ break;
+
case INDEX_op_mul_i32:
a0 = args[0], a1 = args[1], a2 = args[2];
if (const_args[2]) {
@@ -2396,6 +2456,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
}
break;
+ case INDEX_op_extract_i32:
+ tcg_out_rlw(s, RLWINM, args[0], args[1],
+ 32 - args[2], 32 - args[3], 31);
+ break;
+ case INDEX_op_extract_i64:
+ tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
+ break;
+
case INDEX_op_movcond_i32:
tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
args[3], args[4], const_args[2]);
@@ -2511,6 +2579,9 @@ static const TCGTargetOpDef ppc_op_defs[] = {
{ INDEX_op_eqv_i32, { "r", "r", "ri" } },
{ INDEX_op_nand_i32, { "r", "r", "r" } },
{ INDEX_op_nor_i32, { "r", "r", "r" } },
+ { INDEX_op_clz_i32, { "r", "r", "rZW" } },
+ { INDEX_op_ctz_i32, { "r", "r", "rZW" } },
+ { INDEX_op_ctpop_i32, { "r", "r" } },
{ INDEX_op_shl_i32, { "r", "r", "ri" } },
{ INDEX_op_shr_i32, { "r", "r", "ri" } },
@@ -2530,6 +2601,7 @@ static const TCGTargetOpDef ppc_op_defs[] = {
{ INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } },
{ INDEX_op_deposit_i32, { "r", "0", "rZ" } },
+ { INDEX_op_extract_i32, { "r", "r" } },
{ INDEX_op_muluh_i32, { "r", "r", "r" } },
{ INDEX_op_mulsh_i32, { "r", "r", "r" } },
@@ -2558,6 +2630,9 @@ static const TCGTargetOpDef ppc_op_defs[] = {
{ INDEX_op_eqv_i64, { "r", "r", "r" } },
{ INDEX_op_nand_i64, { "r", "r", "r" } },
{ INDEX_op_nor_i64, { "r", "r", "r" } },
+ { INDEX_op_clz_i64, { "r", "r", "rZW" } },
+ { INDEX_op_ctz_i64, { "r", "r", "rZW" } },
+ { INDEX_op_ctpop_i64, { "r", "r" } },
{ INDEX_op_shl_i64, { "r", "r", "ri" } },
{ INDEX_op_shr_i64, { "r", "r", "ri" } },
@@ -2585,6 +2660,7 @@ static const TCGTargetOpDef ppc_op_defs[] = {
{ INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } },
{ INDEX_op_deposit_i64, { "r", "0", "rZ" } },
+ { INDEX_op_extract_i64, { "r", "r" } },
{ INDEX_op_mulsh_i64, { "r", "r", "r" } },
{ INDEX_op_muluh_i64, { "r", "r", "r" } },
@@ -2624,12 +2700,31 @@ static const TCGTargetOpDef ppc_op_defs[] = {
{ -1 },
};
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+ int i, n = ARRAY_SIZE(ppc_op_defs);
+
+ for (i = 0; i < n; ++i) {
+ if (ppc_op_defs[i].op == op) {
+ return &ppc_op_defs[i];
+ }
+ }
+ return NULL;
+}
+
static void tcg_target_init(TCGContext *s)
{
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
+ unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
+
if (hwcap & PPC_FEATURE_ARCH_2_06) {
have_isa_2_06 = true;
}
+#ifdef PPC_FEATURE2_ARCH_3_00
+ if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
+ have_isa_3_00 = true;
+ }
+#endif
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
@@ -2660,8 +2755,6 @@ static void tcg_target_init(TCGContext *s)
if (USE_REG_RA) {
tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return addr */
}
-
- tcg_add_target_add_op_defs(ppc_op_defs);
}
#ifdef __ELF__
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index 0c1af244f3..cbdd2a6275 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -49,63 +49,81 @@ typedef enum TCGReg {
#define TCG_TARGET_NB_REGS 16
-/* optional instructions */
-#define TCG_TARGET_HAS_div2_i32 1
-#define TCG_TARGET_HAS_rot_i32 1
-#define TCG_TARGET_HAS_ext8s_i32 1
-#define TCG_TARGET_HAS_ext16s_i32 1
-#define TCG_TARGET_HAS_ext8u_i32 1
-#define TCG_TARGET_HAS_ext16u_i32 1
-#define TCG_TARGET_HAS_bswap16_i32 1
-#define TCG_TARGET_HAS_bswap32_i32 1
-#define TCG_TARGET_HAS_not_i32 0
-#define TCG_TARGET_HAS_neg_i32 1
-#define TCG_TARGET_HAS_andc_i32 0
-#define TCG_TARGET_HAS_orc_i32 0
-#define TCG_TARGET_HAS_eqv_i32 0
-#define TCG_TARGET_HAS_nand_i32 0
-#define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_deposit_i32 1
-#define TCG_TARGET_HAS_movcond_i32 1
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 0
-#define TCG_TARGET_HAS_muls2_i32 0
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_extrl_i64_i32 0
-#define TCG_TARGET_HAS_extrh_i64_i32 0
+/* A list of relevant facilities used by this translator. Some of these
+ are required for proper operation, and these are checked at startup. */
+
+#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
+#define FACILITY_LONG_DISP (1ULL << (63 - 18))
+#define FACILITY_EXT_IMM (1ULL << (63 - 21))
+#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
+#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
+#define FACILITY_FAST_BCR_SER FACILITY_LOAD_ON_COND
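A minimal sketch of how these facility bits are consumed; have_ext_imm() is a hypothetical helper for illustration only, the backend tests s390_facilities directly:

#include <stdbool.h>
#include <stdint.h>

extern uint64_t s390_facilities;

/* Bit N of the STFLE result doubleword is (1ULL << (63 - N)). */
static inline bool have_ext_imm(void)
{
    return (s390_facilities & FACILITY_EXT_IMM) != 0;
}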
-#define TCG_TARGET_HAS_div2_i64 1
-#define TCG_TARGET_HAS_rot_i64 1
-#define TCG_TARGET_HAS_ext8s_i64 1
-#define TCG_TARGET_HAS_ext16s_i64 1
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 1
-#define TCG_TARGET_HAS_ext16u_i64 1
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_bswap16_i64 1
-#define TCG_TARGET_HAS_bswap32_i64 1
-#define TCG_TARGET_HAS_bswap64_i64 1
-#define TCG_TARGET_HAS_not_i64 0
-#define TCG_TARGET_HAS_neg_i64 1
-#define TCG_TARGET_HAS_andc_i64 0
-#define TCG_TARGET_HAS_orc_i64 0
-#define TCG_TARGET_HAS_eqv_i64 0
-#define TCG_TARGET_HAS_nand_i64 0
-#define TCG_TARGET_HAS_nor_i64 0
-#define TCG_TARGET_HAS_deposit_i64 1
-#define TCG_TARGET_HAS_movcond_i64 1
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
-#define TCG_TARGET_HAS_mulu2_i64 1
-#define TCG_TARGET_HAS_muls2_i64 0
-#define TCG_TARGET_HAS_muluh_i64 0
-#define TCG_TARGET_HAS_mulsh_i64 0
+extern uint64_t s390_facilities;
+
+/* optional instructions */
+#define TCG_TARGET_HAS_div2_i32 1
+#define TCG_TARGET_HAS_rot_i32 1
+#define TCG_TARGET_HAS_ext8s_i32 1
+#define TCG_TARGET_HAS_ext16s_i32 1
+#define TCG_TARGET_HAS_ext8u_i32 1
+#define TCG_TARGET_HAS_ext16u_i32 1
+#define TCG_TARGET_HAS_bswap16_i32 1
+#define TCG_TARGET_HAS_bswap32_i32 1
+#define TCG_TARGET_HAS_not_i32 0
+#define TCG_TARGET_HAS_neg_i32 1
+#define TCG_TARGET_HAS_andc_i32 0
+#define TCG_TARGET_HAS_orc_i32 0
+#define TCG_TARGET_HAS_eqv_i32 0
+#define TCG_TARGET_HAS_nand_i32 0
+#define TCG_TARGET_HAS_nor_i32 0
+#define TCG_TARGET_HAS_clz_i32 0
+#define TCG_TARGET_HAS_ctz_i32 0
+#define TCG_TARGET_HAS_ctpop_i32 0
+#define TCG_TARGET_HAS_deposit_i32 (s390_facilities & FACILITY_GEN_INST_EXT)
+#define TCG_TARGET_HAS_extract_i32 (s390_facilities & FACILITY_GEN_INST_EXT)
+#define TCG_TARGET_HAS_sextract_i32 0
+#define TCG_TARGET_HAS_movcond_i32 1
+#define TCG_TARGET_HAS_add2_i32 1
+#define TCG_TARGET_HAS_sub2_i32 1
+#define TCG_TARGET_HAS_mulu2_i32 0
+#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
-extern bool tcg_target_deposit_valid(int ofs, int len);
-#define TCG_TARGET_deposit_i32_valid tcg_target_deposit_valid
-#define TCG_TARGET_deposit_i64_valid tcg_target_deposit_valid
+#define TCG_TARGET_HAS_div2_i64 1
+#define TCG_TARGET_HAS_rot_i64 1
+#define TCG_TARGET_HAS_ext8s_i64 1
+#define TCG_TARGET_HAS_ext16s_i64 1
+#define TCG_TARGET_HAS_ext32s_i64 1
+#define TCG_TARGET_HAS_ext8u_i64 1
+#define TCG_TARGET_HAS_ext16u_i64 1
+#define TCG_TARGET_HAS_ext32u_i64 1
+#define TCG_TARGET_HAS_bswap16_i64 1
+#define TCG_TARGET_HAS_bswap32_i64 1
+#define TCG_TARGET_HAS_bswap64_i64 1
+#define TCG_TARGET_HAS_not_i64 0
+#define TCG_TARGET_HAS_neg_i64 1
+#define TCG_TARGET_HAS_andc_i64 0
+#define TCG_TARGET_HAS_orc_i64 0
+#define TCG_TARGET_HAS_eqv_i64 0
+#define TCG_TARGET_HAS_nand_i64 0
+#define TCG_TARGET_HAS_nor_i64 0
+#define TCG_TARGET_HAS_clz_i64 (s390_facilities & FACILITY_EXT_IMM)
+#define TCG_TARGET_HAS_ctz_i64 0
+#define TCG_TARGET_HAS_ctpop_i64 0
+#define TCG_TARGET_HAS_deposit_i64 (s390_facilities & FACILITY_GEN_INST_EXT)
+#define TCG_TARGET_HAS_extract_i64 (s390_facilities & FACILITY_GEN_INST_EXT)
+#define TCG_TARGET_HAS_sextract_i64 0
+#define TCG_TARGET_HAS_movcond_i64 1
+#define TCG_TARGET_HAS_add2_i64 1
+#define TCG_TARGET_HAS_sub2_i64 1
+#define TCG_TARGET_HAS_mulu2_i64 1
+#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 0
+#define TCG_TARGET_HAS_mulsh_i64 0
/* used for function call generation */
#define TCG_REG_CALL_STACK TCG_REG_R15
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index 253d4a0a0b..a679280b92 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -43,13 +43,14 @@
#define TCG_CT_CONST_XORI 0x400
#define TCG_CT_CONST_CMPI 0x800
#define TCG_CT_CONST_ADLI 0x1000
+#define TCG_CT_CONST_ZERO 0x2000
/* In several places within the instruction set, 0 means "no register"
rather than TCG_REG_R0. */
#define TCG_REG_NONE 0
/* A scratch register that may be used throughout the backend. */
-#define TCG_TMP0 TCG_REG_R14
+#define TCG_TMP0 TCG_REG_R1
#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_R13
@@ -132,6 +133,7 @@ typedef enum S390Opcode {
RRE_DLR = 0xb997,
RRE_DSGFR = 0xb91d,
RRE_DSGR = 0xb90d,
+ RRE_FLOGR = 0xb983,
RRE_LGBR = 0xb906,
RRE_LCGR = 0xb903,
RRE_LGFR = 0xb914,
@@ -334,18 +336,7 @@ static void * const qemu_st_helpers[16] = {
#endif
static tcg_insn_unit *tb_ret_addr;
-
-/* A list of relevant facilities used by this translator. Some of these
- are required for proper operation, and these are checked at startup. */
-
-#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2))
-#define FACILITY_LONG_DISP (1ULL << (63 - 18))
-#define FACILITY_EXT_IMM (1ULL << (63 - 21))
-#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34))
-#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45))
-#define FACILITY_FAST_BCR_SER FACILITY_LOAD_ON_COND
-
-static uint64_t facilities;
+uint64_t s390_facilities;
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend)
@@ -369,20 +360,14 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
}
/* parse target specific constraints */
-static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+static const char *target_parse_constraint(TCGArgConstraint *ct,
+ const char *ct_str, TCGType type)
{
- const char *ct_str = *pct_str;
-
- switch (ct_str[0]) {
+ switch (*ct_str++) {
case 'r': /* all registers */
ct->ct |= TCG_CT_REG;
tcg_regset_set32(ct->u.regs, 0, 0xffff);
break;
- case 'R': /* not R0 */
- ct->ct |= TCG_CT_REG;
- tcg_regset_set32(ct->u.regs, 0, 0xffff);
- tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
- break;
case 'L': /* qemu_ld/st constraint */
ct->ct |= TCG_CT_REG;
tcg_regset_set32(ct->u.regs, 0, 0xffff);
@@ -415,13 +400,13 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
case 'C':
ct->ct |= TCG_CT_CONST_CMPI;
break;
+ case 'Z':
+ ct->ct |= TCG_CT_CONST_ZERO;
+ break;
default:
- return -1;
+ return NULL;
}
- ct_str++;
- *pct_str = ct_str;
-
- return 0;
+ return ct_str;
}
/* Immediates to be used with logical OR. This is an optimization only,
@@ -432,7 +417,7 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
static int tcg_match_ori(TCGType type, tcg_target_long val)
{
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
if (type == TCG_TYPE_I32) {
/* All 32-bit ORs can be performed with 1 48-bit insn. */
return 1;
@@ -444,7 +429,7 @@ static int tcg_match_ori(TCGType type, tcg_target_long val)
if (val == (int16_t)val) {
return 0;
}
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
if (val == (int32_t)val) {
return 0;
}
@@ -461,7 +446,7 @@ static int tcg_match_ori(TCGType type, tcg_target_long val)
static int tcg_match_xori(TCGType type, tcg_target_long val)
{
- if ((facilities & FACILITY_EXT_IMM) == 0) {
+ if ((s390_facilities & FACILITY_EXT_IMM) == 0) {
return 0;
}
@@ -482,7 +467,7 @@ static int tcg_match_xori(TCGType type, tcg_target_long val)
static int tcg_match_cmpi(TCGType type, tcg_target_long val)
{
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
/* The COMPARE IMMEDIATE instruction is available. */
if (type == TCG_TYPE_I32) {
/* We have a 32-bit immediate and can compare against anything. */
@@ -511,7 +496,7 @@ static int tcg_match_cmpi(TCGType type, tcg_target_long val)
static int tcg_match_add2i(TCGType type, tcg_target_long val)
{
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
if (type == TCG_TYPE_I32) {
return 1;
} else if (val >= -0xffffffffll && val <= 0xffffffffll) {
@@ -541,7 +526,7 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
general-instruction-extensions, then we have MULTIPLY SINGLE
IMMEDIATE with a signed 32-bit, otherwise we have only
MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
- if (facilities & FACILITY_GEN_INST_EXT) {
+ if (s390_facilities & FACILITY_GEN_INST_EXT) {
return val == (int32_t)val;
} else {
return val == (int16_t)val;
@@ -554,6 +539,8 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
return tcg_match_xori(type, val);
} else if (ct & TCG_CT_CONST_CMPI) {
return tcg_match_cmpi(type, val);
+ } else if (ct & TCG_CT_CONST_ZERO) {
+ return val == 0;
}
return 0;
@@ -668,7 +655,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
}
/* Try all 48-bit insns that can load it in one go. */
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
if (sval == (int32_t)sval) {
tcg_out_insn(s, RIL, LGFI, ret, sval);
return;
@@ -694,7 +681,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
/* If extended immediates are not present, then we may have to issue
several instructions to load the low 32 bits. */
- if (!(facilities & FACILITY_EXT_IMM)) {
+ if (!(s390_facilities & FACILITY_EXT_IMM)) {
/* A 32-bit unsigned value can be loaded in 2 insns. And given
that the lli_insns loop above did not succeed, we know that
both insns are required. */
@@ -727,7 +714,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
/* Insert data into the high 32-bits. */
uval = uval >> 31 >> 1;
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
if (uval < 0x10000) {
tcg_out_insn(s, RI, IIHL, ret, uval);
} else if ((uval & 0xffff) == 0) {
@@ -810,7 +797,7 @@ static void tcg_out_ld_abs(TCGContext *s, TCGType type, TCGReg dest, void *abs)
{
intptr_t addr = (intptr_t)abs;
- if ((facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
+ if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) {
ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
if (disp == (int32_t)disp) {
if (type == TCG_TYPE_I32) {
@@ -837,7 +824,7 @@ static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
tcg_out_insn(s, RRE, LGBR, dest, src);
return;
}
@@ -857,7 +844,7 @@ static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
tcg_out_insn(s, RRE, LLGCR, dest, src);
return;
}
@@ -877,7 +864,7 @@ static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
tcg_out_insn(s, RRE, LGHR, dest, src);
return;
}
@@ -897,7 +884,7 @@ static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
tcg_out_insn(s, RRE, LLGHR, dest, src);
return;
}
@@ -985,7 +972,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
tgen_ext32u(s, dest, dest);
return;
}
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
if ((val & valid) == 0xff) {
tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
return;
@@ -1006,7 +993,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
}
/* Try all 48-bit insns that can perform it in one go. */
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
for (i = 0; i < 2; i++) {
tcg_target_ulong mask = ~(0xffffffffull << i*32);
if (((val | ~valid) & mask) == mask) {
@@ -1015,7 +1002,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
}
}
}
- if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
+ if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) {
tgen_andi_risbg(s, dest, dest, val);
return;
}
@@ -1045,7 +1032,7 @@ static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
return;
}
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
/* Try all 32-bit insns that can perform it in one go. */
for (i = 0; i < 4; i++) {
tcg_target_ulong mask = (0xffffull << i*16);
@@ -1093,33 +1080,43 @@ static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
}
static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
- TCGArg c2, int c2const)
+ TCGArg c2, bool c2const, bool need_carry)
{
bool is_unsigned = is_unsigned_cond(c);
if (c2const) {
if (c2 == 0) {
+ if (!(is_unsigned && need_carry)) {
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RR, LTR, r1, r1);
+ } else {
+ tcg_out_insn(s, RRE, LTGR, r1, r1);
+ }
+ return tcg_cond_to_ltr_cond[c];
+ }
+ /* If we only got here because of load-and-test,
+ and we couldn't use that, then we need to load
+ the constant into a register. */
+ if (!(s390_facilities & FACILITY_EXT_IMM)) {
+ c2 = TCG_TMP0;
+ tcg_out_movi(s, type, c2, 0);
+ goto do_reg;
+ }
+ }
+ if (is_unsigned) {
if (type == TCG_TYPE_I32) {
- tcg_out_insn(s, RR, LTR, r1, r1);
+ tcg_out_insn(s, RIL, CLFI, r1, c2);
} else {
- tcg_out_insn(s, RRE, LTGR, r1, r1);
+ tcg_out_insn(s, RIL, CLGFI, r1, c2);
}
- return tcg_cond_to_ltr_cond[c];
} else {
- if (is_unsigned) {
- if (type == TCG_TYPE_I32) {
- tcg_out_insn(s, RIL, CLFI, r1, c2);
- } else {
- tcg_out_insn(s, RIL, CLGFI, r1, c2);
- }
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RIL, CFI, r1, c2);
} else {
- if (type == TCG_TYPE_I32) {
- tcg_out_insn(s, RIL, CFI, r1, c2);
- } else {
- tcg_out_insn(s, RIL, CGFI, r1, c2);
- }
+ tcg_out_insn(s, RIL, CGFI, r1, c2);
}
}
} else {
+ do_reg:
if (is_unsigned) {
if (type == TCG_TYPE_I32) {
tcg_out_insn(s, RR, CLR, r1, c2);
@@ -1148,7 +1145,7 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
do_greater:
/* The result of a compare has CC=2 for GT and CC=3 unused.
ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit. */
- tgen_cmp(s, type, cond, c1, c2, c2const);
+ tgen_cmp(s, type, cond, c1, c2, c2const, true);
tcg_out_movi(s, type, dest, 0);
tcg_out_insn(s, RRE, ALCGR, dest, dest);
return;
@@ -1219,8 +1216,8 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
break;
}
- cc = tgen_cmp(s, type, cond, c1, c2, c2const);
- if (facilities & FACILITY_LOAD_ON_COND) {
+ cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
+ if (s390_facilities & FACILITY_LOAD_ON_COND) {
/* Emit: d = 0, t = 1, d = (cc ? t : d). */
tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
@@ -1237,12 +1234,12 @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
TCGReg c1, TCGArg c2, int c2const, TCGReg r3)
{
int cc;
- if (facilities & FACILITY_LOAD_ON_COND) {
- cc = tgen_cmp(s, type, c, c1, c2, c2const);
+ if (s390_facilities & FACILITY_LOAD_ON_COND) {
+ cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
tcg_out_insn(s, RRF, LOCGR, dest, r3, cc);
} else {
c = tcg_invert_cond(c);
- cc = tgen_cmp(s, type, c, c1, c2, c2const);
+ cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
/* Emit: if (cc) goto over; dest = r3; over: */
tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
@@ -1250,17 +1247,45 @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
}
}
-bool tcg_target_deposit_valid(int ofs, int len)
+static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
+ TCGArg a2, int a2const)
{
- return (facilities & FACILITY_GEN_INST_EXT) != 0;
+ /* Since this sets both R and R+1, we have no choice but to store the
+ result into R0, allowing R1 == TCG_TMP0 to be clobbered as well. */
+ QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
+ tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);
+
+ if (a2const && a2 == 64) {
+ tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
+ } else {
+ if (a2const) {
+ tcg_out_movi(s, TCG_TYPE_I64, dest, a2);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_I64, dest, a2);
+ }
+ if (s390_facilities & FACILITY_LOAD_ON_COND) {
+ /* Emit: if (one bit found) dest = r0. */
+ tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
+ } else {
+ /* Emit: if (no one bit found) goto over; dest = r0; over: */
+ tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1);
+ tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0);
+ }
+ }
}
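For context, a hedged summary of FLOGR as used above (recalled from the z/Architecture Principles of Operation; verify against the manual):

/*
 * FLOGR writes the count of leading zero bits of the source into R0
 * (64 for a zero source) and also clobbers R1, the odd register of the
 * pair.  CC is 0 only for a zero source, so the mask 2 on LOCGR ("load
 * when CC 2") and mask 8 on BRC ("branch when CC 0") implement the
 * "a bit was found" / "skip" paths emitted above.
 */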
static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
- int ofs, int len)
+ int ofs, int len, int z)
{
int lsb = (63 - ofs);
int msb = lsb - (len - 1);
- tcg_out_risbg(s, dest, src, msb, lsb, ofs, 0);
+ tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
+}
+
+static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
+ int ofs, int len)
+{
+ tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
}
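A hedged reading of the RISBG parameters used here, with z/Architecture bit numbering where bit 0 is the MSB (verify against the manual):

/*
 * Rotate the source left by 64 - ofs so the field becomes right-aligned,
 * keep bits (64 - len) .. 63 and zero everything else, i.e.
 * dest = (src >> ofs) & ((1ULL << len) - 1).
 */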
static void tgen_gotoi(TCGContext *s, int cc, tcg_insn_unit *dest)
@@ -1332,7 +1357,7 @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
{
int cc;
- if (facilities & FACILITY_GEN_INST_EXT) {
+ if (s390_facilities & FACILITY_GEN_INST_EXT) {
bool is_unsigned = is_unsigned_cond(c);
bool in_range;
S390Opcode opc;
@@ -1374,7 +1399,7 @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
}
}
- cc = tgen_cmp(s, type, c, r1, c2, c2const);
+ cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
tgen_branch(s, cc, l);
}
@@ -1519,7 +1544,7 @@ static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
- if (facilities & FACILITY_GEN_INST_EXT) {
+ if (s390_facilities & FACILITY_GEN_INST_EXT) {
tcg_out_risbg(s, TCG_REG_R2, addr_reg,
64 - CPU_TLB_BITS - CPU_TLB_ENTRY_BITS,
63 - CPU_TLB_ENTRY_BITS,
@@ -1790,7 +1815,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_insn(s, RI, AHI, a0, a2);
break;
}
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
tcg_out_insn(s, RIL, AFI, a0, a2);
break;
}
@@ -1986,7 +2011,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_insn(s, RI, AGHI, a0, a2);
break;
}
- if (facilities & FACILITY_EXT_IMM) {
+ if (s390_facilities & FACILITY_EXT_IMM) {
if (a2 == (int32_t)a2) {
tcg_out_insn(s, RIL, AGFI, a0, a2);
break;
@@ -2167,7 +2192,30 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
OP_32_64(deposit):
- tgen_deposit(s, args[0], args[2], args[3], args[4]);
+ a0 = args[0], a1 = args[1], a2 = args[2];
+ if (const_args[1]) {
+ tgen_deposit(s, a0, a2, args[3], args[4], 1);
+ } else {
+ /* Since we can't support "0Z" as a constraint, we allow a1 in
+ any register. Fix things up as if it were a matching constraint. */
+ if (a0 != a1) {
+ TCGType type = (opc == INDEX_op_deposit_i64);
+ if (a0 == a2) {
+ tcg_out_mov(s, type, TCG_TMP0, a2);
+ a2 = TCG_TMP0;
+ }
+ tcg_out_mov(s, type, a0, a1);
+ }
+ tgen_deposit(s, a0, a2, args[3], args[4], 0);
+ }
+ break;
+
+ OP_32_64(extract):
+ tgen_extract(s, args[0], args[1], args[2], args[3]);
+ break;
+
+ case INDEX_op_clz_i64:
+ tgen_clz(s, args[0], args[1], args[2], const_args[2]);
break;
case INDEX_op_mb:
@@ -2175,7 +2223,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
serialize the instruction stream. */
if (args[0] & TCG_MO_ST_LD) {
tcg_out_insn(s, RR, BCR,
- facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0);
+ s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0);
}
break;
@@ -2216,12 +2264,12 @@ static const TCGTargetOpDef s390_op_defs[] = {
{ INDEX_op_neg_i32, { "r", "r" } },
- { INDEX_op_shl_i32, { "r", "0", "Ri" } },
- { INDEX_op_shr_i32, { "r", "0", "Ri" } },
- { INDEX_op_sar_i32, { "r", "0", "Ri" } },
+ { INDEX_op_shl_i32, { "r", "0", "ri" } },
+ { INDEX_op_shr_i32, { "r", "0", "ri" } },
+ { INDEX_op_sar_i32, { "r", "0", "ri" } },
- { INDEX_op_rotl_i32, { "r", "r", "Ri" } },
- { INDEX_op_rotr_i32, { "r", "r", "Ri" } },
+ { INDEX_op_rotl_i32, { "r", "r", "ri" } },
+ { INDEX_op_rotr_i32, { "r", "r", "ri" } },
{ INDEX_op_ext8s_i32, { "r", "r" } },
{ INDEX_op_ext8u_i32, { "r", "r" } },
@@ -2237,7 +2285,8 @@ static const TCGTargetOpDef s390_op_defs[] = {
{ INDEX_op_brcond_i32, { "r", "rC" } },
{ INDEX_op_setcond_i32, { "r", "r", "rC" } },
{ INDEX_op_movcond_i32, { "r", "r", "rC", "r", "0" } },
- { INDEX_op_deposit_i32, { "r", "0", "r" } },
+ { INDEX_op_deposit_i32, { "r", "rZ", "r" } },
+ { INDEX_op_extract_i32, { "r", "r" } },
{ INDEX_op_qemu_ld_i32, { "r", "L" } },
{ INDEX_op_qemu_ld_i64, { "r", "L" } },
@@ -2271,12 +2320,12 @@ static const TCGTargetOpDef s390_op_defs[] = {
{ INDEX_op_neg_i64, { "r", "r" } },
- { INDEX_op_shl_i64, { "r", "r", "Ri" } },
- { INDEX_op_shr_i64, { "r", "r", "Ri" } },
- { INDEX_op_sar_i64, { "r", "r", "Ri" } },
+ { INDEX_op_shl_i64, { "r", "r", "ri" } },
+ { INDEX_op_shr_i64, { "r", "r", "ri" } },
+ { INDEX_op_sar_i64, { "r", "r", "ri" } },
- { INDEX_op_rotl_i64, { "r", "r", "Ri" } },
- { INDEX_op_rotr_i64, { "r", "r", "Ri" } },
+ { INDEX_op_rotl_i64, { "r", "r", "ri" } },
+ { INDEX_op_rotr_i64, { "r", "r", "ri" } },
{ INDEX_op_ext8s_i64, { "r", "r" } },
{ INDEX_op_ext8u_i64, { "r", "r" } },
@@ -2292,6 +2341,8 @@ static const TCGTargetOpDef s390_op_defs[] = {
{ INDEX_op_bswap32_i64, { "r", "r" } },
{ INDEX_op_bswap64_i64, { "r", "r" } },
+ { INDEX_op_clz_i64, { "r", "r", "ri" } },
+
{ INDEX_op_add2_i64, { "r", "r", "0", "1", "rA", "r" } },
{ INDEX_op_sub2_i64, { "r", "r", "0", "1", "rA", "r" } },
@@ -2299,12 +2350,25 @@ static const TCGTargetOpDef s390_op_defs[] = {
{ INDEX_op_setcond_i64, { "r", "r", "rC" } },
{ INDEX_op_movcond_i64, { "r", "r", "rC", "r", "0" } },
{ INDEX_op_deposit_i64, { "r", "0", "r" } },
+ { INDEX_op_extract_i64, { "r", "r" } },
{ INDEX_op_mb, { } },
{ -1 },
};
-static void query_facilities(void)
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+ int i, n = ARRAY_SIZE(s390_op_defs);
+
+ for (i = 0; i < n; ++i) {
+ if (s390_op_defs[i].op == op) {
+ return &s390_op_defs[i];
+ }
+ }
+ return NULL;
+}
+
+static void query_s390_facilities(void)
{
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
@@ -2315,7 +2379,7 @@ static void query_facilities(void)
register void *r1 __asm__("1");
/* stfle 0(%r1) */
- r1 = &facilities;
+ r1 = &s390_facilities;
asm volatile(".word 0xb2b0,0x1000"
: "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc");
}
@@ -2323,7 +2387,7 @@ static void query_facilities(void)
static void tcg_target_init(TCGContext *s)
{
- query_facilities();
+ query_s390_facilities();
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);
@@ -2346,8 +2410,6 @@ static void tcg_target_init(TCGContext *s)
/* XXX many insns can't be used with R0, so we better avoid it for now */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
-
- tcg_add_target_add_op_defs(s390_op_defs);
}
#define FRAME_SIZE ((int)(TCG_TARGET_CALL_STACK_OFFSET \
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
index 88f9c90f5f..b8b74f96ff 100644
--- a/tcg/sparc/tcg-target.h
+++ b/tcg/sparc/tcg-target.h
@@ -110,7 +110,12 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
+#define TCG_TARGET_HAS_clz_i32 0
+#define TCG_TARGET_HAS_ctz_i32 0
+#define TCG_TARGET_HAS_ctpop_i32 0
#define TCG_TARGET_HAS_deposit_i32 0
+#define TCG_TARGET_HAS_extract_i32 0
+#define TCG_TARGET_HAS_sextract_i32 0
#define TCG_TARGET_HAS_movcond_i32 1
#define TCG_TARGET_HAS_add2_i32 1
#define TCG_TARGET_HAS_sub2_i32 1
@@ -140,7 +145,12 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
+#define TCG_TARGET_HAS_clz_i64 0
+#define TCG_TARGET_HAS_ctz_i64 0
+#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_deposit_i64 0
+#define TCG_TARGET_HAS_extract_i64 0
+#define TCG_TARGET_HAS_sextract_i64 0
#define TCG_TARGET_HAS_movcond_i64 1
#define TCG_TARGET_HAS_add2_i64 1
#define TCG_TARGET_HAS_sub2_i64 1
diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
index 700c43487f..d1f4c0dead 100644
--- a/tcg/sparc/tcg-target.inc.c
+++ b/tcg/sparc/tcg-target.inc.c
@@ -319,12 +319,10 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
}
/* parse target specific constraints */
-static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+static const char *target_parse_constraint(TCGArgConstraint *ct,
+ const char *ct_str, TCGType type)
{
- const char *ct_str;
-
- ct_str = *pct_str;
- switch (ct_str[0]) {
+ switch (*ct_str++) {
case 'r':
ct->ct |= TCG_CT_REG;
tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
@@ -360,11 +358,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
ct->ct |= TCG_CT_CONST_ZERO;
break;
default:
- return -1;
+ return NULL;
}
- ct_str++;
- *pct_str = ct_str;
- return 0;
+ return ct_str;
}
/* test if a constant matches the constraint */
@@ -1583,6 +1579,18 @@ static const TCGTargetOpDef sparc_op_defs[] = {
{ -1 },
};
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+ int i, n = ARRAY_SIZE(sparc_op_defs);
+
+ for (i = 0; i < n; ++i) {
+ if (sparc_op_defs[i].op == op) {
+ return &sparc_op_defs[i];
+ }
+ }
+ return NULL;
+}
+
static void tcg_target_init(TCGContext *s)
{
/* Only probe for the platform and capabilities if we haven't already
@@ -1622,8 +1630,6 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
-
- tcg_add_target_add_op_defs(sparc_op_defs);
}
#if SPARC64
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 6e2fb3522f..95a39b7d8c 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -457,6 +457,117 @@ void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
}
}
+void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_clz_i32) {
+ tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_clz_i64) {
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t1, arg1);
+ tcg_gen_extu_i32_i64(t2, arg2);
+ tcg_gen_addi_i64(t2, t2, 32);
+ tcg_gen_clz_i64(t1, t1, t2);
+ tcg_gen_extrl_i64_i32(ret, t1);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_gen_subi_i32(ret, ret, 32);
+ } else {
+ gen_helper_clz_i32(ret, arg1, arg2);
+ }
+}
+
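
The clz_i32 fallback above widens to 64 bits, biases the zero-input value (arg2) by 32, counts, and then subtracts 32 again, so both the non-zero and the zero case come out right. A standalone check of that identity using a GCC/Clang builtin (illustrative only, not QEMU code):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t clz64(uint64_t x, uint64_t on_zero)
    {
        return x ? (uint64_t)__builtin_clzll(x) : on_zero;
    }

    static uint32_t clz32_via_clz64(uint32_t x, uint32_t on_zero)
    {
        /* Widen, count with the zero case biased by 32, then undo the bias. */
        return (uint32_t)(clz64(x, (uint64_t)on_zero + 32) - 32);
    }

    int main(void)
    {
        assert(clz32_via_clz64(0x80000000u, 32) == 0);
        assert(clz32_via_clz64(1, 32) == 31);
        assert(clz32_via_clz64(0, 32) == 32);
        assert(clz32_via_clz64(0, 99) == 99);   /* arbitrary zero-input value */
        return 0;
    }
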
+void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
+{
+ TCGv_i32 t = tcg_const_i32(arg2);
+ tcg_gen_clz_i32(ret, arg1, t);
+ tcg_temp_free_i32(t);
+}
+
+void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ if (TCG_TARGET_HAS_ctz_i32) {
+ tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_ctz_i64) {
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t1, arg1);
+ tcg_gen_extu_i32_i64(t2, arg2);
+ tcg_gen_ctz_i64(t1, t1, t2);
+ tcg_gen_extrl_i64_i32(ret, t1);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ } else if (TCG_TARGET_HAS_ctpop_i32
+ || TCG_TARGET_HAS_ctpop_i64
+ || TCG_TARGET_HAS_clz_i32
+ || TCG_TARGET_HAS_clz_i64) {
+ TCGv_i32 z, t = tcg_temp_new_i32();
+
+ if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) {
+ tcg_gen_subi_i32(t, arg1, 1);
+ tcg_gen_andc_i32(t, t, arg1);
+ tcg_gen_ctpop_i32(t, t);
+ } else {
+ /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */
+ tcg_gen_neg_i32(t, arg1);
+ tcg_gen_and_i32(t, t, arg1);
+ tcg_gen_clzi_i32(t, t, 32);
+ tcg_gen_xori_i32(t, t, 31);
+ }
+ z = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t);
+ tcg_temp_free_i32(t);
+ tcg_temp_free_i32(z);
+ } else {
+ gen_helper_ctz_i32(ret, arg1, arg2);
+ }
+}
+
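
The ctz_i32 fallbacks above rest on two standard identities: with ctpop available, ctz(x) == ctpop((x - 1) & ~x); with only clz, ctz(x) == 31 ^ clz(x & -x) for x != 0, and the zero case is patched up afterwards by the movcond against arg2. A quick brute-force check of both identities (illustrative, GCC/Clang builtins):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        for (uint32_t x = 1; x < (1u << 20); x++) {
            int ref = __builtin_ctz(x);
            int via_ctpop = __builtin_popcount((x - 1) & ~x);
            int via_clz = 31 ^ __builtin_clz(x & -x);

            assert(ref == via_ctpop);
            assert(ref == via_clz);
        }
        puts("ctz fallback identities hold");
        return 0;
    }
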
+void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2)
+{
+ if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) {
+ /* This equivalence has the advantage of not requiring a fixup. */
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_subi_i32(t, arg1, 1);
+ tcg_gen_andc_i32(t, t, arg1);
+ tcg_gen_ctpop_i32(ret, t);
+ tcg_temp_free_i32(t);
+ } else {
+ TCGv_i32 t = tcg_const_i32(arg2);
+ tcg_gen_ctz_i32(ret, arg1, t);
+ tcg_temp_free_i32(t);
+ }
+}
+
+void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg)
+{
+ if (TCG_TARGET_HAS_clz_i32) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t, arg, 31);
+ tcg_gen_xor_i32(t, t, arg);
+ tcg_gen_clzi_i32(t, t, 32);
+ tcg_gen_subi_i32(ret, t, 1);
+ tcg_temp_free_i32(t);
+ } else {
+ gen_helper_clrsb_i32(ret, arg);
+ }
+}
+
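
clrsb ("count leading redundant sign bits") is derived above from clz: xor the value with its own sign mask, count leading zeros with 32 substituted for a zero input, and subtract one. A plain-C spot check of that expansion against the GCC/Clang builtin (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    static int clz32(uint32_t x) { return x ? __builtin_clz(x) : 32; }

    int main(void)
    {
        int32_t samples[] = { 0, -1, 1, -2, 0x7fffffff, INT32_MIN, 0x12345678 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            int32_t x = samples[i];
            uint32_t sign = x < 0 ? 0xffffffffu : 0;   /* what sari(x, 31) produces */
            int expanded = clz32((uint32_t)x ^ sign) - 1;

            assert(expanded == __builtin_clrsb(x));
        }
        return 0;
    }
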
+void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1)
+{
+ if (TCG_TARGET_HAS_ctpop_i32) {
+ tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1);
+ } else if (TCG_TARGET_HAS_ctpop_i64) {
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t, arg1);
+ tcg_gen_ctpop_i64(t, t);
+ tcg_gen_extrl_i64_i32(ret, t);
+ tcg_temp_free_i64(t);
+ } else {
+ gen_helper_ctpop_i32(ret, arg1);
+ }
+}
+
void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
if (TCG_TARGET_HAS_rot_i32) {
@@ -533,10 +644,11 @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
TCGv_i32 t1;
tcg_debug_assert(ofs < 32);
+ tcg_debug_assert(len > 0);
tcg_debug_assert(len <= 32);
tcg_debug_assert(ofs + len <= 32);
- if (ofs == 0 && len == 32) {
+ if (len == 32) {
tcg_gen_mov_i32(ret, arg2);
return;
}
@@ -560,6 +672,189 @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
tcg_temp_free_i32(t1);
}
+void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len)
+{
+ tcg_debug_assert(ofs < 32);
+ tcg_debug_assert(len > 0);
+ tcg_debug_assert(len <= 32);
+ tcg_debug_assert(ofs + len <= 32);
+
+ if (ofs + len == 32) {
+ tcg_gen_shli_i32(ret, arg, ofs);
+ } else if (ofs == 0) {
+ tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
+ } else if (TCG_TARGET_HAS_deposit_i32
+ && TCG_TARGET_deposit_i32_valid(ofs, len)) {
+ TCGv_i32 zero = tcg_const_i32(0);
+ tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
+ tcg_temp_free_i32(zero);
+ } else {
+ /* To help two-operand hosts we prefer to zero-extend first,
+ which allows ARG to stay live. */
+ switch (len) {
+ case 16:
+ if (TCG_TARGET_HAS_ext16u_i32) {
+ tcg_gen_ext16u_i32(ret, arg);
+ tcg_gen_shli_i32(ret, ret, ofs);
+ return;
+ }
+ break;
+ case 8:
+ if (TCG_TARGET_HAS_ext8u_i32) {
+ tcg_gen_ext8u_i32(ret, arg);
+ tcg_gen_shli_i32(ret, ret, ofs);
+ return;
+ }
+ break;
+ }
+ /* Otherwise prefer zero-extension over AND for code size. */
+ switch (ofs + len) {
+ case 16:
+ if (TCG_TARGET_HAS_ext16u_i32) {
+ tcg_gen_shli_i32(ret, arg, ofs);
+ tcg_gen_ext16u_i32(ret, ret);
+ return;
+ }
+ break;
+ case 8:
+ if (TCG_TARGET_HAS_ext8u_i32) {
+ tcg_gen_shli_i32(ret, arg, ofs);
+ tcg_gen_ext8u_i32(ret, ret);
+ return;
+ }
+ break;
+ }
+ tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
+ tcg_gen_shli_i32(ret, ret, ofs);
+ }
+}
+
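
Whatever path the deposit_z_i32 expansion above ends up taking, the value it computes is simply the low len bits of arg placed at bit ofs of an otherwise zero word. A plain-C reference of that semantic (illustrative; assumes 0 < len <= 32 and ofs + len <= 32):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t deposit_z32(uint32_t arg, unsigned ofs, unsigned len)
    {
        uint32_t mask = (len == 32 ? ~0u : (1u << len) - 1);
        return (arg & mask) << ofs;
    }

    int main(void)
    {
        assert(deposit_z32(0xabcd, 8, 8) == 0xcd00);
        assert(deposit_z32(0xffffffffu, 4, 16) == 0x000ffff0);
        assert(deposit_z32(0x12345678, 0, 32) == 0x12345678);
        return 0;
    }
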
+void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len)
+{
+ tcg_debug_assert(ofs < 32);
+ tcg_debug_assert(len > 0);
+ tcg_debug_assert(len <= 32);
+ tcg_debug_assert(ofs + len <= 32);
+
+ /* Canonicalize certain special cases, even if extract is supported. */
+ if (ofs + len == 32) {
+ tcg_gen_shri_i32(ret, arg, 32 - len);
+ return;
+ }
+ if (ofs == 0) {
+ tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
+ return;
+ }
+
+ if (TCG_TARGET_HAS_extract_i32
+ && TCG_TARGET_extract_i32_valid(ofs, len)) {
+ tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
+ return;
+ }
+
+ /* Assume that zero-extension, if available, is cheaper than a shift. */
+ switch (ofs + len) {
+ case 16:
+ if (TCG_TARGET_HAS_ext16u_i32) {
+ tcg_gen_ext16u_i32(ret, arg);
+ tcg_gen_shri_i32(ret, ret, ofs);
+ return;
+ }
+ break;
+ case 8:
+ if (TCG_TARGET_HAS_ext8u_i32) {
+ tcg_gen_ext8u_i32(ret, arg);
+ tcg_gen_shri_i32(ret, ret, ofs);
+ return;
+ }
+ break;
+ }
+
+ /* ??? Ideally we'd know what values are available for immediate AND.
+ Assume that 8 bits are available, plus the special case of 16,
+ so that we get ext8u, ext16u. */
+ switch (len) {
+ case 1 ... 8: case 16:
+ tcg_gen_shri_i32(ret, arg, ofs);
+ tcg_gen_andi_i32(ret, ret, (1u << len) - 1);
+ break;
+ default:
+ tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
+ tcg_gen_shri_i32(ret, ret, 32 - len);
+ break;
+ }
+}
+
+void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len)
+{
+ tcg_debug_assert(ofs < 32);
+ tcg_debug_assert(len > 0);
+ tcg_debug_assert(len <= 32);
+ tcg_debug_assert(ofs + len <= 32);
+
+ /* Canonicalize certain special cases, even if extract is supported. */
+ if (ofs + len == 32) {
+ tcg_gen_sari_i32(ret, arg, 32 - len);
+ return;
+ }
+ if (ofs == 0) {
+ switch (len) {
+ case 16:
+ tcg_gen_ext16s_i32(ret, arg);
+ return;
+ case 8:
+ tcg_gen_ext8s_i32(ret, arg);
+ return;
+ }
+ }
+
+ if (TCG_TARGET_HAS_sextract_i32
+ && TCG_TARGET_extract_i32_valid(ofs, len)) {
+ tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
+ return;
+ }
+
+ /* Assume that sign-extension, if available, is cheaper than a shift. */
+ switch (ofs + len) {
+ case 16:
+ if (TCG_TARGET_HAS_ext16s_i32) {
+ tcg_gen_ext16s_i32(ret, arg);
+ tcg_gen_sari_i32(ret, ret, ofs);
+ return;
+ }
+ break;
+ case 8:
+ if (TCG_TARGET_HAS_ext8s_i32) {
+ tcg_gen_ext8s_i32(ret, arg);
+ tcg_gen_sari_i32(ret, ret, ofs);
+ return;
+ }
+ break;
+ }
+ switch (len) {
+ case 16:
+ if (TCG_TARGET_HAS_ext16s_i32) {
+ tcg_gen_shri_i32(ret, arg, ofs);
+ tcg_gen_ext16s_i32(ret, ret);
+ return;
+ }
+ break;
+ case 8:
+ if (TCG_TARGET_HAS_ext8s_i32) {
+ tcg_gen_shri_i32(ret, arg, ofs);
+ tcg_gen_ext8s_i32(ret, ret);
+ return;
+ }
+ break;
+ }
+
+ tcg_gen_shli_i32(ret, arg, 32 - len - ofs);
+ tcg_gen_sari_i32(ret, ret, 32 - len);
+}
+
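
Both 32-bit extract helpers fall back to the classic double-shift forms when no better primitive is available: a left shift that discards the bits above the field, followed by a logical (extract) or arithmetic (sextract) right shift. A compact reference in plain C (illustrative; relies on GCC/Clang's arithmetic right shift of signed values, and assumes 0 < len and ofs + len <= 32):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t extract32_ref(uint32_t arg, unsigned ofs, unsigned len)
    {
        return (arg << (32 - len - ofs)) >> (32 - len);
    }

    static int32_t sextract32_ref(uint32_t arg, unsigned ofs, unsigned len)
    {
        /* The arithmetic shift replicates the field's top bit. */
        return (int32_t)(arg << (32 - len - ofs)) >> (32 - len);
    }

    int main(void)
    {
        assert(extract32_ref(0xdeadbeef, 4, 12) == 0xbee);
        assert(sextract32_ref(0xdeadbeef, 4, 12) == (int32_t)0xfffffbee);
        assert(sextract32_ref(0x00000700, 8, 4) == 7);
        return 0;
    }
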
void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2)
{
@@ -1519,6 +1814,115 @@ void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
}
}
+void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_HAS_clz_i64) {
+ tcg_gen_op3_i64(INDEX_op_clz_i64, ret, arg1, arg2);
+ } else {
+ gen_helper_clz_i64(ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
+{
+ if (TCG_TARGET_REG_BITS == 32
+ && TCG_TARGET_HAS_clz_i32
+ && arg2 <= 0xffffffffu) {
+ TCGv_i32 t = tcg_const_i32((uint32_t)arg2 - 32);
+ tcg_gen_clz_i32(t, TCGV_LOW(arg1), t);
+ tcg_gen_addi_i32(t, t, 32);
+ tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ tcg_temp_free_i32(t);
+ } else {
+ TCGv_i64 t = tcg_const_i64(arg2);
+ tcg_gen_clz_i64(ret, arg1, t);
+ tcg_temp_free_i64(t);
+ }
+}
+
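
On a 32-bit host, clzi_i64 above is built from two 32-bit clz operations: count the low word with arg2 - 32 as its zero-input value, add 32, and use the sum as the zero-input value when counting the high word. That composition is easy to check on its own (illustrative, GCC/Clang builtins):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t clz32(uint32_t x, uint32_t on_zero)
    {
        return x ? (uint32_t)__builtin_clz(x) : on_zero;
    }

    static uint32_t clz64_composed(uint64_t x, uint32_t on_zero)
    {
        uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);

        return clz32(hi, clz32(lo, on_zero - 32) + 32);
    }

    int main(void)
    {
        assert(clz64_composed(0, 64) == 64);
        assert(clz64_composed(1, 64) == 63);
        assert(clz64_composed(0x100000000ull, 64) == 31);
        assert(clz64_composed(0x8000000000000000ull, 64) == 0);
        return 0;
    }
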
+void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ if (TCG_TARGET_HAS_ctz_i64) {
+ tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2);
+ } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) {
+ TCGv_i64 z, t = tcg_temp_new_i64();
+
+ if (TCG_TARGET_HAS_ctpop_i64) {
+ tcg_gen_subi_i64(t, arg1, 1);
+ tcg_gen_andc_i64(t, t, arg1);
+ tcg_gen_ctpop_i64(t, t);
+ } else {
+ /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */
+ tcg_gen_neg_i64(t, arg1);
+ tcg_gen_and_i64(t, t, arg1);
+ tcg_gen_clzi_i64(t, t, 64);
+ tcg_gen_xori_i64(t, t, 63);
+ }
+ z = tcg_const_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t);
+ tcg_temp_free_i64(t);
+ tcg_temp_free_i64(z);
+ } else {
+ gen_helper_ctz_i64(ret, arg1, arg2);
+ }
+}
+
+void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2)
+{
+ if (TCG_TARGET_REG_BITS == 32
+ && TCG_TARGET_HAS_ctz_i32
+ && arg2 <= 0xffffffffu) {
+ TCGv_i32 t32 = tcg_const_i32((uint32_t)arg2 - 32);
+ tcg_gen_ctz_i32(t32, TCGV_HIGH(arg1), t32);
+ tcg_gen_addi_i32(t32, t32, 32);
+ tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ tcg_temp_free_i32(t32);
+ } else if (!TCG_TARGET_HAS_ctz_i64
+ && TCG_TARGET_HAS_ctpop_i64
+ && arg2 == 64) {
+ /* This equivalence has the advantage of not requiring a fixup. */
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_subi_i64(t, arg1, 1);
+ tcg_gen_andc_i64(t, t, arg1);
+ tcg_gen_ctpop_i64(ret, t);
+ tcg_temp_free_i64(t);
+ } else {
+ TCGv_i64 t64 = tcg_const_i64(arg2);
+ tcg_gen_ctz_i64(ret, arg1, t64);
+ tcg_temp_free_i64(t64);
+ }
+}
+
+void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg)
+{
+ if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) {
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t, arg, 63);
+ tcg_gen_xor_i64(t, t, arg);
+ tcg_gen_clzi_i64(t, t, 64);
+ tcg_gen_subi_i64(ret, t, 1);
+ tcg_temp_free_i64(t);
+ } else {
+ gen_helper_clrsb_i64(ret, arg);
+ }
+}
+
+void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1)
+{
+ if (TCG_TARGET_HAS_ctpop_i64) {
+ tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1);
+ } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32) {
+ tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
+ tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
+ tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret));
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ } else {
+ gen_helper_ctpop_i64(ret, arg1);
+ }
+}
+
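
The 32-bit-host case of ctpop_i64 above is just popcount(lo) + popcount(hi), with the high half of the result cleared afterwards. For reference (illustrative):

    #include <assert.h>
    #include <stdint.h>

    static unsigned ctpop64_split(uint64_t x)
    {
        return __builtin_popcount((uint32_t)x)
             + __builtin_popcount((uint32_t)(x >> 32));
    }

    int main(void)
    {
        assert(ctpop64_split(0) == 0);
        assert(ctpop64_split(~0ull) == 64);
        assert(ctpop64_split(0x8000000180000001ull) == 4);
        return 0;
    }
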
void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
if (TCG_TARGET_HAS_rot_i64) {
@@ -1593,10 +1997,11 @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
TCGv_i64 t1;
tcg_debug_assert(ofs < 64);
+ tcg_debug_assert(len > 0);
tcg_debug_assert(len <= 64);
tcg_debug_assert(ofs + len <= 64);
- if (ofs == 0 && len == 64) {
+ if (len == 64) {
tcg_gen_mov_i64(ret, arg2);
return;
}
@@ -1635,6 +2040,289 @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
tcg_temp_free_i64(t1);
}
+void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len)
+{
+ tcg_debug_assert(ofs < 64);
+ tcg_debug_assert(len > 0);
+ tcg_debug_assert(len <= 64);
+ tcg_debug_assert(ofs + len <= 64);
+
+ if (ofs + len == 64) {
+ tcg_gen_shli_i64(ret, arg, ofs);
+ } else if (ofs == 0) {
+ tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
+ } else if (TCG_TARGET_HAS_deposit_i64
+ && TCG_TARGET_deposit_i64_valid(ofs, len)) {
+ TCGv_i64 zero = tcg_const_i64(0);
+ tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len);
+ tcg_temp_free_i64(zero);
+ } else {
+ if (TCG_TARGET_REG_BITS == 32) {
+ if (ofs >= 32) {
+ tcg_gen_deposit_z_i32(TCGV_HIGH(ret), TCGV_LOW(arg),
+ ofs - 32, len);
+ tcg_gen_movi_i32(TCGV_LOW(ret), 0);
+ return;
+ }
+ if (ofs + len <= 32) {
+ tcg_gen_deposit_z_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ return;
+ }
+ }
+ /* To help two-operand hosts we prefer to zero-extend first,
+ which allows ARG to stay live. */
+ switch (len) {
+ case 32:
+ if (TCG_TARGET_HAS_ext32u_i64) {
+ tcg_gen_ext32u_i64(ret, arg);
+ tcg_gen_shli_i64(ret, ret, ofs);
+ return;
+ }
+ break;
+ case 16:
+ if (TCG_TARGET_HAS_ext16u_i64) {
+ tcg_gen_ext16u_i64(ret, arg);
+ tcg_gen_shli_i64(ret, ret, ofs);
+ return;
+ }
+ break;
+ case 8:
+ if (TCG_TARGET_HAS_ext8u_i64) {
+ tcg_gen_ext8u_i64(ret, arg);
+ tcg_gen_shli_i64(ret, ret, ofs);
+ return;
+ }
+ break;
+ }
+ /* Otherwise prefer zero-extension over AND for code size. */
+ switch (ofs + len) {
+ case 32:
+ if (TCG_TARGET_HAS_ext32u_i64) {
+ tcg_gen_shli_i64(ret, arg, ofs);
+ tcg_gen_ext32u_i64(ret, ret);
+ return;
+ }
+ break;
+ case 16:
+ if (TCG_TARGET_HAS_ext16u_i64) {
+ tcg_gen_shli_i64(ret, arg, ofs);
+ tcg_gen_ext16u_i64(ret, ret);
+ return;
+ }
+ break;
+ case 8:
+ if (TCG_TARGET_HAS_ext8u_i64) {
+ tcg_gen_shli_i64(ret, arg, ofs);
+ tcg_gen_ext8u_i64(ret, ret);
+ return;
+ }
+ break;
+ }
+ tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
+ tcg_gen_shli_i64(ret, ret, ofs);
+ }
+}
+
+void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len)
+{
+ tcg_debug_assert(ofs < 64);
+ tcg_debug_assert(len > 0);
+ tcg_debug_assert(len <= 64);
+ tcg_debug_assert(ofs + len <= 64);
+
+ /* Canonicalize certain special cases, even if extract is supported. */
+ if (ofs + len == 64) {
+ tcg_gen_shri_i64(ret, arg, 64 - len);
+ return;
+ }
+ if (ofs == 0) {
+ tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
+ return;
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ /* Look for a 32-bit extract within one of the two words. */
+ if (ofs >= 32) {
+ tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ return;
+ }
+ if (ofs + len <= 32) {
+ tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
+ tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
+ return;
+ }
+ /* The field is split across two words. One double-word
+ shift is better than two double-word shifts. */
+ goto do_shift_and;
+ }
+
+ if (TCG_TARGET_HAS_extract_i64
+ && TCG_TARGET_extract_i64_valid(ofs, len)) {
+ tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
+ return;
+ }
+
+ /* Assume that zero-extension, if available, is cheaper than a shift. */
+ switch (ofs + len) {
+ case 32:
+ if (TCG_TARGET_HAS_ext32u_i64) {
+ tcg_gen_ext32u_i64(ret, arg);
+ tcg_gen_shri_i64(ret, ret, ofs);
+ return;
+ }
+ break;
+ case 16:
+ if (TCG_TARGET_HAS_ext16u_i64) {
+ tcg_gen_ext16u_i64(ret, arg);
+ tcg_gen_shri_i64(ret, ret, ofs);
+ return;
+ }
+ break;
+ case 8:
+ if (TCG_TARGET_HAS_ext8u_i64) {
+ tcg_gen_ext8u_i64(ret, arg);
+ tcg_gen_shri_i64(ret, ret, ofs);
+ return;
+ }
+ break;
+ }
+
+ /* ??? Ideally we'd know what values are available for immediate AND.
+ Assume that 8 bits are available, plus the special cases of 16 and 32,
+ so that we get ext8u, ext16u, and ext32u. */
+ switch (len) {
+ case 1 ... 8: case 16: case 32:
+ do_shift_and:
+ tcg_gen_shri_i64(ret, arg, ofs);
+ tcg_gen_andi_i64(ret, ret, (1ull << len) - 1);
+ break;
+ default:
+ tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
+ tcg_gen_shri_i64(ret, ret, 64 - len);
+ break;
+ }
+}
+
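
When TCG_TARGET_REG_BITS == 32, extract_i64 above degenerates to a single 32-bit extract whenever the field does not straddle bit 32: use the high word with ofs reduced by 32, or the low word with ofs unchanged, and zero the other half of the result. A plain-C spot check of that reduction (illustrative; the masks assume len < 32 resp. len < 64):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t extract32(uint32_t v, unsigned ofs, unsigned len)
    {
        return (v >> ofs) & ((1u << len) - 1);              /* len < 32 */
    }

    static uint64_t extract64(uint64_t v, unsigned ofs, unsigned len)
    {
        return (v >> ofs) & (((uint64_t)1 << len) - 1);     /* len < 64 */
    }

    int main(void)
    {
        uint64_t v = 0x0123456789abcdefull;
        uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);

        /* Field entirely within the high word: shift the offset down by 32. */
        assert(extract64(v, 40, 16) == extract32(hi, 8, 16));
        /* Field entirely within the low word: offsets coincide. */
        assert(extract64(v, 4, 12) == extract32(lo, 4, 12));
        return 0;
    }
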
+void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len)
+{
+ tcg_debug_assert(ofs < 64);
+ tcg_debug_assert(len > 0);
+ tcg_debug_assert(len <= 64);
+ tcg_debug_assert(ofs + len <= 64);
+
+ /* Canonicalize certain special cases, even if sextract is supported. */
+ if (ofs + len == 64) {
+ tcg_gen_sari_i64(ret, arg, 64 - len);
+ return;
+ }
+ if (ofs == 0) {
+ switch (len) {
+ case 32:
+ tcg_gen_ext32s_i64(ret, arg);
+ return;
+ case 16:
+ tcg_gen_ext16s_i64(ret, arg);
+ return;
+ case 8:
+ tcg_gen_ext8s_i64(ret, arg);
+ return;
+ }
+ }
+
+ if (TCG_TARGET_REG_BITS == 32) {
+ /* Look for a 32-bit extract within one of the two words. */
+ if (ofs >= 32) {
+ tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len);
+ } else if (ofs + len <= 32) {
+ tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len);
+ } else if (ofs == 0) {
+ tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg));
+ tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32);
+ return;
+ } else if (len > 32) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ /* Extract the bits for the high word normally. */
+ tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs + 32, len - 32);
+ /* Shift the field down for the low part. */
+ tcg_gen_shri_i64(ret, arg, ofs);
+ /* Overwrite the shift into the high part. */
+ tcg_gen_mov_i32(TCGV_HIGH(ret), t);
+ tcg_temp_free_i32(t);
+ return;
+ } else {
+ /* Shift the field down for the low part, such that the
+ field sits at the MSB. */
+ tcg_gen_shri_i64(ret, arg, ofs + len - 32);
+ /* Shift the field down from the MSB, sign extending. */
+ tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_LOW(ret), 32 - len);
+ }
+ /* Sign-extend the field from 32 bits. */
+ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
+ return;
+ }
+
+ if (TCG_TARGET_HAS_sextract_i64
+ && TCG_TARGET_extract_i64_valid(ofs, len)) {
+ tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len);
+ return;
+ }
+
+ /* Assume that sign-extension, if available, is cheaper than a shift. */
+ switch (ofs + len) {
+ case 32:
+ if (TCG_TARGET_HAS_ext32s_i64) {
+ tcg_gen_ext32s_i64(ret, arg);
+ tcg_gen_sari_i64(ret, ret, ofs);
+ return;
+ }
+ break;
+ case 16:
+ if (TCG_TARGET_HAS_ext16s_i64) {
+ tcg_gen_ext16s_i64(ret, arg);
+ tcg_gen_sari_i64(ret, ret, ofs);
+ return;
+ }
+ break;
+ case 8:
+ if (TCG_TARGET_HAS_ext8s_i64) {
+ tcg_gen_ext8s_i64(ret, arg);
+ tcg_gen_sari_i64(ret, ret, ofs);
+ return;
+ }
+ break;
+ }
+ switch (len) {
+ case 32:
+ if (TCG_TARGET_HAS_ext32s_i64) {
+ tcg_gen_shri_i64(ret, arg, ofs);
+ tcg_gen_ext32s_i64(ret, ret);
+ return;
+ }
+ break;
+ case 16:
+ if (TCG_TARGET_HAS_ext16s_i64) {
+ tcg_gen_shri_i64(ret, arg, ofs);
+ tcg_gen_ext16s_i64(ret, ret);
+ return;
+ }
+ break;
+ case 8:
+ if (TCG_TARGET_HAS_ext8s_i64) {
+ tcg_gen_shri_i64(ret, arg, ofs);
+ tcg_gen_ext8s_i64(ret, ret);
+ return;
+ }
+ break;
+ }
+ tcg_gen_shli_i64(ret, arg, 64 - len - ofs);
+ tcg_gen_sari_i64(ret, ret, 64 - len);
+}
+
void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2)
{
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 6d044b7c5b..c68e300a68 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -286,12 +286,24 @@ void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
+void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
+void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
+void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg);
+void tcg_gen_ctpop_i32(TCGv_i32 a1, TCGv_i32 a2);
void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
unsigned int ofs, unsigned int len);
+void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
+ unsigned int ofs, unsigned int len);
void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *);
void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *);
void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
@@ -463,12 +475,24 @@ void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
+void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
+void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
+void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg);
+void tcg_gen_ctpop_i64(TCGv_i64 a1, TCGv_i64 a2);
void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
unsigned int ofs, unsigned int len);
+void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len);
+void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
+ unsigned int ofs, unsigned int len);
void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *);
void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *);
void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
@@ -946,11 +970,20 @@ void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
#define tcg_gen_nand_tl tcg_gen_nand_i64
#define tcg_gen_nor_tl tcg_gen_nor_i64
#define tcg_gen_orc_tl tcg_gen_orc_i64
+#define tcg_gen_clz_tl tcg_gen_clz_i64
+#define tcg_gen_ctz_tl tcg_gen_ctz_i64
+#define tcg_gen_clzi_tl tcg_gen_clzi_i64
+#define tcg_gen_ctzi_tl tcg_gen_ctzi_i64
+#define tcg_gen_clrsb_tl tcg_gen_clrsb_i64
+#define tcg_gen_ctpop_tl tcg_gen_ctpop_i64
#define tcg_gen_rotl_tl tcg_gen_rotl_i64
#define tcg_gen_rotli_tl tcg_gen_rotli_i64
#define tcg_gen_rotr_tl tcg_gen_rotr_i64
#define tcg_gen_rotri_tl tcg_gen_rotri_i64
#define tcg_gen_deposit_tl tcg_gen_deposit_i64
+#define tcg_gen_deposit_z_tl tcg_gen_deposit_z_i64
+#define tcg_gen_extract_tl tcg_gen_extract_i64
+#define tcg_gen_sextract_tl tcg_gen_sextract_i64
#define tcg_const_tl tcg_const_i64
#define tcg_const_local_tl tcg_const_local_i64
#define tcg_gen_movcond_tl tcg_gen_movcond_i64
@@ -1034,11 +1067,20 @@ void tcg_gen_atomic_xor_fetch_i64(TCGv_i64, TCGv, TCGv_i64, TCGArg, TCGMemOp);
#define tcg_gen_nand_tl tcg_gen_nand_i32
#define tcg_gen_nor_tl tcg_gen_nor_i32
#define tcg_gen_orc_tl tcg_gen_orc_i32
+#define tcg_gen_clz_tl tcg_gen_clz_i32
+#define tcg_gen_ctz_tl tcg_gen_ctz_i32
+#define tcg_gen_clzi_tl tcg_gen_clzi_i32
+#define tcg_gen_ctzi_tl tcg_gen_ctzi_i32
+#define tcg_gen_clrsb_tl tcg_gen_clrsb_i32
+#define tcg_gen_ctpop_tl tcg_gen_ctpop_i32
#define tcg_gen_rotl_tl tcg_gen_rotl_i32
#define tcg_gen_rotli_tl tcg_gen_rotli_i32
#define tcg_gen_rotr_tl tcg_gen_rotr_i32
#define tcg_gen_rotri_tl tcg_gen_rotri_i32
#define tcg_gen_deposit_tl tcg_gen_deposit_i32
+#define tcg_gen_deposit_z_tl tcg_gen_deposit_z_i32
+#define tcg_gen_extract_tl tcg_gen_extract_i32
+#define tcg_gen_sextract_tl tcg_gen_sextract_i32
#define tcg_const_tl tcg_const_i32
#define tcg_const_local_tl tcg_const_local_i32
#define tcg_gen_movcond_tl tcg_gen_movcond_i32
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
index 45528d2192..f06f89405e 100644
--- a/tcg/tcg-opc.h
+++ b/tcg/tcg-opc.h
@@ -77,6 +77,8 @@ DEF(sar_i32, 1, 2, 0, 0)
DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32))
+DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32))
+DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32))
DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END)
@@ -102,6 +104,9 @@ DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32))
DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32))
DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32))
DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32))
+DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32))
+DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32))
+DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))
DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
DEF(movi_i64, 1, 0, 1, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
@@ -139,6 +144,8 @@ DEF(sar_i64, 1, 2, 0, IMPL64)
DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))
+DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64))
+DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64))
/* size changing ops */
DEF(ext_i32_i64, 1, 1, 0, IMPL64)
@@ -167,6 +174,9 @@ DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64))
DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64))
DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64))
DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64))
+DEF(clz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_clz_i64))
+DEF(ctz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctz_i64))
+DEF(ctpop_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctpop_i64))
DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64))
DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64))
diff --git a/tcg/tcg-runtime.h b/tcg/tcg-runtime.h
index 1deb86a099..114ea6fecf 100644
--- a/tcg/tcg-runtime.h
+++ b/tcg/tcg-runtime.h
@@ -15,6 +15,15 @@ DEF_HELPER_FLAGS_2(sar_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
DEF_HELPER_FLAGS_2(mulsh_i64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
DEF_HELPER_FLAGS_2(muluh_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(clz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(ctz_i32, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(clz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(ctz_i64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_1(clrsb_i32, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(clrsb_i64, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(ctpop_i32, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(ctpop_i64, TCG_CALL_NO_RWG_SE, i64, i64)
+
DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
#ifdef CONFIG_SOFTMMU
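
The new clz/ctz helpers, like the clz/ctz opcodes added in tcg-opc.h above, take two operands: the value to count and the result to return when that value is zero. A plain-C model of that contract (an illustrative sketch, not the actual tcg-runtime.c implementation):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t model_clz_i32(uint32_t arg, uint32_t zero_val)
    {
        return arg ? (uint32_t)__builtin_clz(arg) : zero_val;
    }

    int main(void)
    {
        assert(model_clz_i32(0x00ff0000, 32) == 8);
        assert(model_clz_i32(0, 32) == 32);
        assert(model_clz_i32(0, 0xffffffffu) == 0xffffffffu); /* e.g. a target that wants -1 */
        return 0;
    }
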
diff --git a/tcg/tcg.c b/tcg/tcg.c
index aabf94f365..cb898f1636 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -62,6 +62,7 @@
/* Forward declarations for functions declared in tcg-target.inc.c and
used here. */
static void tcg_target_init(TCGContext *s);
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode);
static void tcg_target_qemu_prologue(TCGContext *s);
static void patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend);
@@ -95,7 +96,8 @@ static void tcg_register_jit_int(void *buf, size_t size,
__attribute__((unused));
/* Forward declarations for functions declared and used in tcg-target.inc.c. */
-static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str);
+static const char *target_parse_constraint(TCGArgConstraint *ct,
+ const char *ct_str, TCGType type);
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
intptr_t arg2);
static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
@@ -319,6 +321,7 @@ static const TCGHelperInfo all_helpers[] = {
};
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
+static void process_op_defs(TCGContext *s);
void tcg_context_init(TCGContext *s)
{
@@ -362,6 +365,7 @@ void tcg_context_init(TCGContext *s)
}
tcg_target_init(s);
+ process_op_defs(s);
/* Reverse the order of the saved registers, assuming they're all at
the start of tcg_target_reg_alloc_order. */
@@ -1221,59 +1225,68 @@ static void sort_constraints(TCGOpDef *def, int start, int n)
}
}
-void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
+static void process_op_defs(TCGContext *s)
{
TCGOpcode op;
- TCGOpDef *def;
- const char *ct_str;
- int i, nb_args;
- for(;;) {
- if (tdefs->op == (TCGOpcode)-1)
- break;
- op = tdefs->op;
- tcg_debug_assert((unsigned)op < NB_OPS);
- def = &tcg_op_defs[op];
-#if defined(CONFIG_DEBUG_TCG)
- /* Duplicate entry in op definitions? */
- tcg_debug_assert(!def->used);
- def->used = 1;
-#endif
+ for (op = 0; op < NB_OPS; op++) {
+ TCGOpDef *def = &tcg_op_defs[op];
+ const TCGTargetOpDef *tdefs;
+ TCGType type;
+ int i, nb_args;
+
+ if (def->flags & TCG_OPF_NOT_PRESENT) {
+ continue;
+ }
+
nb_args = def->nb_iargs + def->nb_oargs;
- for(i = 0; i < nb_args; i++) {
- ct_str = tdefs->args_ct_str[i];
- /* Incomplete TCGTargetOpDef entry? */
+ if (nb_args == 0) {
+ continue;
+ }
+
+ tdefs = tcg_target_op_def(op);
+ /* Missing TCGTargetOpDef entry. */
+ tcg_debug_assert(tdefs != NULL);
+
+ type = (def->flags & TCG_OPF_64BIT ? TCG_TYPE_I64 : TCG_TYPE_I32);
+ for (i = 0; i < nb_args; i++) {
+ const char *ct_str = tdefs->args_ct_str[i];
+ /* Incomplete TCGTargetOpDef entry. */
tcg_debug_assert(ct_str != NULL);
+
tcg_regset_clear(def->args_ct[i].u.regs);
def->args_ct[i].ct = 0;
- if (ct_str[0] >= '0' && ct_str[0] <= '9') {
- int oarg;
- oarg = ct_str[0] - '0';
- tcg_debug_assert(oarg < def->nb_oargs);
- tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
- /* TCG_CT_ALIAS is for the output arguments. The input
- argument is tagged with TCG_CT_IALIAS. */
- def->args_ct[i] = def->args_ct[oarg];
- def->args_ct[oarg].ct = TCG_CT_ALIAS;
- def->args_ct[oarg].alias_index = i;
- def->args_ct[i].ct |= TCG_CT_IALIAS;
- def->args_ct[i].alias_index = oarg;
- } else {
- for(;;) {
- if (*ct_str == '\0')
- break;
- switch(*ct_str) {
- case 'i':
- def->args_ct[i].ct |= TCG_CT_CONST;
- ct_str++;
- break;
- default:
- if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
- fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
- ct_str, i, def->name);
- exit(1);
- }
+ while (*ct_str != '\0') {
+ switch(*ct_str) {
+ case '0' ... '9':
+ {
+ int oarg = *ct_str - '0';
+ tcg_debug_assert(ct_str == tdefs->args_ct_str[i]);
+ tcg_debug_assert(oarg < def->nb_oargs);
+ tcg_debug_assert(def->args_ct[oarg].ct & TCG_CT_REG);
+ /* TCG_CT_ALIAS is for the output arguments.
+ The input is tagged with TCG_CT_IALIAS. */
+ def->args_ct[i] = def->args_ct[oarg];
+ def->args_ct[oarg].ct |= TCG_CT_ALIAS;
+ def->args_ct[oarg].alias_index = i;
+ def->args_ct[i].ct |= TCG_CT_IALIAS;
+ def->args_ct[i].alias_index = oarg;
}
+ ct_str++;
+ break;
+ case '&':
+ def->args_ct[i].ct |= TCG_CT_NEWREG;
+ ct_str++;
+ break;
+ case 'i':
+ def->args_ct[i].ct |= TCG_CT_CONST;
+ ct_str++;
+ break;
+ default:
+ ct_str = target_parse_constraint(&def->args_ct[i],
+ ct_str, type);
+ /* Typo in TCGTargetOpDef constraint. */
+ tcg_debug_assert(ct_str != NULL);
}
}
}
@@ -1284,42 +1297,7 @@ void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
/* sort the constraints (XXX: this is just an heuristic) */
sort_constraints(def, 0, def->nb_oargs);
sort_constraints(def, def->nb_oargs, def->nb_iargs);
-
-#if 0
- {
- int i;
-
- printf("%s: sorted=", def->name);
- for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
- printf(" %d", def->sorted_args[i]);
- printf("\n");
- }
-#endif
- tdefs++;
}
-
-#if defined(CONFIG_DEBUG_TCG)
- i = 0;
- for (op = 0; op < tcg_op_defs_max; op++) {
- const TCGOpDef *def = &tcg_op_defs[op];
- if (def->flags & TCG_OPF_NOT_PRESENT) {
- /* Wrong entry in op definitions? */
- if (def->used) {
- fprintf(stderr, "Invalid op definition for %s\n", def->name);
- i = 1;
- }
- } else {
- /* Missing entry in op definitions? */
- if (!def->used) {
- fprintf(stderr, "Missing op definition for %s\n", def->name);
- i = 1;
- }
- }
- }
- if (i == 1) {
- tcg_abort();
- }
-#endif
}
void tcg_op_remove(TCGContext *s, TCGOp *op)
@@ -2208,7 +2186,8 @@ static void tcg_reg_alloc_op(TCGContext *s,
const TCGOpDef *def, TCGOpcode opc,
const TCGArg *args, TCGLifeData arg_life)
{
- TCGRegSet allocated_regs;
+ TCGRegSet i_allocated_regs;
+ TCGRegSet o_allocated_regs;
int i, k, nb_iargs, nb_oargs;
TCGReg reg;
TCGArg arg;
@@ -2225,8 +2204,10 @@ static void tcg_reg_alloc_op(TCGContext *s,
args + nb_oargs + nb_iargs,
sizeof(TCGArg) * def->nb_cargs);
+ tcg_regset_set(i_allocated_regs, s->reserved_regs);
+ tcg_regset_set(o_allocated_regs, s->reserved_regs);
+
/* satisfy input constraints */
- tcg_regset_set(allocated_regs, s->reserved_regs);
for(k = 0; k < nb_iargs; k++) {
i = def->sorted_args[nb_oargs + k];
arg = args[i];
@@ -2241,7 +2222,7 @@ static void tcg_reg_alloc_op(TCGContext *s,
goto iarg_end;
}
- temp_load(s, ts, arg_ct->u.regs, allocated_regs);
+ temp_load(s, ts, arg_ct->u.regs, i_allocated_regs);
if (arg_ct->ct & TCG_CT_IALIAS) {
if (ts->fixed_reg) {
@@ -2275,13 +2256,13 @@ static void tcg_reg_alloc_op(TCGContext *s,
allocate_in_reg:
/* allocate a new register matching the constraint
and move the temporary register into it */
- reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, i_allocated_regs,
ts->indirect_base);
tcg_out_mov(s, ts->type, reg, ts->reg);
}
new_args[i] = reg;
const_args[i] = 0;
- tcg_regset_set_reg(allocated_regs, reg);
+ tcg_regset_set_reg(i_allocated_regs, reg);
iarg_end: ;
}
@@ -2293,31 +2274,35 @@ static void tcg_reg_alloc_op(TCGContext *s,
}
if (def->flags & TCG_OPF_BB_END) {
- tcg_reg_alloc_bb_end(s, allocated_regs);
+ tcg_reg_alloc_bb_end(s, i_allocated_regs);
} else {
if (def->flags & TCG_OPF_CALL_CLOBBER) {
/* XXX: permit generic clobber register list ? */
for (i = 0; i < TCG_TARGET_NB_REGS; i++) {
if (tcg_regset_test_reg(tcg_target_call_clobber_regs, i)) {
- tcg_reg_free(s, i, allocated_regs);
+ tcg_reg_free(s, i, i_allocated_regs);
}
}
}
if (def->flags & TCG_OPF_SIDE_EFFECTS) {
/* sync globals if the op has side effects and might trigger
an exception. */
- sync_globals(s, allocated_regs);
+ sync_globals(s, i_allocated_regs);
}
/* satisfy the output constraints */
- tcg_regset_set(allocated_regs, s->reserved_regs);
for(k = 0; k < nb_oargs; k++) {
i = def->sorted_args[k];
arg = args[i];
arg_ct = &def->args_ct[i];
ts = &s->temps[arg];
- if (arg_ct->ct & TCG_CT_ALIAS) {
+ if ((arg_ct->ct & TCG_CT_ALIAS)
+ && !const_args[arg_ct->alias_index]) {
reg = new_args[arg_ct->alias_index];
+ } else if (arg_ct->ct & TCG_CT_NEWREG) {
+ reg = tcg_reg_alloc(s, arg_ct->u.regs,
+ i_allocated_regs | o_allocated_regs,
+ ts->indirect_base);
} else {
/* if fixed register, we try to use it */
reg = ts->reg;
@@ -2325,10 +2310,10 @@ static void tcg_reg_alloc_op(TCGContext *s,
tcg_regset_test_reg(arg_ct->u.regs, reg)) {
goto oarg_end;
}
- reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs,
+ reg = tcg_reg_alloc(s, arg_ct->u.regs, o_allocated_regs,
ts->indirect_base);
}
- tcg_regset_set_reg(allocated_regs, reg);
+ tcg_regset_set_reg(o_allocated_regs, reg);
/* if a fixed register is used, then a move will be done afterwards */
if (!ts->fixed_reg) {
if (ts->val_type == TEMP_VAL_REG) {
@@ -2357,7 +2342,7 @@ static void tcg_reg_alloc_op(TCGContext *s,
tcg_out_mov(s, ts->type, ts->reg, reg);
}
if (NEED_SYNC_ARG(i)) {
- temp_sync(s, ts, allocated_regs, IS_DEAD_ARG(i));
+ temp_sync(s, ts, o_allocated_regs, IS_DEAD_ARG(i));
} else if (IS_DEAD_ARG(i)) {
temp_dead(s, ts);
}
diff --git a/tcg/tcg.h b/tcg/tcg.h
index a35e4c4fd4..631c6f69b1 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -111,7 +111,12 @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
+#define TCG_TARGET_HAS_clz_i64 0
+#define TCG_TARGET_HAS_ctz_i64 0
+#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_deposit_i64 0
+#define TCG_TARGET_HAS_extract_i64 0
+#define TCG_TARGET_HAS_sextract_i64 0
#define TCG_TARGET_HAS_movcond_i64 0
#define TCG_TARGET_HAS_add2_i64 0
#define TCG_TARGET_HAS_sub2_i64 0
@@ -130,6 +135,12 @@ typedef uint64_t TCGRegSet;
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif
+#ifndef TCG_TARGET_extract_i32_valid
+#define TCG_TARGET_extract_i32_valid(ofs, len) 1
+#endif
+#ifndef TCG_TARGET_extract_i64_valid
+#define TCG_TARGET_extract_i64_valid(ofs, len) 1
+#endif
/* Only one of DIV or DIV2 should be defined. */
#if defined(TCG_TARGET_HAS_div_i32)
@@ -843,6 +854,7 @@ void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);
#define TCG_CT_ALIAS 0x80
#define TCG_CT_IALIAS 0x40
+#define TCG_CT_NEWREG 0x20 /* output requires a new register */
#define TCG_CT_REG 0x01
#define TCG_CT_CONST 0x02 /* any constant of register size */
@@ -897,8 +909,6 @@ do {\
abort();\
} while (0)
-void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);
-
#if UINTPTR_MAX == UINT32_MAX
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n))
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 868228b2e7..838bf3a858 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -69,9 +69,14 @@
#define TCG_TARGET_HAS_ext16u_i32 1
#define TCG_TARGET_HAS_andc_i32 0
#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_extract_i32 0
+#define TCG_TARGET_HAS_sextract_i32 0
#define TCG_TARGET_HAS_eqv_i32 0
#define TCG_TARGET_HAS_nand_i32 0
#define TCG_TARGET_HAS_nor_i32 0
+#define TCG_TARGET_HAS_clz_i32 0
+#define TCG_TARGET_HAS_ctz_i32 0
+#define TCG_TARGET_HAS_ctpop_i32 0
#define TCG_TARGET_HAS_neg_i32 1
#define TCG_TARGET_HAS_not_i32 1
#define TCG_TARGET_HAS_orc_i32 0
@@ -88,6 +93,8 @@
#define TCG_TARGET_HAS_bswap32_i64 1
#define TCG_TARGET_HAS_bswap64_i64 1
#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_extract_i64 0
+#define TCG_TARGET_HAS_sextract_i64 0
#define TCG_TARGET_HAS_div_i64 0
#define TCG_TARGET_HAS_rem_i64 0
#define TCG_TARGET_HAS_ext8s_i64 1
@@ -100,6 +107,9 @@
#define TCG_TARGET_HAS_eqv_i64 0
#define TCG_TARGET_HAS_nand_i64 0
#define TCG_TARGET_HAS_nor_i64 0
+#define TCG_TARGET_HAS_clz_i64 0
+#define TCG_TARGET_HAS_ctz_i64 0
+#define TCG_TARGET_HAS_ctpop_i64 0
#define TCG_TARGET_HAS_neg_i64 1
#define TCG_TARGET_HAS_not_i64 1
#define TCG_TARGET_HAS_orc_i64 0
diff --git a/tcg/tci/tcg-target.inc.c b/tcg/tci/tcg-target.inc.c
index 9dbf4d5512..26ee9b1664 100644
--- a/tcg/tci/tcg-target.inc.c
+++ b/tcg/tci/tcg-target.inc.c
@@ -259,6 +259,18 @@ static const TCGTargetOpDef tcg_target_op_defs[] = {
{ -1 },
};
+static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
+{
+ int i, n = ARRAY_SIZE(tcg_target_op_defs);
+
+ for (i = 0; i < n; ++i) {
+ if (tcg_target_op_defs[i].op == op) {
+ return &tcg_target_op_defs[i];
+ }
+ }
+ return NULL;
+}
+
static const int tcg_target_reg_alloc_order[] = {
TCG_REG_R0,
TCG_REG_R1,
@@ -372,10 +384,10 @@ static void patch_reloc(tcg_insn_unit *code_ptr, int type,
}
/* Parse target specific constraints. */
-static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
+static const char *target_parse_constraint(TCGArgConstraint *ct,
+ const char *ct_str, TCGType type)
{
- const char *ct_str = *pct_str;
- switch (ct_str[0]) {
+ switch (*ct_str++) {
case 'r':
case 'L': /* qemu_ld constraint */
case 'S': /* qemu_st constraint */
@@ -383,11 +395,9 @@ static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
tcg_regset_set32(ct->u.regs, 0, BIT(TCG_TARGET_NB_REGS) - 1);
break;
default:
- return -1;
+ return NULL;
}
- ct_str++;
- *pct_str = ct_str;
- return 0;
+ return ct_str;
}
#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
@@ -875,7 +885,6 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_clear(s->reserved_regs);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
- tcg_add_target_add_op_defs(tcg_target_op_defs);
/* We use negative offsets from "sp" so that we can distinguish
stores that might pretend to be call arguments. */
diff --git a/tests/.gitignore b/tests/.gitignore
index e9b182e2bd..7357d0a0d4 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -13,6 +13,7 @@ rcutorture
test-aio
test-base64
test-bitops
+test-bitcnt
test-blockjob
test-blockjob-txn
test-bufferiszero
diff --git a/tests/Makefile.include b/tests/Makefile.include
index 4841d582a1..22ea256e94 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -81,6 +81,7 @@ gcov-files-test-qht-y = util/qht.c
check-unit-y += tests/test-qht-par$(EXESUF)
gcov-files-test-qht-par-y = util/qht.c
check-unit-y += tests/test-bitops$(EXESUF)
+check-unit-y += tests/test-bitcnt$(EXESUF)
check-unit-$(CONFIG_HAS_GLIB_SUBPROCESS_TESTS) += tests/test-qdev-global-props$(EXESUF)
check-unit-y += tests/check-qom-interface$(EXESUF)
gcov-files-check-qom-interface-y = qom/object.c
@@ -351,6 +352,24 @@ qapi-schema += base-cycle-direct.json
qapi-schema += base-cycle-indirect.json
qapi-schema += command-int.json
qapi-schema += comments.json
+qapi-schema += doc-bad-args.json
+qapi-schema += doc-bad-symbol.json
+qapi-schema += doc-duplicated-arg.json
+qapi-schema += doc-duplicated-return.json
+qapi-schema += doc-duplicated-since.json
+qapi-schema += doc-empty-arg.json
+qapi-schema += doc-empty-section.json
+qapi-schema += doc-empty-symbol.json
+qapi-schema += doc-interleaved-section.json
+qapi-schema += doc-invalid-end.json
+qapi-schema += doc-invalid-end2.json
+qapi-schema += doc-invalid-return.json
+qapi-schema += doc-invalid-section.json
+qapi-schema += doc-invalid-start.json
+qapi-schema += doc-missing-colon.json
+qapi-schema += doc-missing-expr.json
+qapi-schema += doc-missing-space.json
+qapi-schema += doc-optional.json
qapi-schema += double-data.json
qapi-schema += double-type.json
qapi-schema += duplicate-key.json
@@ -444,6 +463,8 @@ qapi-schema += union-optional-branch.json
qapi-schema += union-unknown.json
qapi-schema += unknown-escape.json
qapi-schema += unknown-expr-key.json
+
+
check-qapi-schema-y := $(addprefix tests/qapi-schema/, $(qapi-schema))
GENERATED_HEADERS += tests/test-qapi-types.h tests/test-qapi-visit.h \
@@ -516,6 +537,7 @@ tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \
hw/core/bus.o \
hw/core/irq.o \
hw/core/fw-path-provider.o \
+ hw/core/reset.o \
$(test-qapi-obj-y)
tests/test-vmstate$(EXESUF): tests/test-vmstate.o \
migration/vmstate.o migration/qemu-file.o \
@@ -571,6 +593,7 @@ tests/test-opts-visitor$(EXESUF): tests/test-opts-visitor.o $(test-qapi-obj-y)
tests/test-mul64$(EXESUF): tests/test-mul64.o $(test-util-obj-y)
tests/test-bitops$(EXESUF): tests/test-bitops.o $(test-util-obj-y)
+tests/test-bitcnt$(EXESUF): tests/test-bitcnt.o $(test-util-obj-y)
tests/test-crypto-hash$(EXESUF): tests/test-crypto-hash.o $(test-crypto-obj-y)
tests/test-crypto-hmac$(EXESUF): tests/test-crypto-hmac.o $(test-crypto-obj-y)
tests/test-crypto-cipher$(EXESUF): tests/test-crypto-cipher.o $(test-crypto-obj-y)
@@ -689,7 +712,7 @@ tests/test-filter-mirror$(EXESUF): tests/test-filter-mirror.o $(qtest-obj-y)
tests/test-filter-redirector$(EXESUF): tests/test-filter-redirector.o $(qtest-obj-y)
tests/test-x86-cpuid-compat$(EXESUF): tests/test-x86-cpuid-compat.o $(qtest-obj-y)
tests/ivshmem-test$(EXESUF): tests/ivshmem-test.o contrib/ivshmem-server/ivshmem-server.o $(libqos-pc-obj-y)
-tests/vhost-user-bridge$(EXESUF): tests/vhost-user-bridge.o
+tests/vhost-user-bridge$(EXESUF): tests/vhost-user-bridge.o contrib/libvhost-user/libvhost-user.o $(test-util-obj-y)
tests/test-uuid$(EXESUF): tests/test-uuid.o $(test-util-obj-y)
tests/test-arm-mptimer$(EXESUF): tests/test-arm-mptimer.o
diff --git a/tests/acpi-test-data/pc/DSDT b/tests/acpi-test-data/pc/DSDT
index 8053d71105..15c3135d65 100644
--- a/tests/acpi-test-data/pc/DSDT
+++ b/tests/acpi-test-data/pc/DSDT
Binary files differ
diff --git a/tests/acpi-test-data/pc/DSDT.bridge b/tests/acpi-test-data/pc/DSDT.bridge
index 850e71a973..d38586c95b 100644
--- a/tests/acpi-test-data/pc/DSDT.bridge
+++ b/tests/acpi-test-data/pc/DSDT.bridge
Binary files differ
diff --git a/tests/acpi-test-data/pc/DSDT.cphp b/tests/acpi-test-data/pc/DSDT.cphp
index 9f405cfd83..2dd70bf952 100644
--- a/tests/acpi-test-data/pc/DSDT.cphp
+++ b/tests/acpi-test-data/pc/DSDT.cphp
Binary files differ
diff --git a/tests/acpi-test-data/pc/DSDT.ipmikcs b/tests/acpi-test-data/pc/DSDT.ipmikcs
index 8ac48afb6a..2796d96b0e 100644
--- a/tests/acpi-test-data/pc/DSDT.ipmikcs
+++ b/tests/acpi-test-data/pc/DSDT.ipmikcs
Binary files differ
diff --git a/tests/acpi-test-data/pc/DSDT.memhp b/tests/acpi-test-data/pc/DSDT.memhp
new file mode 100644
index 0000000000..53f6d58243
--- /dev/null
+++ b/tests/acpi-test-data/pc/DSDT.memhp
Binary files differ
diff --git a/tests/acpi-test-data/pc/SRAT.memhp b/tests/acpi-test-data/pc/SRAT.memhp
new file mode 100644
index 0000000000..66ce9a8981
--- /dev/null
+++ b/tests/acpi-test-data/pc/SRAT.memhp
Binary files differ
diff --git a/tests/acpi-test-data/q35/DSDT b/tests/acpi-test-data/q35/DSDT
index 58fbb3d2e2..d11567c3dc 100644
--- a/tests/acpi-test-data/q35/DSDT
+++ b/tests/acpi-test-data/q35/DSDT
Binary files differ
diff --git a/tests/acpi-test-data/q35/DSDT.bridge b/tests/acpi-test-data/q35/DSDT.bridge
index c392802a95..412a6e9104 100644
--- a/tests/acpi-test-data/q35/DSDT.bridge
+++ b/tests/acpi-test-data/q35/DSDT.bridge
Binary files differ
diff --git a/tests/acpi-test-data/q35/DSDT.cphp b/tests/acpi-test-data/q35/DSDT.cphp
index a0ce6b3264..79902d0d30 100644
--- a/tests/acpi-test-data/q35/DSDT.cphp
+++ b/tests/acpi-test-data/q35/DSDT.cphp
Binary files differ
diff --git a/tests/acpi-test-data/q35/DSDT.ipmibt b/tests/acpi-test-data/q35/DSDT.ipmibt
index 0ea38e1e72..b658329c5b 100644
--- a/tests/acpi-test-data/q35/DSDT.ipmibt
+++ b/tests/acpi-test-data/q35/DSDT.ipmibt
Binary files differ
diff --git a/tests/acpi-test-data/q35/DSDT.memhp b/tests/acpi-test-data/q35/DSDT.memhp
new file mode 100644
index 0000000000..e46c1fb5a2
--- /dev/null
+++ b/tests/acpi-test-data/q35/DSDT.memhp
Binary files differ
diff --git a/tests/acpi-test-data/q35/SRAT.memhp b/tests/acpi-test-data/q35/SRAT.memhp
new file mode 100644
index 0000000000..66ce9a8981
--- /dev/null
+++ b/tests/acpi-test-data/q35/SRAT.memhp
Binary files differ
diff --git a/tests/bios-tables-test.c b/tests/bios-tables-test.c
index 812f830539..54048050c0 100644
--- a/tests/bios-tables-test.c
+++ b/tests/bios-tables-test.c
@@ -867,6 +867,28 @@ static void test_acpi_piix4_tcg_ipmi(void)
free_test_data(&data);
}
+static void test_acpi_q35_tcg_memhp(void)
+{
+ test_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.machine = MACHINE_Q35;
+ data.variant = ".memhp";
+ test_acpi_one(" -m 128,slots=3,maxmem=1G -numa node", &data);
+ free_test_data(&data);
+}
+
+static void test_acpi_piix4_tcg_memhp(void)
+{
+ test_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.machine = MACHINE_PC;
+ data.variant = ".memhp";
+ test_acpi_one(" -m 128,slots=3,maxmem=1G -numa node", &data);
+ free_test_data(&data);
+}
+
int main(int argc, char *argv[])
{
const char *arch = qtest_get_arch();
@@ -887,6 +909,8 @@ int main(int argc, char *argv[])
qtest_add_func("acpi/q35/ipmi", test_acpi_q35_tcg_ipmi);
qtest_add_func("acpi/piix4/cpuhp", test_acpi_piix4_tcg_cphp);
qtest_add_func("acpi/q35/cpuhp", test_acpi_q35_tcg_cphp);
+ qtest_add_func("acpi/piix4/memhp", test_acpi_piix4_tcg_memhp);
+ qtest_add_func("acpi/q35/memhp", test_acpi_q35_tcg_memhp);
}
ret = g_test_run();
boot_sector_cleanup(disk);
diff --git a/tests/device-introspect-test.c b/tests/device-introspect-test.c
index 37debc11f9..c5637cc406 100644
--- a/tests/device-introspect-test.c
+++ b/tests/device-introspect-test.c
@@ -20,18 +20,24 @@
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/qmp/qstring.h"
+#include "qapi/qmp/qbool.h"
+#include "qapi/qmp/qdict.h"
#include "libqtest.h"
const char common_args[] = "-nodefaults -machine none";
-static QList *device_type_list(bool abstract)
+static QList *qom_list_types(const char *implements, bool abstract)
{
QDict *resp;
QList *ret;
+ QDict *args = qdict_new();
+ qdict_put(args, "abstract", qbool_from_bool(abstract));
+ if (implements) {
+ qdict_put(args, "implements", qstring_from_str(implements));
+ }
resp = qmp("{'execute': 'qom-list-types',"
- " 'arguments': {'implements': 'device', 'abstract': %i}}",
- abstract);
+ " 'arguments': %p }", args);
g_assert(qdict_haskey(resp, "return"));
ret = qdict_get_qlist(resp, "return");
QINCREF(ret);
@@ -39,6 +45,11 @@ static QList *device_type_list(bool abstract)
return ret;
}
+static QList *device_type_list(bool abstract)
+{
+ return qom_list_types("device", abstract);
+}
+
static void test_one_device(const char *type)
{
QDict *resp;
@@ -110,6 +121,48 @@ static void test_device_intro_concrete(void)
qtest_end();
}
+static void test_abstract_interfaces(void)
+{
+ QList *all_types;
+ QList *obj_types;
+ QListEntry *ae;
+
+ qtest_start(common_args);
+ /* qom-list-types implements=interface would return any type
+ * that implements _any_ interface (not just interface types),
+ * so use a trick to find the interface type names:
+ * - list all object types
+ * - list all types, and look for items that are not
+ * on the first list
+ */
+ all_types = qom_list_types(NULL, false);
+ obj_types = qom_list_types("object", false);
+
+ QLIST_FOREACH_ENTRY(all_types, ae) {
+ QDict *at = qobject_to_qdict(qlist_entry_obj(ae));
+ const char *aname = qdict_get_str(at, "name");
+ QListEntry *oe;
+ const char *found = NULL;
+
+ QLIST_FOREACH_ENTRY(obj_types, oe) {
+ QDict *ot = qobject_to_qdict(qlist_entry_obj(oe));
+ const char *oname = qdict_get_str(ot, "name");
+ if (!strcmp(aname, oname)) {
+ found = oname;
+ break;
+ }
+ }
+
+ /* Using g_assert_cmpstr() will give more useful failure
+ * messages than g_assert(found) */
+ g_assert_cmpstr(aname, ==, found);
+ }
+
+ QDECREF(all_types);
+ QDECREF(obj_types);
+ qtest_end();
+}
+
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
@@ -118,6 +171,7 @@ int main(int argc, char **argv)
qtest_add_func("device/introspect/none", test_device_intro_none);
qtest_add_func("device/introspect/abstract", test_device_intro_abstract);
qtest_add_func("device/introspect/concrete", test_device_intro_concrete);
+ qtest_add_func("device/introspect/abstract-interfaces", test_abstract_interfaces);
return g_test_run();
}
diff --git a/tests/libqtest.c b/tests/libqtest.c
index 6f6975248f..d8fba6647a 100644
--- a/tests/libqtest.c
+++ b/tests/libqtest.c
@@ -768,6 +768,10 @@ void qtest_memread(QTestState *s, uint64_t addr, void *data, size_t size)
gchar **args;
size_t i;
+ if (!size) {
+ return;
+ }
+
qtest_sendf(s, "read 0x%" PRIx64 " 0x%zx\n", addr, size);
args = qtest_rsp(s, 2);
@@ -858,7 +862,13 @@ void qtest_memwrite(QTestState *s, uint64_t addr, const void *data, size_t size)
{
const uint8_t *ptr = data;
size_t i;
- char *enc = g_malloc(2 * size + 1);
+ char *enc;
+
+ if (!size) {
+ return;
+ }
+
+ enc = g_malloc(2 * size + 1);
for (i = 0; i < size; i++) {
sprintf(&enc[i * 2], "%02x", ptr[i]);
diff --git a/tests/m25p80-test.c b/tests/m25p80-test.c
index cb7ec81f1a..244aa33dd9 100644
--- a/tests/m25p80-test.c
+++ b/tests/m25p80-test.c
@@ -36,6 +36,9 @@
#define CRTL_EXTENDED0 0 /* 32 bit addressing for SPI */
#define R_CTRL0 0x10
#define CTRL_CE_STOP_ACTIVE (1 << 2)
+#define CTRL_READMODE 0x0
+#define CTRL_FREADMODE 0x1
+#define CTRL_WRITEMODE 0x2
#define CTRL_USERMODE 0x3
#define ASPEED_FMC_BASE 0x1E620000
@@ -50,6 +53,8 @@ enum {
READ = 0x03,
PP = 0x02,
WREN = 0x6,
+ RESET_ENABLE = 0x66,
+ RESET_MEMORY = 0x99,
EN_4BYTE_ADDR = 0xB7,
ERASE_SECTOR = 0xd8,
};
@@ -76,6 +81,30 @@ static void spi_conf(uint32_t value)
writel(ASPEED_FMC_BASE + R_CONF, conf);
}
+static void spi_conf_remove(uint32_t value)
+{
+ uint32_t conf = readl(ASPEED_FMC_BASE + R_CONF);
+
+ conf &= ~value;
+ writel(ASPEED_FMC_BASE + R_CONF, conf);
+}
+
+static void spi_ce_ctrl(uint32_t value)
+{
+ uint32_t conf = readl(ASPEED_FMC_BASE + R_CE_CTRL);
+
+ conf |= value;
+ writel(ASPEED_FMC_BASE + R_CE_CTRL, conf);
+}
+
+static void spi_ctrl_setmode(uint8_t mode, uint8_t cmd)
+{
+ uint32_t ctrl = readl(ASPEED_FMC_BASE + R_CTRL0);
+ ctrl &= ~(CTRL_USERMODE | 0xff << 16);
+ ctrl |= mode | (cmd << 16);
+ writel(ASPEED_FMC_BASE + R_CTRL0, ctrl);
+}
+
static void spi_ctrl_start_user(void)
{
uint32_t ctrl = readl(ASPEED_FMC_BASE + R_CTRL0);
@@ -95,6 +124,18 @@ static void spi_ctrl_stop_user(void)
writel(ASPEED_FMC_BASE + R_CTRL0, ctrl);
}
+static void flash_reset(void)
+{
+ spi_conf(CONF_ENABLE_W0);
+
+ spi_ctrl_start_user();
+ writeb(ASPEED_FLASH_BASE, RESET_ENABLE);
+ writeb(ASPEED_FLASH_BASE, RESET_MEMORY);
+ spi_ctrl_stop_user();
+
+ spi_conf_remove(CONF_ENABLE_W0);
+}
+
static void test_read_jedec(void)
{
uint32_t jedec = 0x0;
@@ -108,6 +149,8 @@ static void test_read_jedec(void)
jedec |= readb(ASPEED_FLASH_BASE);
spi_ctrl_stop_user();
+ flash_reset();
+
g_assert_cmphex(jedec, ==, FLASH_JEDEC);
}
@@ -128,6 +171,18 @@ static void read_page(uint32_t addr, uint32_t *page)
spi_ctrl_stop_user();
}
+static void read_page_mem(uint32_t addr, uint32_t *page)
+{
+ int i;
+
+ /* move out of USER mode to use direct reads from the AHB bus */
+ spi_ctrl_setmode(CTRL_READMODE, READ);
+
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ page[i] = make_be32(readl(ASPEED_FLASH_BASE + addr + i * 4));
+ }
+}
+
static void test_erase_sector(void)
{
uint32_t some_page_addr = 0x600 * PAGE_SIZE;
@@ -155,6 +210,8 @@ static void test_erase_sector(void)
for (i = 0; i < PAGE_SIZE / 4; i++) {
g_assert_cmphex(page[i], ==, 0xffffffff);
}
+
+ flash_reset();
}
static void test_erase_all(void)
@@ -182,6 +239,8 @@ static void test_erase_all(void)
for (i = 0; i < PAGE_SIZE / 4; i++) {
g_assert_cmphex(page[i], ==, 0xffffffff);
}
+
+ flash_reset();
}
static void test_write_page(void)
@@ -195,6 +254,7 @@ static void test_write_page(void)
spi_ctrl_start_user();
writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
+ writeb(ASPEED_FLASH_BASE, WREN);
writeb(ASPEED_FLASH_BASE, PP);
writel(ASPEED_FLASH_BASE, make_be32(my_page_addr));
@@ -215,6 +275,77 @@ static void test_write_page(void)
for (i = 0; i < PAGE_SIZE / 4; i++) {
g_assert_cmphex(page[i], ==, 0xffffffff);
}
+
+ flash_reset();
+}
+
+static void test_read_page_mem(void)
+{
+ uint32_t my_page_addr = 0x14000 * PAGE_SIZE; /* beyond 16MB */
+ uint32_t some_page_addr = 0x15000 * PAGE_SIZE;
+ uint32_t page[PAGE_SIZE / 4];
+ int i;
+
+ /* Enable 4BYTE mode for the controller. This should be strapped by
+ * HW for CE0 anyhow.
+ */
+ spi_ce_ctrl(1 << CRTL_EXTENDED0);
+
+ /* Enable 4BYTE mode for flash. */
+ spi_conf(CONF_ENABLE_W0);
+ spi_ctrl_start_user();
+ writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
+ spi_ctrl_stop_user();
+ spi_conf_remove(CONF_ENABLE_W0);
+
+ /* Check what was written */
+ read_page_mem(my_page_addr, page);
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
+ }
+
+ /* Check some other page. It should be full of 0xff */
+ read_page_mem(some_page_addr, page);
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, 0xffffffff);
+ }
+
+ flash_reset();
+}
+
+static void test_write_page_mem(void)
+{
+ uint32_t my_page_addr = 0x15000 * PAGE_SIZE;
+ uint32_t page[PAGE_SIZE / 4];
+ int i;
+
+ /* Enable 4BYTE mode for the controller. This should be strapped by
+ * HW for CE0 anyhow.
+ */
+ spi_ce_ctrl(1 << CRTL_EXTENDED0);
+
+ /* Enable 4BYTE mode for flash. */
+ spi_conf(CONF_ENABLE_W0);
+ spi_ctrl_start_user();
+ writeb(ASPEED_FLASH_BASE, EN_4BYTE_ADDR);
+ writeb(ASPEED_FLASH_BASE, WREN);
+ spi_ctrl_stop_user();
+
+ /* move out of USER mode to use direct writes to the AHB bus */
+ spi_ctrl_setmode(CTRL_WRITEMODE, PP);
+
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ writel(ASPEED_FLASH_BASE + my_page_addr + i * 4,
+ make_be32(my_page_addr + i * 4));
+ }
+
+ /* Check what was written */
+ read_page_mem(my_page_addr, page);
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ g_assert_cmphex(page[i], ==, my_page_addr + i * 4);
+ }
+
+ flash_reset();
}
static char tmp_path[] = "/tmp/qtest.m25p80.XXXXXX";
@@ -242,6 +373,8 @@ int main(int argc, char **argv)
qtest_add_func("/m25p80/erase_sector", test_erase_sector);
qtest_add_func("/m25p80/erase_all", test_erase_all);
qtest_add_func("/m25p80/write_page", test_write_page);
+ qtest_add_func("/m25p80/read_page_mem", test_read_page_mem);
+ qtest_add_func("/m25p80/write_page_mem", test_write_page_mem);
ret = g_test_run();
diff --git a/tests/qapi-schema/alternate-any.err b/tests/qapi-schema/alternate-any.err
index aaa0154731..395c8ab583 100644
--- a/tests/qapi-schema/alternate-any.err
+++ b/tests/qapi-schema/alternate-any.err
@@ -1 +1 @@
-tests/qapi-schema/alternate-any.json:2: Alternate 'Alt' member 'one' cannot use type 'any'
+tests/qapi-schema/alternate-any.json:6: Alternate 'Alt' member 'one' cannot use type 'any'
diff --git a/tests/qapi-schema/alternate-any.json b/tests/qapi-schema/alternate-any.json
index e47a73a116..c958776767 100644
--- a/tests/qapi-schema/alternate-any.json
+++ b/tests/qapi-schema/alternate-any.json
@@ -1,4 +1,8 @@
# we do not allow the 'any' type as an alternate branch
+
+##
+# @Alt:
+##
{ 'alternate': 'Alt',
'data': { 'one': 'any',
'two': 'int' } }
diff --git a/tests/qapi-schema/alternate-array.err b/tests/qapi-schema/alternate-array.err
index 7b930c64ab..09628e9755 100644
--- a/tests/qapi-schema/alternate-array.err
+++ b/tests/qapi-schema/alternate-array.err
@@ -1 +1 @@
-tests/qapi-schema/alternate-array.json:5: Member 'two' of alternate 'Alt' cannot be an array
+tests/qapi-schema/alternate-array.json:12: Member 'two' of alternate 'Alt' cannot be an array
diff --git a/tests/qapi-schema/alternate-array.json b/tests/qapi-schema/alternate-array.json
index f241aac122..c2f98ad608 100644
--- a/tests/qapi-schema/alternate-array.json
+++ b/tests/qapi-schema/alternate-array.json
@@ -1,7 +1,14 @@
# we do not allow array branches in alternates
+
+##
+# @One:
+##
# TODO: should we support this?
{ 'struct': 'One',
'data': { 'name': 'str' } }
+##
+# @Alt:
+##
{ 'alternate': 'Alt',
'data': { 'one': 'One',
'two': [ 'int' ] } }
diff --git a/tests/qapi-schema/alternate-base.err b/tests/qapi-schema/alternate-base.err
index 30d8a34373..3b679140e0 100644
--- a/tests/qapi-schema/alternate-base.err
+++ b/tests/qapi-schema/alternate-base.err
@@ -1 +1 @@
-tests/qapi-schema/alternate-base.json:4: Unknown key 'base' in alternate 'Alt'
+tests/qapi-schema/alternate-base.json:11: Unknown key 'base' in alternate 'Alt'
diff --git a/tests/qapi-schema/alternate-base.json b/tests/qapi-schema/alternate-base.json
index 529430ecf2..9612b7925d 100644
--- a/tests/qapi-schema/alternate-base.json
+++ b/tests/qapi-schema/alternate-base.json
@@ -1,6 +1,13 @@
# we reject alternate with base type
+
+##
+# @Base:
+##
{ 'struct': 'Base',
'data': { 'string': 'str' } }
+##
+# @Alt:
+##
{ 'alternate': 'Alt',
'base': 'Base',
'data': { 'number': 'int' } }
diff --git a/tests/qapi-schema/alternate-clash.err b/tests/qapi-schema/alternate-clash.err
index 604d8495eb..f07c3e8ad3 100644
--- a/tests/qapi-schema/alternate-clash.err
+++ b/tests/qapi-schema/alternate-clash.err
@@ -1 +1 @@
-tests/qapi-schema/alternate-clash.json:7: 'a_b' (branch of Alt1) collides with 'a-b' (branch of Alt1)
+tests/qapi-schema/alternate-clash.json:11: 'a_b' (branch of Alt1) collides with 'a-b' (branch of Alt1)
diff --git a/tests/qapi-schema/alternate-clash.json b/tests/qapi-schema/alternate-clash.json
index 6d73bc527b..97ca7c80e7 100644
--- a/tests/qapi-schema/alternate-clash.json
+++ b/tests/qapi-schema/alternate-clash.json
@@ -4,5 +4,9 @@
# TODO: In the future, if alternates are simplified to not generate
# the implicit Alt1Kind enum, we would still have a collision with the
# resulting C union trying to have two members named 'a_b'.
+
+##
+# @Alt1:
+##
{ 'alternate': 'Alt1',
'data': { 'a-b': 'str', 'a_b': 'int' } }
diff --git a/tests/qapi-schema/alternate-conflict-dict.err b/tests/qapi-schema/alternate-conflict-dict.err
index 0f411f4faf..7cb023fdd8 100644
--- a/tests/qapi-schema/alternate-conflict-dict.err
+++ b/tests/qapi-schema/alternate-conflict-dict.err
@@ -1 +1 @@
-tests/qapi-schema/alternate-conflict-dict.json:6: Alternate 'Alt' member 'two' can't be distinguished from member 'one'
+tests/qapi-schema/alternate-conflict-dict.json:16: Alternate 'Alt' member 'two' can't be distinguished from member 'one'
diff --git a/tests/qapi-schema/alternate-conflict-dict.json b/tests/qapi-schema/alternate-conflict-dict.json
index d566cca816..9f9d97fa2e 100644
--- a/tests/qapi-schema/alternate-conflict-dict.json
+++ b/tests/qapi-schema/alternate-conflict-dict.json
@@ -1,8 +1,18 @@
# we reject alternates with multiple object branches
+
+##
+# @One:
+##
{ 'struct': 'One',
'data': { 'name': 'str' } }
+##
+# @Two:
+##
{ 'struct': 'Two',
'data': { 'value': 'int' } }
+##
+# @Alt:
+##
{ 'alternate': 'Alt',
'data': { 'one': 'One',
'two': 'Two' } }
diff --git a/tests/qapi-schema/alternate-conflict-string.err b/tests/qapi-schema/alternate-conflict-string.err
index fc523b0879..6dbbacd1d2 100644
--- a/tests/qapi-schema/alternate-conflict-string.err
+++ b/tests/qapi-schema/alternate-conflict-string.err
@@ -1 +1 @@
-tests/qapi-schema/alternate-conflict-string.json:4: Alternate 'Alt' member 'two' can't be distinguished from member 'one'
+tests/qapi-schema/alternate-conflict-string.json:11: Alternate 'Alt' member 'two' can't be distinguished from member 'one'
diff --git a/tests/qapi-schema/alternate-conflict-string.json b/tests/qapi-schema/alternate-conflict-string.json
index 72f04a820a..12aafab808 100644
--- a/tests/qapi-schema/alternate-conflict-string.json
+++ b/tests/qapi-schema/alternate-conflict-string.json
@@ -1,6 +1,13 @@
# we reject alternates with multiple string-like branches
+
+##
+# @Enum:
+##
{ 'enum': 'Enum',
'data': [ 'hello', 'world' ] }
+##
+# @Alt:
+##
{ 'alternate': 'Alt',
'data': { 'one': 'str',
'two': 'Enum' } }
diff --git a/tests/qapi-schema/alternate-empty.err b/tests/qapi-schema/alternate-empty.err
index bb06c5bfec..8245ce3103 100644
--- a/tests/qapi-schema/alternate-empty.err
+++ b/tests/qapi-schema/alternate-empty.err
@@ -1 +1 @@
-tests/qapi-schema/alternate-empty.json:2: Alternate 'Alt' should have at least two branches in 'data'
+tests/qapi-schema/alternate-empty.json:6: Alternate 'Alt' should have at least two branches in 'data'
diff --git a/tests/qapi-schema/alternate-empty.json b/tests/qapi-schema/alternate-empty.json
index fff15baf16..db54405240 100644
--- a/tests/qapi-schema/alternate-empty.json
+++ b/tests/qapi-schema/alternate-empty.json
@@ -1,2 +1,6 @@
# alternates must list at least two types to be useful
+
+##
+# @Alt:
+##
{ 'alternate': 'Alt', 'data': { 'i': 'int' } }
diff --git a/tests/qapi-schema/alternate-nested.err b/tests/qapi-schema/alternate-nested.err
index 4d1187e60e..1804ffbf47 100644
--- a/tests/qapi-schema/alternate-nested.err
+++ b/tests/qapi-schema/alternate-nested.err
@@ -1 +1 @@
-tests/qapi-schema/alternate-nested.json:4: Member 'nested' of alternate 'Alt2' cannot use alternate type 'Alt1'
+tests/qapi-schema/alternate-nested.json:11: Member 'nested' of alternate 'Alt2' cannot use alternate type 'Alt1'
diff --git a/tests/qapi-schema/alternate-nested.json b/tests/qapi-schema/alternate-nested.json
index 8e22186491..9f83ebe2e0 100644
--- a/tests/qapi-schema/alternate-nested.json
+++ b/tests/qapi-schema/alternate-nested.json
@@ -1,5 +1,12 @@
# we reject a nested alternate branch
+
+##
+# @Alt1:
+##
{ 'alternate': 'Alt1',
'data': { 'name': 'str', 'value': 'int' } }
+##
+# @Alt2:
+##
{ 'alternate': 'Alt2',
'data': { 'nested': 'Alt1', 'b': 'bool' } }
diff --git a/tests/qapi-schema/alternate-unknown.err b/tests/qapi-schema/alternate-unknown.err
index dea45dc730..cf5b9b6830 100644
--- a/tests/qapi-schema/alternate-unknown.err
+++ b/tests/qapi-schema/alternate-unknown.err
@@ -1 +1 @@
-tests/qapi-schema/alternate-unknown.json:2: Member 'unknown' of alternate 'Alt' uses unknown type 'MissingType'
+tests/qapi-schema/alternate-unknown.json:6: Member 'unknown' of alternate 'Alt' uses unknown type 'MissingType'
diff --git a/tests/qapi-schema/alternate-unknown.json b/tests/qapi-schema/alternate-unknown.json
index 08c80dced0..941ba1fac4 100644
--- a/tests/qapi-schema/alternate-unknown.json
+++ b/tests/qapi-schema/alternate-unknown.json
@@ -1,3 +1,7 @@
# we reject an alternate with unknown type in branch
+
+##
+# @Alt:
+##
{ 'alternate': 'Alt',
'data': { 'unknown': 'MissingType', 'i': 'int' } }
diff --git a/tests/qapi-schema/args-alternate.err b/tests/qapi-schema/args-alternate.err
index 3086eae56b..2e6bf54245 100644
--- a/tests/qapi-schema/args-alternate.err
+++ b/tests/qapi-schema/args-alternate.err
@@ -1 +1 @@
-tests/qapi-schema/args-alternate.json:3: 'data' for command 'oops' cannot use alternate type 'Alt'
+tests/qapi-schema/args-alternate.json:11: 'data' for command 'oops' cannot use alternate type 'Alt'
diff --git a/tests/qapi-schema/args-alternate.json b/tests/qapi-schema/args-alternate.json
index 69e94d4819..49d0211a03 100644
--- a/tests/qapi-schema/args-alternate.json
+++ b/tests/qapi-schema/args-alternate.json
@@ -1,3 +1,11 @@
# we do not allow alternate arguments
+
+##
+# @Alt:
+##
{ 'alternate': 'Alt', 'data': { 'case1': 'int', 'case2': 'str' } }
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': 'Alt' }
diff --git a/tests/qapi-schema/args-any.err b/tests/qapi-schema/args-any.err
index bf9b5e0730..955504b10f 100644
--- a/tests/qapi-schema/args-any.err
+++ b/tests/qapi-schema/args-any.err
@@ -1 +1 @@
-tests/qapi-schema/args-any.json:2: 'data' for command 'oops' cannot use built-in type 'any'
+tests/qapi-schema/args-any.json:6: 'data' for command 'oops' cannot use built-in type 'any'
diff --git a/tests/qapi-schema/args-any.json b/tests/qapi-schema/args-any.json
index 58fe5e470e..f494479cc9 100644
--- a/tests/qapi-schema/args-any.json
+++ b/tests/qapi-schema/args-any.json
@@ -1,2 +1,6 @@
# we do not allow an 'any' argument
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': 'any' }
diff --git a/tests/qapi-schema/args-array-empty.err b/tests/qapi-schema/args-array-empty.err
index cb7ed33b3f..e85f7918ab 100644
--- a/tests/qapi-schema/args-array-empty.err
+++ b/tests/qapi-schema/args-array-empty.err
@@ -1 +1 @@
-tests/qapi-schema/args-array-empty.json:2: Member 'empty' of 'data' for command 'oops': array type must contain single type name
+tests/qapi-schema/args-array-empty.json:6: Member 'empty' of 'data' for command 'oops': array type must contain single type name
diff --git a/tests/qapi-schema/args-array-empty.json b/tests/qapi-schema/args-array-empty.json
index 652dcfb24a..78a0b88221 100644
--- a/tests/qapi-schema/args-array-empty.json
+++ b/tests/qapi-schema/args-array-empty.json
@@ -1,2 +1,6 @@
# we reject an array for data if it does not contain a known type
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': { 'empty': [ ] } }
diff --git a/tests/qapi-schema/args-array-unknown.err b/tests/qapi-schema/args-array-unknown.err
index cd7a0f98d7..77788de099 100644
--- a/tests/qapi-schema/args-array-unknown.err
+++ b/tests/qapi-schema/args-array-unknown.err
@@ -1 +1 @@
-tests/qapi-schema/args-array-unknown.json:2: Member 'array' of 'data' for command 'oops' uses unknown type 'NoSuchType'
+tests/qapi-schema/args-array-unknown.json:6: Member 'array' of 'data' for command 'oops' uses unknown type 'NoSuchType'
diff --git a/tests/qapi-schema/args-array-unknown.json b/tests/qapi-schema/args-array-unknown.json
index 6f3e883315..f680fc10d3 100644
--- a/tests/qapi-schema/args-array-unknown.json
+++ b/tests/qapi-schema/args-array-unknown.json
@@ -1,2 +1,6 @@
# we reject an array for data if it does not contain a known type
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': { 'array': [ 'NoSuchType' ] } }
diff --git a/tests/qapi-schema/args-bad-boxed.err b/tests/qapi-schema/args-bad-boxed.err
index ad0d417321..87a906137a 100644
--- a/tests/qapi-schema/args-bad-boxed.err
+++ b/tests/qapi-schema/args-bad-boxed.err
@@ -1 +1 @@
-tests/qapi-schema/args-bad-boxed.json:2: 'boxed' of command 'foo' should only use true value
+tests/qapi-schema/args-bad-boxed.json:6: 'boxed' of command 'foo' should only use true value
diff --git a/tests/qapi-schema/args-bad-boxed.json b/tests/qapi-schema/args-bad-boxed.json
index dea0cd0aa5..4c0b28f291 100644
--- a/tests/qapi-schema/args-bad-boxed.json
+++ b/tests/qapi-schema/args-bad-boxed.json
@@ -1,2 +1,6 @@
# 'boxed' should only appear with value true
+
+##
+# @foo:
+##
{ 'command': 'foo', 'boxed': false }
diff --git a/tests/qapi-schema/args-boxed-anon.err b/tests/qapi-schema/args-boxed-anon.err
index f24f345218..3cfac0b923 100644
--- a/tests/qapi-schema/args-boxed-anon.err
+++ b/tests/qapi-schema/args-boxed-anon.err
@@ -1 +1 @@
-tests/qapi-schema/args-boxed-anon.json:2: 'data' for command 'foo' should be a type name
+tests/qapi-schema/args-boxed-anon.json:6: 'data' for command 'foo' should be a type name
diff --git a/tests/qapi-schema/args-boxed-anon.json b/tests/qapi-schema/args-boxed-anon.json
index 95f60da2ed..2358e6abb1 100644
--- a/tests/qapi-schema/args-boxed-anon.json
+++ b/tests/qapi-schema/args-boxed-anon.json
@@ -1,2 +1,6 @@
# 'boxed' can only be used with named types
+
+##
+# @foo:
+##
{ 'command': 'foo', 'boxed': true, 'data': { 'string': 'str' } }
diff --git a/tests/qapi-schema/args-boxed-empty.err b/tests/qapi-schema/args-boxed-empty.err
index 039603e85c..963f495a9d 100644
--- a/tests/qapi-schema/args-boxed-empty.err
+++ b/tests/qapi-schema/args-boxed-empty.err
@@ -1 +1 @@
-tests/qapi-schema/args-boxed-empty.json:3: Cannot use 'boxed' with empty type
+tests/qapi-schema/args-boxed-empty.json:11: Cannot use 'boxed' with empty type
diff --git a/tests/qapi-schema/args-boxed-empty.json b/tests/qapi-schema/args-boxed-empty.json
index 52717e065f..8e8cc26525 100644
--- a/tests/qapi-schema/args-boxed-empty.json
+++ b/tests/qapi-schema/args-boxed-empty.json
@@ -1,3 +1,11 @@
# 'boxed' requires a non-empty type
+
+##
+# @Empty:
+##
{ 'struct': 'Empty', 'data': {} }
+
+##
+# @foo:
+##
{ 'command': 'foo', 'boxed': true, 'data': 'Empty' }
diff --git a/tests/qapi-schema/args-boxed-string.err b/tests/qapi-schema/args-boxed-string.err
index d326b48aef..7623755208 100644
--- a/tests/qapi-schema/args-boxed-string.err
+++ b/tests/qapi-schema/args-boxed-string.err
@@ -1 +1 @@
-tests/qapi-schema/args-boxed-string.json:2: 'data' for command 'foo' cannot use built-in type 'str'
+tests/qapi-schema/args-boxed-string.json:6: 'data' for command 'foo' cannot use built-in type 'str'
diff --git a/tests/qapi-schema/args-boxed-string.json b/tests/qapi-schema/args-boxed-string.json
index f91a1502e7..aecdf97ce9 100644
--- a/tests/qapi-schema/args-boxed-string.json
+++ b/tests/qapi-schema/args-boxed-string.json
@@ -1,2 +1,6 @@
# 'boxed' requires a complex (not built-in) type
+
+##
+# @foo:
+##
{ 'command': 'foo', 'boxed': true, 'data': 'str' }
diff --git a/tests/qapi-schema/args-int.err b/tests/qapi-schema/args-int.err
index dc1d2504ff..38b3202b09 100644
--- a/tests/qapi-schema/args-int.err
+++ b/tests/qapi-schema/args-int.err
@@ -1 +1 @@
-tests/qapi-schema/args-int.json:2: 'data' for command 'oops' cannot use built-in type 'int'
+tests/qapi-schema/args-int.json:6: 'data' for command 'oops' cannot use built-in type 'int'
diff --git a/tests/qapi-schema/args-int.json b/tests/qapi-schema/args-int.json
index a334d92e8c..7f4e1b7aa6 100644
--- a/tests/qapi-schema/args-int.json
+++ b/tests/qapi-schema/args-int.json
@@ -1,2 +1,6 @@
# we reject commands where data is not an array or complex type
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': 'int' }
diff --git a/tests/qapi-schema/args-invalid.err b/tests/qapi-schema/args-invalid.err
index fe1e94975b..5d3568d7c3 100644
--- a/tests/qapi-schema/args-invalid.err
+++ b/tests/qapi-schema/args-invalid.err
@@ -1 +1 @@
-tests/qapi-schema/args-invalid.json:1: 'data' for command 'foo' should be a dictionary or type name
+tests/qapi-schema/args-invalid.json:4: 'data' for command 'foo' should be a dictionary or type name
diff --git a/tests/qapi-schema/args-invalid.json b/tests/qapi-schema/args-invalid.json
index db0981341b..1a7e63bb23 100644
--- a/tests/qapi-schema/args-invalid.json
+++ b/tests/qapi-schema/args-invalid.json
@@ -1,2 +1,5 @@
+##
+# @foo:
+##
{ 'command': 'foo',
'data': false }
diff --git a/tests/qapi-schema/args-member-array-bad.err b/tests/qapi-schema/args-member-array-bad.err
index 881b4d954f..825ffca9bf 100644
--- a/tests/qapi-schema/args-member-array-bad.err
+++ b/tests/qapi-schema/args-member-array-bad.err
@@ -1 +1 @@
-tests/qapi-schema/args-member-array-bad.json:2: Member 'member' of 'data' for command 'oops': array type must contain single type name
+tests/qapi-schema/args-member-array-bad.json:6: Member 'member' of 'data' for command 'oops': array type must contain single type name
diff --git a/tests/qapi-schema/args-member-array-bad.json b/tests/qapi-schema/args-member-array-bad.json
index b2ff144ec6..e934f5c457 100644
--- a/tests/qapi-schema/args-member-array-bad.json
+++ b/tests/qapi-schema/args-member-array-bad.json
@@ -1,2 +1,6 @@
# we reject data if it does not contain a valid array type
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': { 'member': [ { 'nested': 'str' } ] } }
diff --git a/tests/qapi-schema/args-member-case.err b/tests/qapi-schema/args-member-case.err
index 19c4426601..a3fb2bdd60 100644
--- a/tests/qapi-schema/args-member-case.err
+++ b/tests/qapi-schema/args-member-case.err
@@ -1 +1 @@
-tests/qapi-schema/args-member-case.json:2: 'Arg' (parameter of no-way-this-will-get-whitelisted) should not use uppercase
+tests/qapi-schema/args-member-case.json:6: 'Arg' (parameter of no-way-this-will-get-whitelisted) should not use uppercase
diff --git a/tests/qapi-schema/args-member-case.json b/tests/qapi-schema/args-member-case.json
index 93439bee8b..811e658d66 100644
--- a/tests/qapi-schema/args-member-case.json
+++ b/tests/qapi-schema/args-member-case.json
@@ -1,2 +1,6 @@
# Member names should be 'lower-case' unless the struct/command is whitelisted
+
+##
+# @no-way-this-will-get-whitelisted:
+##
{ 'command': 'no-way-this-will-get-whitelisted', 'data': { 'Arg': 'int' } }
diff --git a/tests/qapi-schema/args-member-unknown.err b/tests/qapi-schema/args-member-unknown.err
index f6f82828ce..3db452b95a 100644
--- a/tests/qapi-schema/args-member-unknown.err
+++ b/tests/qapi-schema/args-member-unknown.err
@@ -1 +1 @@
-tests/qapi-schema/args-member-unknown.json:2: Member 'member' of 'data' for command 'oops' uses unknown type 'NoSuchType'
+tests/qapi-schema/args-member-unknown.json:6: Member 'member' of 'data' for command 'oops' uses unknown type 'NoSuchType'
diff --git a/tests/qapi-schema/args-member-unknown.json b/tests/qapi-schema/args-member-unknown.json
index 342a41ec90..e2fef9c46f 100644
--- a/tests/qapi-schema/args-member-unknown.json
+++ b/tests/qapi-schema/args-member-unknown.json
@@ -1,2 +1,6 @@
# we reject data if it does not contain a known type
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': { 'member': 'NoSuchType' } }
diff --git a/tests/qapi-schema/args-name-clash.err b/tests/qapi-schema/args-name-clash.err
index d953e8d241..23988cb5ca 100644
--- a/tests/qapi-schema/args-name-clash.err
+++ b/tests/qapi-schema/args-name-clash.err
@@ -1 +1 @@
-tests/qapi-schema/args-name-clash.json:4: 'a_b' (parameter of oops) collides with 'a-b' (parameter of oops)
+tests/qapi-schema/args-name-clash.json:8: 'a_b' (parameter of oops) collides with 'a-b' (parameter of oops)
diff --git a/tests/qapi-schema/args-name-clash.json b/tests/qapi-schema/args-name-clash.json
index 61423cb893..991323b78d 100644
--- a/tests/qapi-schema/args-name-clash.json
+++ b/tests/qapi-schema/args-name-clash.json
@@ -1,4 +1,8 @@
# C member name collision
# Reject members that clash when mapped to C names (we would have two 'a_b'
# members).
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': { 'a-b': 'str', 'a_b': 'str' } }
diff --git a/tests/qapi-schema/args-union.err b/tests/qapi-schema/args-union.err
index f8ad223dde..ce0a34e16c 100644
--- a/tests/qapi-schema/args-union.err
+++ b/tests/qapi-schema/args-union.err
@@ -1 +1 @@
-tests/qapi-schema/args-union.json:3: 'data' for command 'oops' cannot use union type 'Uni'
+tests/qapi-schema/args-union.json:10: 'data' for command 'oops' cannot use union type 'Uni'
diff --git a/tests/qapi-schema/args-union.json b/tests/qapi-schema/args-union.json
index 2fcaeaae16..57284b43c5 100644
--- a/tests/qapi-schema/args-union.json
+++ b/tests/qapi-schema/args-union.json
@@ -1,3 +1,10 @@
# use of union arguments requires 'boxed':true
+
+##
+# @Uni:
+##
{ 'union': 'Uni', 'data': { 'case1': 'int', 'case2': 'str' } }
+##
+# @oops:
+##
{ 'command': 'oops', 'data': 'Uni' }
diff --git a/tests/qapi-schema/args-unknown.err b/tests/qapi-schema/args-unknown.err
index 4d91ec869f..ba6c6cf326 100644
--- a/tests/qapi-schema/args-unknown.err
+++ b/tests/qapi-schema/args-unknown.err
@@ -1 +1 @@
-tests/qapi-schema/args-unknown.json:2: 'data' for command 'oops' uses unknown type 'NoSuchType'
+tests/qapi-schema/args-unknown.json:6: 'data' for command 'oops' uses unknown type 'NoSuchType'
diff --git a/tests/qapi-schema/args-unknown.json b/tests/qapi-schema/args-unknown.json
index 32aba43b3f..12666dc020 100644
--- a/tests/qapi-schema/args-unknown.json
+++ b/tests/qapi-schema/args-unknown.json
@@ -1,2 +1,6 @@
# we reject data if it does not contain a known type
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': 'NoSuchType' }
diff --git a/tests/qapi-schema/bad-base.err b/tests/qapi-schema/bad-base.err
index 154274bdd3..e668761c65 100644
--- a/tests/qapi-schema/bad-base.err
+++ b/tests/qapi-schema/bad-base.err
@@ -1 +1 @@
-tests/qapi-schema/bad-base.json:3: 'base' for struct 'MyType' cannot use union type 'Union'
+tests/qapi-schema/bad-base.json:10: 'base' for struct 'MyType' cannot use union type 'Union'
diff --git a/tests/qapi-schema/bad-base.json b/tests/qapi-schema/bad-base.json
index a634331cdd..c3faa8242b 100644
--- a/tests/qapi-schema/bad-base.json
+++ b/tests/qapi-schema/bad-base.json
@@ -1,3 +1,10 @@
# we reject a base that is not a struct
+
+##
+# @Union:
+##
{ 'union': 'Union', 'data': { 'a': 'int', 'b': 'str' } }
+##
+# @MyType:
+##
{ 'struct': 'MyType', 'base': 'Union', 'data': { 'c': 'int' } }
diff --git a/tests/qapi-schema/bad-data.err b/tests/qapi-schema/bad-data.err
index 8523ac4f46..c1b9e35313 100644
--- a/tests/qapi-schema/bad-data.err
+++ b/tests/qapi-schema/bad-data.err
@@ -1 +1 @@
-tests/qapi-schema/bad-data.json:2: 'data' for command 'oops' cannot be an array
+tests/qapi-schema/bad-data.json:6: 'data' for command 'oops' cannot be an array
diff --git a/tests/qapi-schema/bad-data.json b/tests/qapi-schema/bad-data.json
index 832eeb76f4..51c444f4f8 100644
--- a/tests/qapi-schema/bad-data.json
+++ b/tests/qapi-schema/bad-data.json
@@ -1,2 +1,6 @@
# we ensure 'data' is a dictionary for all but enums
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': [ ] }
diff --git a/tests/qapi-schema/bad-ident.err b/tests/qapi-schema/bad-ident.err
index c4190602b5..b757aa21e7 100644
--- a/tests/qapi-schema/bad-ident.err
+++ b/tests/qapi-schema/bad-ident.err
@@ -1 +1 @@
-tests/qapi-schema/bad-ident.json:2: 'struct' does not allow optional name '*oops'
+tests/qapi-schema/bad-ident.json:6: 'struct' does not allow optional name '*oops'
diff --git a/tests/qapi-schema/bad-ident.json b/tests/qapi-schema/bad-ident.json
index 763627ad23..b43df7a3e0 100644
--- a/tests/qapi-schema/bad-ident.json
+++ b/tests/qapi-schema/bad-ident.json
@@ -1,2 +1,6 @@
# we reject creating a type name with bad name
+
+##
+# @*oops:
+##
{ 'struct': '*oops', 'data': { 'i': 'int' } }
diff --git a/tests/qapi-schema/bad-type-bool.err b/tests/qapi-schema/bad-type-bool.err
index 62fd70baaf..72e026b46c 100644
--- a/tests/qapi-schema/bad-type-bool.err
+++ b/tests/qapi-schema/bad-type-bool.err
@@ -1 +1 @@
-tests/qapi-schema/bad-type-bool.json:2: 'struct' key must have a string value
+tests/qapi-schema/bad-type-bool.json:6: 'struct' key must have a string value
diff --git a/tests/qapi-schema/bad-type-bool.json b/tests/qapi-schema/bad-type-bool.json
index bde17b56c4..1f9eddf938 100644
--- a/tests/qapi-schema/bad-type-bool.json
+++ b/tests/qapi-schema/bad-type-bool.json
@@ -1,2 +1,6 @@
# we reject an expression with a metatype that is not a string
+
+##
+# @true:
+##
{ 'struct': true, 'data': { } }
diff --git a/tests/qapi-schema/bad-type-dict.err b/tests/qapi-schema/bad-type-dict.err
index 0b2a2aeac4..d0d1f607e5 100644
--- a/tests/qapi-schema/bad-type-dict.err
+++ b/tests/qapi-schema/bad-type-dict.err
@@ -1 +1 @@
-tests/qapi-schema/bad-type-dict.json:2: 'command' key must have a string value
+tests/qapi-schema/bad-type-dict.json:6: 'command' key must have a string value
diff --git a/tests/qapi-schema/bad-type-dict.json b/tests/qapi-schema/bad-type-dict.json
index 2a91b241f8..5952caab28 100644
--- a/tests/qapi-schema/bad-type-dict.json
+++ b/tests/qapi-schema/bad-type-dict.json
@@ -1,2 +1,6 @@
# we reject an expression with a metatype that is not a string
+
+##
+# @foo:
+##
{ 'command': { } }
diff --git a/tests/qapi-schema/base-cycle-direct.err b/tests/qapi-schema/base-cycle-direct.err
index 9c68f6543d..dd7f5aace6 100644
--- a/tests/qapi-schema/base-cycle-direct.err
+++ b/tests/qapi-schema/base-cycle-direct.err
@@ -1 +1 @@
-tests/qapi-schema/base-cycle-direct.json:2: Object Loopy contains itself
+tests/qapi-schema/base-cycle-direct.json:6: Object Loopy contains itself
diff --git a/tests/qapi-schema/base-cycle-direct.json b/tests/qapi-schema/base-cycle-direct.json
index 4fc66d0516..9780f7e2ca 100644
--- a/tests/qapi-schema/base-cycle-direct.json
+++ b/tests/qapi-schema/base-cycle-direct.json
@@ -1,2 +1,6 @@
# we reject a loop in base classes
+
+##
+# @Loopy:
+##
{ 'struct': 'Loopy', 'base': 'Loopy', 'data': {} }
diff --git a/tests/qapi-schema/base-cycle-indirect.err b/tests/qapi-schema/base-cycle-indirect.err
index fc92fe47f8..f4198e4a40 100644
--- a/tests/qapi-schema/base-cycle-indirect.err
+++ b/tests/qapi-schema/base-cycle-indirect.err
@@ -1 +1 @@
-tests/qapi-schema/base-cycle-indirect.json:2: Object Base1 contains itself
+tests/qapi-schema/base-cycle-indirect.json:6: Object Base1 contains itself
diff --git a/tests/qapi-schema/base-cycle-indirect.json b/tests/qapi-schema/base-cycle-indirect.json
index 28667721a3..99926c4609 100644
--- a/tests/qapi-schema/base-cycle-indirect.json
+++ b/tests/qapi-schema/base-cycle-indirect.json
@@ -1,3 +1,10 @@
# we reject a loop in base classes
+
+##
+# @Base1:
+##
{ 'struct': 'Base1', 'base': 'Base2', 'data': {} }
+##
+# @Base2:
+##
{ 'struct': 'Base2', 'base': 'Base1', 'data': {} }
diff --git a/tests/qapi-schema/command-int.err b/tests/qapi-schema/command-int.err
index 0f9300679b..3c834a97ab 100644
--- a/tests/qapi-schema/command-int.err
+++ b/tests/qapi-schema/command-int.err
@@ -1 +1 @@
-tests/qapi-schema/command-int.json:2: built-in 'int' is already defined
+tests/qapi-schema/command-int.json:6: built-in 'int' is already defined
diff --git a/tests/qapi-schema/command-int.json b/tests/qapi-schema/command-int.json
index 9a62554fc6..5b51bf148b 100644
--- a/tests/qapi-schema/command-int.json
+++ b/tests/qapi-schema/command-int.json
@@ -1,2 +1,6 @@
# we reject collisions between commands and types
+
+##
+# @int:
+##
{ 'command': 'int', 'data': { 'character': 'str' } }
diff --git a/tests/qapi-schema/comments.json b/tests/qapi-schema/comments.json
index e643f3a74c..d31ef0d90a 100644
--- a/tests/qapi-schema/comments.json
+++ b/tests/qapi-schema/comments.json
@@ -1,4 +1,8 @@
# Unindented comment
+
+##
+# @Status:
+##
{ 'enum': 'Status', # Comment to the right of code
# Indented comment
'data': [ 'good', 'bad', 'ugly' ] }
diff --git a/tests/qapi-schema/comments.out b/tests/qapi-schema/comments.out
index 5d7c13cad1..a962fb2d2e 100644
--- a/tests/qapi-schema/comments.out
+++ b/tests/qapi-schema/comments.out
@@ -2,3 +2,4 @@ enum QType ['none', 'qnull', 'qint', 'qstring', 'qdict', 'qlist', 'qfloat', 'qbo
prefix QTYPE
enum Status ['good', 'bad', 'ugly']
object q_empty
+doc symbol=Status expr=('enum', 'Status')
diff --git a/tests/qapi-schema/doc-bad-args.err b/tests/qapi-schema/doc-bad-args.err
new file mode 100644
index 0000000000..5d44d9b668
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-args.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-bad-args.json:3: The following documented members are not in the declaration: b
diff --git a/tests/qapi-schema/doc-bad-args.exit b/tests/qapi-schema/doc-bad-args.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-args.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-bad-args.json b/tests/qapi-schema/doc-bad-args.json
new file mode 100644
index 0000000000..048e0fc5ef
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-args.json
@@ -0,0 +1,8 @@
+# Arguments listed in the doc comment must exist in the actual schema
+
+##
+# @foo:
+# @a: a
+# @b: b
+##
+{ 'command': 'foo', 'data': {'a': 'int'} }
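For contrast with the fixture above, a doc comment that passes this check documents exactly the members the expression declares. A minimal sketch, not part of the patch (the command name and its argument are hypothetical):

    ##
    # @frobnicate:
    #
    # Frobnicate the widget (hypothetical example).
    #
    # @a: the value to frobnicate
    #
    # Returns: nothing on success
    #
    # Since: 2.9
    ##
    { 'command': 'frobnicate', 'data': { 'a': 'int' } }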
diff --git a/tests/qapi-schema/doc-bad-args.out b/tests/qapi-schema/doc-bad-args.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-args.out
diff --git a/tests/qapi-schema/doc-bad-symbol.err b/tests/qapi-schema/doc-bad-symbol.err
new file mode 100644
index 0000000000..ac4e5667cb
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-symbol.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-bad-symbol.json:3: Definition of 'foo' follows documentation for 'food'
diff --git a/tests/qapi-schema/doc-bad-symbol.exit b/tests/qapi-schema/doc-bad-symbol.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-symbol.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-bad-symbol.json b/tests/qapi-schema/doc-bad-symbol.json
new file mode 100644
index 0000000000..a7c15b3b8f
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-symbol.json
@@ -0,0 +1,6 @@
+# Documentation symbol mismatch with expression
+
+##
+# @food:
+##
+{ 'command': 'foo', 'data': {'a': 'int'} }
diff --git a/tests/qapi-schema/doc-bad-symbol.out b/tests/qapi-schema/doc-bad-symbol.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-symbol.out
diff --git a/tests/qapi-schema/doc-duplicated-arg.err b/tests/qapi-schema/doc-duplicated-arg.err
new file mode 100644
index 0000000000..1c3f8e0a54
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-arg.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-duplicated-arg.json:6:1: 'a' parameter name duplicated
diff --git a/tests/qapi-schema/doc-duplicated-arg.exit b/tests/qapi-schema/doc-duplicated-arg.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-arg.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-duplicated-arg.json b/tests/qapi-schema/doc-duplicated-arg.json
new file mode 100644
index 0000000000..035cae9745
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-arg.json
@@ -0,0 +1,7 @@
+# Do not allow duplicated argument
+
+##
+# @foo:
+# @a:
+# @a:
+##
diff --git a/tests/qapi-schema/doc-duplicated-arg.out b/tests/qapi-schema/doc-duplicated-arg.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-arg.out
diff --git a/tests/qapi-schema/doc-duplicated-return.err b/tests/qapi-schema/doc-duplicated-return.err
new file mode 100644
index 0000000000..e48039f8e5
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-return.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-duplicated-return.json:7:1: Duplicated 'Returns' section
diff --git a/tests/qapi-schema/doc-duplicated-return.exit b/tests/qapi-schema/doc-duplicated-return.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-return.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-duplicated-return.json b/tests/qapi-schema/doc-duplicated-return.json
new file mode 100644
index 0000000000..b44b5ae979
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-return.json
@@ -0,0 +1,8 @@
+# Do not allow duplicated Returns section
+
+##
+# @foo:
+#
+# Returns: 0
+# Returns: 1
+##
diff --git a/tests/qapi-schema/doc-duplicated-return.out b/tests/qapi-schema/doc-duplicated-return.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-return.out
diff --git a/tests/qapi-schema/doc-duplicated-since.err b/tests/qapi-schema/doc-duplicated-since.err
new file mode 100644
index 0000000000..3fb890744a
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-since.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-duplicated-since.json:7:1: Duplicated 'Since' section
diff --git a/tests/qapi-schema/doc-duplicated-since.exit b/tests/qapi-schema/doc-duplicated-since.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-since.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-duplicated-since.json b/tests/qapi-schema/doc-duplicated-since.json
new file mode 100644
index 0000000000..343cd872cb
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-since.json
@@ -0,0 +1,8 @@
+# Do not allow duplicated Since section
+
+##
+# @foo:
+#
+# Since: 0
+# Since: 1
+##
diff --git a/tests/qapi-schema/doc-duplicated-since.out b/tests/qapi-schema/doc-duplicated-since.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-duplicated-since.out
diff --git a/tests/qapi-schema/doc-empty-arg.err b/tests/qapi-schema/doc-empty-arg.err
new file mode 100644
index 0000000000..2895518fa7
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-arg.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-empty-arg.json:5:1: Invalid parameter name
diff --git a/tests/qapi-schema/doc-empty-arg.exit b/tests/qapi-schema/doc-empty-arg.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-arg.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-empty-arg.json b/tests/qapi-schema/doc-empty-arg.json
new file mode 100644
index 0000000000..8f76ede8f3
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-arg.json
@@ -0,0 +1,6 @@
+# An invalid empty argument name
+
+##
+# @foo:
+# @:
+##
diff --git a/tests/qapi-schema/doc-empty-arg.out b/tests/qapi-schema/doc-empty-arg.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-arg.out
diff --git a/tests/qapi-schema/doc-empty-section.err b/tests/qapi-schema/doc-empty-section.err
new file mode 100644
index 0000000000..00ad625e17
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-section.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-empty-section.json:3: Empty doc section 'Note'
diff --git a/tests/qapi-schema/doc-empty-section.exit b/tests/qapi-schema/doc-empty-section.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-section.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-empty-section.json b/tests/qapi-schema/doc-empty-section.json
new file mode 100644
index 0000000000..f3384e9a3b
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-section.json
@@ -0,0 +1,8 @@
+# Tagged-section must not be empty
+
+##
+# @foo:
+#
+# Note:
+##
+{ 'command': 'foo', 'data': {'a': 'int'} }
diff --git a/tests/qapi-schema/doc-empty-section.out b/tests/qapi-schema/doc-empty-section.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-section.out
diff --git a/tests/qapi-schema/doc-empty-symbol.err b/tests/qapi-schema/doc-empty-symbol.err
new file mode 100644
index 0000000000..1936ad094f
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-symbol.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-empty-symbol.json:4:1: Invalid name
diff --git a/tests/qapi-schema/doc-empty-symbol.exit b/tests/qapi-schema/doc-empty-symbol.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-symbol.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-empty-symbol.json b/tests/qapi-schema/doc-empty-symbol.json
new file mode 100644
index 0000000000..fb8fddc4ae
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-symbol.json
@@ -0,0 +1,5 @@
+# Invalid documentation symbol
+
+##
+# @:
+##
diff --git a/tests/qapi-schema/doc-empty-symbol.out b/tests/qapi-schema/doc-empty-symbol.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-empty-symbol.out
diff --git a/tests/qapi-schema/doc-interleaved-section.err b/tests/qapi-schema/doc-interleaved-section.err
new file mode 100644
index 0000000000..d373eabc55
--- /dev/null
+++ b/tests/qapi-schema/doc-interleaved-section.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-interleaved-section.json:15:1: '@foobar:' can't follow 'Note' section
diff --git a/tests/qapi-schema/doc-interleaved-section.exit b/tests/qapi-schema/doc-interleaved-section.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-interleaved-section.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-interleaved-section.json b/tests/qapi-schema/doc-interleaved-section.json
new file mode 100644
index 0000000000..adb29e98da
--- /dev/null
+++ b/tests/qapi-schema/doc-interleaved-section.json
@@ -0,0 +1,21 @@
+# Arguments and sections must not be interleaved
+
+##
+# @TestStruct:
+#
+# body
+#
+# @integer: foo
+# blah
+#
+# bao
+#
+# Note: a section.
+#
+# @foobar: catch this
+#
+# Since: 2.3
+#
+##
+{ 'struct': 'TestStruct',
+ 'data': { 'integer': 'int', 'foobar': 'int' } }
diff --git a/tests/qapi-schema/doc-interleaved-section.out b/tests/qapi-schema/doc-interleaved-section.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-interleaved-section.out
diff --git a/tests/qapi-schema/doc-invalid-end.err b/tests/qapi-schema/doc-invalid-end.err
new file mode 100644
index 0000000000..2bda28cb54
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-end.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-invalid-end.json:5:2: Documentation comment must end with '##'
diff --git a/tests/qapi-schema/doc-invalid-end.exit b/tests/qapi-schema/doc-invalid-end.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-end.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-invalid-end.json b/tests/qapi-schema/doc-invalid-end.json
new file mode 100644
index 0000000000..3583b23b18
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-end.json
@@ -0,0 +1,5 @@
+# Documentation must end with '##'
+
+##
+# An invalid comment
+#
diff --git a/tests/qapi-schema/doc-invalid-end.out b/tests/qapi-schema/doc-invalid-end.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-end.out
diff --git a/tests/qapi-schema/doc-invalid-end2.err b/tests/qapi-schema/doc-invalid-end2.err
new file mode 100644
index 0000000000..6fad9c789e
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-end2.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-invalid-end2.json:5:1: Junk after '##' at end of documentation comment
diff --git a/tests/qapi-schema/doc-invalid-end2.exit b/tests/qapi-schema/doc-invalid-end2.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-end2.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-invalid-end2.json b/tests/qapi-schema/doc-invalid-end2.json
new file mode 100644
index 0000000000..fa2d39d7c2
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-end2.json
@@ -0,0 +1,5 @@
+# Documentation must end with '##'
+
+##
+#
+## invalid
diff --git a/tests/qapi-schema/doc-invalid-end2.out b/tests/qapi-schema/doc-invalid-end2.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-end2.out
diff --git a/tests/qapi-schema/doc-invalid-return.err b/tests/qapi-schema/doc-invalid-return.err
new file mode 100644
index 0000000000..5aaba33bb4
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-return.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-invalid-return.json:3: 'Returns:' is only valid for commands
diff --git a/tests/qapi-schema/doc-invalid-return.exit b/tests/qapi-schema/doc-invalid-return.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-return.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-invalid-return.json b/tests/qapi-schema/doc-invalid-return.json
new file mode 100644
index 0000000000..1ba45de414
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-return.json
@@ -0,0 +1,7 @@
+# Events can't have 'Returns' section
+
+##
+# @foo:
+# Returns: blah
+##
+{ 'event': 'foo' }
diff --git a/tests/qapi-schema/doc-invalid-return.out b/tests/qapi-schema/doc-invalid-return.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-return.out
diff --git a/tests/qapi-schema/doc-invalid-section.err b/tests/qapi-schema/doc-invalid-section.err
new file mode 100644
index 0000000000..85bb67b829
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-section.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-invalid-section.json:3: Free-form documentation block must not contain @NAME: sections
diff --git a/tests/qapi-schema/doc-invalid-section.exit b/tests/qapi-schema/doc-invalid-section.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-section.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-invalid-section.json b/tests/qapi-schema/doc-invalid-section.json
new file mode 100644
index 0000000000..0578b8ae25
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-section.json
@@ -0,0 +1,6 @@
+# Free-form documentation doesn't have tagged-sections
+
+##
+# freeform
+# @note: foo
+##
diff --git a/tests/qapi-schema/doc-invalid-section.out b/tests/qapi-schema/doc-invalid-section.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-section.out
diff --git a/tests/qapi-schema/doc-invalid-start.err b/tests/qapi-schema/doc-invalid-start.err
new file mode 100644
index 0000000000..149af2bfac
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-start.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-invalid-start.json:3:1: Junk after '##' at start of documentation comment
diff --git a/tests/qapi-schema/doc-invalid-start.exit b/tests/qapi-schema/doc-invalid-start.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-start.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-invalid-start.json b/tests/qapi-schema/doc-invalid-start.json
new file mode 100644
index 0000000000..4f6c15a38c
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-start.json
@@ -0,0 +1,5 @@
+# Documentation must start with '##'
+
+## invalid
+#
+##
diff --git a/tests/qapi-schema/doc-invalid-start.out b/tests/qapi-schema/doc-invalid-start.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-invalid-start.out
diff --git a/tests/qapi-schema/doc-missing-colon.err b/tests/qapi-schema/doc-missing-colon.err
new file mode 100644
index 0000000000..817398b8e4
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-colon.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-missing-colon.json:4:1: Line should end with :
diff --git a/tests/qapi-schema/doc-missing-colon.exit b/tests/qapi-schema/doc-missing-colon.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-colon.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-missing-colon.json b/tests/qapi-schema/doc-missing-colon.json
new file mode 100644
index 0000000000..d88c06c6dd
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-colon.json
@@ -0,0 +1,5 @@
+# The symbol section must end with ':'
+
+##
+# @missing-colon
+##
diff --git a/tests/qapi-schema/doc-missing-colon.out b/tests/qapi-schema/doc-missing-colon.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-colon.out
diff --git a/tests/qapi-schema/doc-missing-expr.err b/tests/qapi-schema/doc-missing-expr.err
new file mode 100644
index 0000000000..c0e687cadd
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-expr.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-missing-expr.json:3: Documention for 'bar' is not followed by the definition
diff --git a/tests/qapi-schema/doc-missing-expr.exit b/tests/qapi-schema/doc-missing-expr.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-expr.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-missing-expr.json b/tests/qapi-schema/doc-missing-expr.json
new file mode 100644
index 0000000000..06ad7df8d6
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-expr.json
@@ -0,0 +1,5 @@
+# Expression documentation must be followed by the actual expression
+
+##
+# @bar:
+##
diff --git a/tests/qapi-schema/doc-missing-expr.out b/tests/qapi-schema/doc-missing-expr.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-expr.out
diff --git a/tests/qapi-schema/doc-missing-space.err b/tests/qapi-schema/doc-missing-space.err
new file mode 100644
index 0000000000..d6b46ffd77
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-space.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-missing-space.json:5:1: Missing space after #
diff --git a/tests/qapi-schema/doc-missing-space.exit b/tests/qapi-schema/doc-missing-space.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-space.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-missing-space.json b/tests/qapi-schema/doc-missing-space.json
new file mode 100644
index 0000000000..beb276bc64
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-space.json
@@ -0,0 +1,6 @@
+# Documentation line must have a leading space
+
+##
+# missing space:
+#wef
+##
diff --git a/tests/qapi-schema/doc-missing-space.out b/tests/qapi-schema/doc-missing-space.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-missing-space.out
diff --git a/tests/qapi-schema/doc-optional.err b/tests/qapi-schema/doc-optional.err
new file mode 100644
index 0000000000..20d405af79
--- /dev/null
+++ b/tests/qapi-schema/doc-optional.err
@@ -0,0 +1 @@
+tests/qapi-schema/doc-optional.json:3: Description has #optional, but the declaration doesn't
diff --git a/tests/qapi-schema/doc-optional.exit b/tests/qapi-schema/doc-optional.exit
new file mode 100644
index 0000000000..d00491fd7e
--- /dev/null
+++ b/tests/qapi-schema/doc-optional.exit
@@ -0,0 +1 @@
+1
diff --git a/tests/qapi-schema/doc-optional.json b/tests/qapi-schema/doc-optional.json
new file mode 100644
index 0000000000..06c855ec94
--- /dev/null
+++ b/tests/qapi-schema/doc-optional.json
@@ -0,0 +1,7 @@
+# Description #optional should match declaration
+
+##
+# @foo:
+# @a: a #optional
+##
+{ 'command': 'foo', 'data': {'a': 'int'} }
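The accepted counterpart to this fixture marks the member optional on both sides: '#optional' in the description and a '*' prefix in the declaration. A hypothetical sketch, not taken from the patch:

    ##
    # @foo-ok:
    #
    # @a: a value #optional
    ##
    { 'command': 'foo-ok', 'data': { '*a': 'int' } }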
diff --git a/tests/qapi-schema/doc-optional.out b/tests/qapi-schema/doc-optional.out
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-optional.out
diff --git a/tests/qapi-schema/double-type.err b/tests/qapi-schema/double-type.err
index f9613c6d6b..424df9bedd 100644
--- a/tests/qapi-schema/double-type.err
+++ b/tests/qapi-schema/double-type.err
@@ -1 +1 @@
-tests/qapi-schema/double-type.json:2: Unknown key 'command' in struct 'bar'
+tests/qapi-schema/double-type.json:6: Unknown key 'command' in struct 'bar'
diff --git a/tests/qapi-schema/double-type.json b/tests/qapi-schema/double-type.json
index 911fa7af50..ab59523ff7 100644
--- a/tests/qapi-schema/double-type.json
+++ b/tests/qapi-schema/double-type.json
@@ -1,2 +1,6 @@
# we reject an expression with ambiguous metatype
+
+##
+# @foo:
+##
{ 'command': 'foo', 'struct': 'bar', 'data': { } }
diff --git a/tests/qapi-schema/enum-bad-name.err b/tests/qapi-schema/enum-bad-name.err
index 9c3c1002b7..157d1b0d69 100644
--- a/tests/qapi-schema/enum-bad-name.err
+++ b/tests/qapi-schema/enum-bad-name.err
@@ -1 +1 @@
-tests/qapi-schema/enum-bad-name.json:2: Member of enum 'MyEnum' uses invalid name 'not^possible'
+tests/qapi-schema/enum-bad-name.json:6: Member of enum 'MyEnum' uses invalid name 'not^possible'
diff --git a/tests/qapi-schema/enum-bad-name.json b/tests/qapi-schema/enum-bad-name.json
index 8506562b31..978cb88994 100644
--- a/tests/qapi-schema/enum-bad-name.json
+++ b/tests/qapi-schema/enum-bad-name.json
@@ -1,2 +1,6 @@
# we ensure all enum names can map to C
+
+##
+# @MyEnum:
+##
{ 'enum': 'MyEnum', 'data': [ 'not^possible' ] }
diff --git a/tests/qapi-schema/enum-bad-prefix.err b/tests/qapi-schema/enum-bad-prefix.err
index 399f5f7af5..918915f7ab 100644
--- a/tests/qapi-schema/enum-bad-prefix.err
+++ b/tests/qapi-schema/enum-bad-prefix.err
@@ -1 +1 @@
-tests/qapi-schema/enum-bad-prefix.json:2: Enum 'MyEnum' requires a string for 'prefix'
+tests/qapi-schema/enum-bad-prefix.json:6: Enum 'MyEnum' requires a string for 'prefix'
diff --git a/tests/qapi-schema/enum-bad-prefix.json b/tests/qapi-schema/enum-bad-prefix.json
index 996f628f6d..25f17a7b08 100644
--- a/tests/qapi-schema/enum-bad-prefix.json
+++ b/tests/qapi-schema/enum-bad-prefix.json
@@ -1,2 +1,6 @@
# The prefix must be a string type
+
+##
+# @MyEnum:
+##
{ 'enum': 'MyEnum', 'data': [ 'one' ], 'prefix': [ 'fish' ] }
diff --git a/tests/qapi-schema/enum-clash-member.err b/tests/qapi-schema/enum-clash-member.err
index 5403c78507..25249b63c4 100644
--- a/tests/qapi-schema/enum-clash-member.err
+++ b/tests/qapi-schema/enum-clash-member.err
@@ -1 +1 @@
-tests/qapi-schema/enum-clash-member.json:2: 'one_two' (member of MyEnum) collides with 'one-two' (member of MyEnum)
+tests/qapi-schema/enum-clash-member.json:6: 'one_two' (member of MyEnum) collides with 'one-two' (member of MyEnum)
diff --git a/tests/qapi-schema/enum-clash-member.json b/tests/qapi-schema/enum-clash-member.json
index b6928b8bfd..fd52751941 100644
--- a/tests/qapi-schema/enum-clash-member.json
+++ b/tests/qapi-schema/enum-clash-member.json
@@ -1,2 +1,6 @@
# we reject enums where members will clash when mapped to C enum
+
+##
+# @MyEnum:
+##
{ 'enum': 'MyEnum', 'data': [ 'one-two', 'one_two' ] }
diff --git a/tests/qapi-schema/enum-dict-member.err b/tests/qapi-schema/enum-dict-member.err
index 8ca146ea59..9b7d2f111d 100644
--- a/tests/qapi-schema/enum-dict-member.err
+++ b/tests/qapi-schema/enum-dict-member.err
@@ -1 +1 @@
-tests/qapi-schema/enum-dict-member.json:2: Member of enum 'MyEnum' requires a string name
+tests/qapi-schema/enum-dict-member.json:6: Member of enum 'MyEnum' requires a string name
diff --git a/tests/qapi-schema/enum-dict-member.json b/tests/qapi-schema/enum-dict-member.json
index 79672e0f09..69d30f0c1e 100644
--- a/tests/qapi-schema/enum-dict-member.json
+++ b/tests/qapi-schema/enum-dict-member.json
@@ -1,2 +1,6 @@
# we reject any enum member that is not a string
+
+##
+# @MyEnum:
+##
{ 'enum': 'MyEnum', 'data': [ { 'value': 'str' } ] }
diff --git a/tests/qapi-schema/enum-member-case.err b/tests/qapi-schema/enum-member-case.err
index b652e9aacc..df96e2205a 100644
--- a/tests/qapi-schema/enum-member-case.err
+++ b/tests/qapi-schema/enum-member-case.err
@@ -1 +1 @@
-tests/qapi-schema/enum-member-case.json:3: 'Value' (member of NoWayThisWillGetWhitelisted) should not use uppercase
+tests/qapi-schema/enum-member-case.json:10: 'Value' (member of NoWayThisWillGetWhitelisted) should not use uppercase
diff --git a/tests/qapi-schema/enum-member-case.json b/tests/qapi-schema/enum-member-case.json
index 2096b350ca..d2e4aba39d 100644
--- a/tests/qapi-schema/enum-member-case.json
+++ b/tests/qapi-schema/enum-member-case.json
@@ -1,3 +1,10 @@
# Member names should be 'lower-case' unless the enum is whitelisted
+
+##
+# @UuidInfo:
+##
{ 'enum': 'UuidInfo', 'data': [ 'Value' ] } # UuidInfo is whitelisted
+##
+# @NoWayThisWillGetWhitelisted:
+##
{ 'enum': 'NoWayThisWillGetWhitelisted', 'data': [ 'Value' ] }
diff --git a/tests/qapi-schema/enum-missing-data.err b/tests/qapi-schema/enum-missing-data.err
index ba4873ae69..de4b9e8281 100644
--- a/tests/qapi-schema/enum-missing-data.err
+++ b/tests/qapi-schema/enum-missing-data.err
@@ -1 +1 @@
-tests/qapi-schema/enum-missing-data.json:2: Key 'data' is missing from enum 'MyEnum'
+tests/qapi-schema/enum-missing-data.json:6: Key 'data' is missing from enum 'MyEnum'
diff --git a/tests/qapi-schema/enum-missing-data.json b/tests/qapi-schema/enum-missing-data.json
index 558fd35e93..d7601f91fb 100644
--- a/tests/qapi-schema/enum-missing-data.json
+++ b/tests/qapi-schema/enum-missing-data.json
@@ -1,2 +1,6 @@
# we require that all QAPI enums have a data array
+
+##
+# @MyEnum:
+##
{ 'enum': 'MyEnum' }
diff --git a/tests/qapi-schema/enum-wrong-data.err b/tests/qapi-schema/enum-wrong-data.err
index 11b43471cf..c44e9b59dc 100644
--- a/tests/qapi-schema/enum-wrong-data.err
+++ b/tests/qapi-schema/enum-wrong-data.err
@@ -1 +1 @@
-tests/qapi-schema/enum-wrong-data.json:2: Enum 'MyEnum' requires an array for 'data'
+tests/qapi-schema/enum-wrong-data.json:6: Enum 'MyEnum' requires an array for 'data'
diff --git a/tests/qapi-schema/enum-wrong-data.json b/tests/qapi-schema/enum-wrong-data.json
index 7b3e255c14..4b9e97878b 100644
--- a/tests/qapi-schema/enum-wrong-data.json
+++ b/tests/qapi-schema/enum-wrong-data.json
@@ -1,2 +1,6 @@
# we require that all qapi enums have an array for data
+
+##
+# @MyEnum:
+##
{ 'enum': 'MyEnum', 'data': { 'value': 'str' } }
diff --git a/tests/qapi-schema/event-boxed-empty.err b/tests/qapi-schema/event-boxed-empty.err
index 68ec6f2d2b..defe656e32 100644
--- a/tests/qapi-schema/event-boxed-empty.err
+++ b/tests/qapi-schema/event-boxed-empty.err
@@ -1 +1 @@
-tests/qapi-schema/event-boxed-empty.json:2: Use of 'boxed' requires 'data'
+tests/qapi-schema/event-boxed-empty.json:6: Use of 'boxed' requires 'data'
diff --git a/tests/qapi-schema/event-boxed-empty.json b/tests/qapi-schema/event-boxed-empty.json
index cb145f1433..63b870b31b 100644
--- a/tests/qapi-schema/event-boxed-empty.json
+++ b/tests/qapi-schema/event-boxed-empty.json
@@ -1,2 +1,6 @@
# 'boxed' requires a non-empty type
+
+##
+# @FOO:
+##
{ 'event': 'FOO', 'boxed': true }
diff --git a/tests/qapi-schema/event-case.json b/tests/qapi-schema/event-case.json
index 3a92d8b610..6b05c5d247 100644
--- a/tests/qapi-schema/event-case.json
+++ b/tests/qapi-schema/event-case.json
@@ -1,3 +1,7 @@
# TODO: might be nice to enforce naming conventions; but until then this works
# even though events should usually be ALL_CAPS
+
+##
+# @oops:
+##
{ 'event': 'oops' }
diff --git a/tests/qapi-schema/event-case.out b/tests/qapi-schema/event-case.out
index 5a0f2bf805..2865714ad5 100644
--- a/tests/qapi-schema/event-case.out
+++ b/tests/qapi-schema/event-case.out
@@ -3,3 +3,4 @@ enum QType ['none', 'qnull', 'qint', 'qstring', 'qdict', 'qlist', 'qfloat', 'qbo
event oops None
boxed=False
object q_empty
+doc symbol=oops expr=('event', 'oops')
diff --git a/tests/qapi-schema/event-nest-struct.err b/tests/qapi-schema/event-nest-struct.err
index 5a42701b8f..17a6c3c7b9 100644
--- a/tests/qapi-schema/event-nest-struct.err
+++ b/tests/qapi-schema/event-nest-struct.err
@@ -1 +1 @@
-tests/qapi-schema/event-nest-struct.json:1: Member 'a' of 'data' for event 'EVENT_A' should be a type name
+tests/qapi-schema/event-nest-struct.json:5: Member 'a' of 'data' for event 'EVENT_A' should be a type name
diff --git a/tests/qapi-schema/event-nest-struct.json b/tests/qapi-schema/event-nest-struct.json
index ee6f3ecb6f..328e0a64d3 100644
--- a/tests/qapi-schema/event-nest-struct.json
+++ b/tests/qapi-schema/event-nest-struct.json
@@ -1,2 +1,6 @@
+##
+# @EVENT_A:
+# event-nest-struct
+##
{ 'event': 'EVENT_A',
'data': { 'a' : { 'string' : 'str', 'integer': 'int' }, 'b' : 'str' } }
diff --git a/tests/qapi-schema/flat-union-array-branch.err b/tests/qapi-schema/flat-union-array-branch.err
index 8ea91eadb2..e456094993 100644
--- a/tests/qapi-schema/flat-union-array-branch.err
+++ b/tests/qapi-schema/flat-union-array-branch.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-array-branch.json:8: Member 'value1' of union 'TestUnion' cannot be an array
+tests/qapi-schema/flat-union-array-branch.json:20: Member 'value1' of union 'TestUnion' cannot be an array
diff --git a/tests/qapi-schema/flat-union-array-branch.json b/tests/qapi-schema/flat-union-array-branch.json
index 0b98820a8f..51dde10392 100644
--- a/tests/qapi-schema/flat-union-array-branch.json
+++ b/tests/qapi-schema/flat-union-array-branch.json
@@ -1,10 +1,22 @@
+##
+# @TestEnum:
+##
# we require flat union branches to be a struct
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @Base:
+##
{ 'struct': 'Base',
'data': { 'enum1': 'TestEnum' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'Base',
'discriminator': 'enum1',
diff --git a/tests/qapi-schema/flat-union-bad-base.err b/tests/qapi-schema/flat-union-bad-base.err
index bee24a217a..072ffbaadd 100644
--- a/tests/qapi-schema/flat-union-bad-base.err
+++ b/tests/qapi-schema/flat-union-bad-base.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-bad-base.json:8: 'string' (member of TestTypeA) collides with 'string' (base of TestUnion)
+tests/qapi-schema/flat-union-bad-base.json:21: 'string' (member of TestTypeA) collides with 'string' (base of TestUnion)
diff --git a/tests/qapi-schema/flat-union-bad-base.json b/tests/qapi-schema/flat-union-bad-base.json
index 74dd421708..7713e7f0ad 100644
--- a/tests/qapi-schema/flat-union-bad-base.json
+++ b/tests/qapi-schema/flat-union-bad-base.json
@@ -1,10 +1,23 @@
# we allow anonymous base, but enforce no duplicate keys
+
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @TestTypeA:
+##
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': { 'enum1': 'TestEnum', 'string': 'str' },
'discriminator': 'enum1',
diff --git a/tests/qapi-schema/flat-union-bad-discriminator.err b/tests/qapi-schema/flat-union-bad-discriminator.err
index c38cc8e4df..1be4e7b23a 100644
--- a/tests/qapi-schema/flat-union-bad-discriminator.err
+++ b/tests/qapi-schema/flat-union-bad-discriminator.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-bad-discriminator.json:11: Discriminator of flat union 'TestUnion' requires a string name
+tests/qapi-schema/flat-union-bad-discriminator.json:27: Discriminator of flat union 'TestUnion' requires a string name
diff --git a/tests/qapi-schema/flat-union-bad-discriminator.json b/tests/qapi-schema/flat-union-bad-discriminator.json
index cd10b9d901..ef92f9b583 100644
--- a/tests/qapi-schema/flat-union-bad-discriminator.json
+++ b/tests/qapi-schema/flat-union-bad-discriminator.json
@@ -1,13 +1,29 @@
# we require the discriminator to be a string naming a base-type member
# this tests the old syntax for anonymous unions before we added alternates
+
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @TestBase:
+##
{ 'struct': 'TestBase',
'data': { 'enum1': 'TestEnum', 'kind': 'str' } }
+##
+# @TestTypeA:
+##
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'TestBase',
'discriminator': {},
diff --git a/tests/qapi-schema/flat-union-base-any.err b/tests/qapi-schema/flat-union-base-any.err
index 646f1c9cd1..c1ea2d76b3 100644
--- a/tests/qapi-schema/flat-union-base-any.err
+++ b/tests/qapi-schema/flat-union-base-any.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-base-any.json:8: 'base' for union 'TestUnion' cannot use built-in type 'any'
+tests/qapi-schema/flat-union-base-any.json:21: 'base' for union 'TestUnion' cannot use built-in type 'any'
diff --git a/tests/qapi-schema/flat-union-base-any.json b/tests/qapi-schema/flat-union-base-any.json
index fe66b713ef..3dfb02fa30 100644
--- a/tests/qapi-schema/flat-union-base-any.json
+++ b/tests/qapi-schema/flat-union-base-any.json
@@ -1,10 +1,23 @@
# we require the base to be an existing struct
+
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @TestTypeA:
+##
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'any',
'discriminator': 'enum1',
diff --git a/tests/qapi-schema/flat-union-base-union.err b/tests/qapi-schema/flat-union-base-union.err
index f138395e45..ccc5e85876 100644
--- a/tests/qapi-schema/flat-union-base-union.err
+++ b/tests/qapi-schema/flat-union-base-union.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-base-union.json:14: 'base' for union 'TestUnion' cannot use union type 'UnionBase'
+tests/qapi-schema/flat-union-base-union.json:30: 'base' for union 'TestUnion' cannot use union type 'UnionBase'
diff --git a/tests/qapi-schema/flat-union-base-union.json b/tests/qapi-schema/flat-union-base-union.json
index 98b4eba181..c63c6130b8 100644
--- a/tests/qapi-schema/flat-union-base-union.json
+++ b/tests/qapi-schema/flat-union-base-union.json
@@ -2,15 +2,31 @@
# TODO: It would be possible to allow a union as a base, as long as all
# permutations of QMP names exposed by base do not clash with any QMP
# member names added by local variants.
+
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @TestTypeA:
+##
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @UnionBase:
+##
{ 'union': 'UnionBase',
'data': { 'kind1': 'TestTypeA',
'kind2': 'TestTypeB' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'UnionBase',
'discriminator': 'type',
diff --git a/tests/qapi-schema/flat-union-clash-member.err b/tests/qapi-schema/flat-union-clash-member.err
index 2adf69755a..fe12a07e2d 100644
--- a/tests/qapi-schema/flat-union-clash-member.err
+++ b/tests/qapi-schema/flat-union-clash-member.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-clash-member.json:11: 'name' (member of Branch1) collides with 'name' (member of Base)
+tests/qapi-schema/flat-union-clash-member.json:27: 'name' (member of Branch1) collides with 'name' (member of Base)
diff --git a/tests/qapi-schema/flat-union-clash-member.json b/tests/qapi-schema/flat-union-clash-member.json
index 9efc7719b8..9000b94f16 100644
--- a/tests/qapi-schema/flat-union-clash-member.json
+++ b/tests/qapi-schema/flat-union-clash-member.json
@@ -1,13 +1,29 @@
# We check for no duplicate keys between branch members and base
# base's member 'name' clashes with Branch1's
+
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @Base:
+##
{ 'struct': 'Base',
'data': { 'enum1': 'TestEnum', '*name': 'str' } }
+##
+# @Branch1:
+##
{ 'struct': 'Branch1',
'data': { 'name': 'str' } }
+##
+# @Branch2:
+##
{ 'struct': 'Branch2',
'data': { 'value': 'int' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'Base',
'discriminator': 'enum1',
diff --git a/tests/qapi-schema/flat-union-empty.err b/tests/qapi-schema/flat-union-empty.err
index 15754f54eb..ead7bd4fcb 100644
--- a/tests/qapi-schema/flat-union-empty.err
+++ b/tests/qapi-schema/flat-union-empty.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-empty.json:4: Union 'Union' cannot have empty 'data'
+tests/qapi-schema/flat-union-empty.json:14: Union 'Union' cannot have empty 'data'
diff --git a/tests/qapi-schema/flat-union-empty.json b/tests/qapi-schema/flat-union-empty.json
index 77f1d9abfb..afa8988205 100644
--- a/tests/qapi-schema/flat-union-empty.json
+++ b/tests/qapi-schema/flat-union-empty.json
@@ -1,4 +1,14 @@
# flat unions cannot be empty
+
+##
+# @Empty:
+##
{ 'enum': 'Empty', 'data': [ ] }
+##
+# @Base:
+##
{ 'struct': 'Base', 'data': { 'type': 'Empty' } }
+##
+# @Union:
+##
{ 'union': 'Union', 'base': 'Base', 'discriminator': 'type', 'data': { } }
diff --git a/tests/qapi-schema/flat-union-incomplete-branch.err b/tests/qapi-schema/flat-union-incomplete-branch.err
index e826bf0789..c655bbfb4a 100644
--- a/tests/qapi-schema/flat-union-incomplete-branch.err
+++ b/tests/qapi-schema/flat-union-incomplete-branch.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-incomplete-branch.json:6: Union 'TestUnion' data missing 'value2' branch
+tests/qapi-schema/flat-union-incomplete-branch.json:16: Union 'TestUnion' data missing 'value2' branch
diff --git a/tests/qapi-schema/flat-union-incomplete-branch.json b/tests/qapi-schema/flat-union-incomplete-branch.json
index 25a411bc83..dea03775c7 100644
--- a/tests/qapi-schema/flat-union-incomplete-branch.json
+++ b/tests/qapi-schema/flat-union-incomplete-branch.json
@@ -1,8 +1,18 @@
# we require all branches of the union to be covered
+
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @TestTypeA:
+##
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': { 'type': 'TestEnum' },
'discriminator': 'type',
diff --git a/tests/qapi-schema/flat-union-inline.err b/tests/qapi-schema/flat-union-inline.err
index 2333358d28..c2c3f7604b 100644
--- a/tests/qapi-schema/flat-union-inline.err
+++ b/tests/qapi-schema/flat-union-inline.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-inline.json:7: Member 'value1' of union 'TestUnion' should be a type name
+tests/qapi-schema/flat-union-inline.json:17: Member 'value1' of union 'TestUnion' should be a type name
diff --git a/tests/qapi-schema/flat-union-inline.json b/tests/qapi-schema/flat-union-inline.json
index 62c7cda617..400f0817a1 100644
--- a/tests/qapi-schema/flat-union-inline.json
+++ b/tests/qapi-schema/flat-union-inline.json
@@ -1,9 +1,19 @@
# we require branches to be a struct name
# TODO: should we allow anonymous inline branch types?
+
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @Base:
+##
{ 'struct': 'Base',
'data': { 'enum1': 'TestEnum', 'kind': 'str' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'Base',
'discriminator': 'enum1',
diff --git a/tests/qapi-schema/flat-union-int-branch.err b/tests/qapi-schema/flat-union-int-branch.err
index faf01573b7..299cbb24b2 100644
--- a/tests/qapi-schema/flat-union-int-branch.err
+++ b/tests/qapi-schema/flat-union-int-branch.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-int-branch.json:8: Member 'value1' of union 'TestUnion' cannot use built-in type 'int'
+tests/qapi-schema/flat-union-int-branch.json:21: Member 'value1' of union 'TestUnion' cannot use built-in type 'int'
diff --git a/tests/qapi-schema/flat-union-int-branch.json b/tests/qapi-schema/flat-union-int-branch.json
index 9370c349e8..9603e172f8 100644
--- a/tests/qapi-schema/flat-union-int-branch.json
+++ b/tests/qapi-schema/flat-union-int-branch.json
@@ -1,10 +1,23 @@
# we require flat union branches to be a struct
+
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @Base:
+##
{ 'struct': 'Base',
'data': { 'enum1': 'TestEnum' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'Base',
'discriminator': 'enum1',
diff --git a/tests/qapi-schema/flat-union-invalid-branch-key.err b/tests/qapi-schema/flat-union-invalid-branch-key.err
index ccf72d2dfe..455f2dc083 100644
--- a/tests/qapi-schema/flat-union-invalid-branch-key.err
+++ b/tests/qapi-schema/flat-union-invalid-branch-key.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-invalid-branch-key.json:13: Discriminator value 'value_wrong' is not found in enum 'TestEnum'
+tests/qapi-schema/flat-union-invalid-branch-key.json:28: Discriminator value 'value_wrong' is not found in enum 'TestEnum'
diff --git a/tests/qapi-schema/flat-union-invalid-branch-key.json b/tests/qapi-schema/flat-union-invalid-branch-key.json
index 95ff7746bf..00f28966ff 100644
--- a/tests/qapi-schema/flat-union-invalid-branch-key.json
+++ b/tests/qapi-schema/flat-union-invalid-branch-key.json
@@ -1,15 +1,30 @@
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @TestBase:
+##
{ 'struct': 'TestBase',
'data': { 'enum1': 'TestEnum' } }
+##
+# @TestTypeA:
+##
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'TestBase',
'discriminator': 'enum1',
diff --git a/tests/qapi-schema/flat-union-invalid-discriminator.err b/tests/qapi-schema/flat-union-invalid-discriminator.err
index 5f4055614e..f0e427b0a7 100644
--- a/tests/qapi-schema/flat-union-invalid-discriminator.err
+++ b/tests/qapi-schema/flat-union-invalid-discriminator.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-invalid-discriminator.json:13: Discriminator 'enum_wrong' is not a member of base struct 'TestBase'
+tests/qapi-schema/flat-union-invalid-discriminator.json:28: Discriminator 'enum_wrong' is not a member of base struct 'TestBase'
diff --git a/tests/qapi-schema/flat-union-invalid-discriminator.json b/tests/qapi-schema/flat-union-invalid-discriminator.json
index 48b94c3a4d..c8700c7d71 100644
--- a/tests/qapi-schema/flat-union-invalid-discriminator.json
+++ b/tests/qapi-schema/flat-union-invalid-discriminator.json
@@ -1,15 +1,30 @@
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @TestBase:
+##
{ 'struct': 'TestBase',
'data': { 'enum1': 'TestEnum' } }
+##
+# @TestTypeA:
+##
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'TestBase',
'discriminator': 'enum_wrong',
diff --git a/tests/qapi-schema/flat-union-no-base.err b/tests/qapi-schema/flat-union-no-base.err
index 841c93b554..a2d0a81aa0 100644
--- a/tests/qapi-schema/flat-union-no-base.err
+++ b/tests/qapi-schema/flat-union-no-base.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-no-base.json:9: Flat union 'TestUnion' must have a base
+tests/qapi-schema/flat-union-no-base.json:22: Flat union 'TestUnion' must have a base
diff --git a/tests/qapi-schema/flat-union-no-base.json b/tests/qapi-schema/flat-union-no-base.json
index ffc4c6f0e6..641f68aea4 100644
--- a/tests/qapi-schema/flat-union-no-base.json
+++ b/tests/qapi-schema/flat-union-no-base.json
@@ -1,11 +1,24 @@
# flat unions require a base
# TODO: simple unions should be able to use an enum discriminator
+
+##
+# @TestTypeA:
+##
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @Enum:
+##
{ 'enum': 'Enum',
'data': [ 'value1', 'value2' ] }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'discriminator': 'Enum',
'data': { 'value1': 'TestTypeA',
diff --git a/tests/qapi-schema/flat-union-optional-discriminator.err b/tests/qapi-schema/flat-union-optional-discriminator.err
index aaabedb3bd..e15f8564dd 100644
--- a/tests/qapi-schema/flat-union-optional-discriminator.err
+++ b/tests/qapi-schema/flat-union-optional-discriminator.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-optional-discriminator.json:6: Discriminator of flat union 'MyUnion' does not allow optional name '*switch'
+tests/qapi-schema/flat-union-optional-discriminator.json:19: Discriminator of flat union 'MyUnion' does not allow optional name '*switch'
diff --git a/tests/qapi-schema/flat-union-optional-discriminator.json b/tests/qapi-schema/flat-union-optional-discriminator.json
index 08a8f7ef8b..9f19af5789 100644
--- a/tests/qapi-schema/flat-union-optional-discriminator.json
+++ b/tests/qapi-schema/flat-union-optional-discriminator.json
@@ -1,8 +1,21 @@
# we require the discriminator to be non-optional
+
+##
+# @Enum:
+##
{ 'enum': 'Enum', 'data': [ 'one', 'two' ] }
+##
+# @Base:
+##
{ 'struct': 'Base',
'data': { '*switch': 'Enum' } }
+##
+# @Branch:
+##
{ 'struct': 'Branch', 'data': { 'name': 'str' } }
+##
+# @MyUnion:
+##
{ 'union': 'MyUnion',
'base': 'Base',
'discriminator': '*switch',
diff --git a/tests/qapi-schema/flat-union-string-discriminator.err b/tests/qapi-schema/flat-union-string-discriminator.err
index 200016bd5c..bc0c133aa9 100644
--- a/tests/qapi-schema/flat-union-string-discriminator.err
+++ b/tests/qapi-schema/flat-union-string-discriminator.err
@@ -1 +1 @@
-tests/qapi-schema/flat-union-string-discriminator.json:13: Discriminator 'kind' must be of enumeration type
+tests/qapi-schema/flat-union-string-discriminator.json:28: Discriminator 'kind' must be of enumeration type
diff --git a/tests/qapi-schema/flat-union-string-discriminator.json b/tests/qapi-schema/flat-union-string-discriminator.json
index 8af60333b6..47a17d2e4a 100644
--- a/tests/qapi-schema/flat-union-string-discriminator.json
+++ b/tests/qapi-schema/flat-union-string-discriminator.json
@@ -1,15 +1,30 @@
+##
+# @TestEnum:
+##
{ 'enum': 'TestEnum',
'data': [ 'value1', 'value2' ] }
+##
+# @TestBase:
+##
{ 'struct': 'TestBase',
'data': { 'enum1': 'TestEnum', 'kind': 'str' } }
+##
+# @TestTypeA:
+##
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'TestBase',
'discriminator': 'kind',
diff --git a/tests/qapi-schema/ident-with-escape.json b/tests/qapi-schema/ident-with-escape.json
index 56617501e7..c03404bee3 100644
--- a/tests/qapi-schema/ident-with-escape.json
+++ b/tests/qapi-schema/ident-with-escape.json
@@ -1,4 +1,8 @@
# we allow escape sequences in strings, if they map back to ASCII
# { 'command': 'fooA', 'data': { 'bar1': 'str' } }
+
+##
+# @fooA:
+##
{ 'c\u006fmmand': '\u0066\u006f\u006FA',
'd\u0061ta': { '\u0062\u0061\u00721': '\u0073\u0074\u0072' } }
diff --git a/tests/qapi-schema/ident-with-escape.out b/tests/qapi-schema/ident-with-escape.out
index 1d2722c02e..69fc908e68 100644
--- a/tests/qapi-schema/ident-with-escape.out
+++ b/tests/qapi-schema/ident-with-escape.out
@@ -5,3 +5,4 @@ command fooA q_obj_fooA-arg -> None
object q_empty
object q_obj_fooA-arg
member bar1: str optional=False
+doc symbol=fooA expr=('command', 'fooA')
diff --git a/tests/qapi-schema/include-relpath-sub.json b/tests/qapi-schema/include-relpath-sub.json
index 4bd4af4162..b4bd8a23d7 100644
--- a/tests/qapi-schema/include-relpath-sub.json
+++ b/tests/qapi-schema/include-relpath-sub.json
@@ -1,2 +1,5 @@
+##
+# @Status:
+##
{ 'enum': 'Status',
'data': [ 'good', 'bad', 'ugly' ] }
diff --git a/tests/qapi-schema/include-relpath.out b/tests/qapi-schema/include-relpath.out
index 5d7c13cad1..a962fb2d2e 100644
--- a/tests/qapi-schema/include-relpath.out
+++ b/tests/qapi-schema/include-relpath.out
@@ -2,3 +2,4 @@ enum QType ['none', 'qnull', 'qint', 'qstring', 'qdict', 'qlist', 'qfloat', 'qbo
prefix QTYPE
enum Status ['good', 'bad', 'ugly']
object q_empty
+doc symbol=Status expr=('enum', 'Status')
diff --git a/tests/qapi-schema/include-repetition.out b/tests/qapi-schema/include-repetition.out
index 5d7c13cad1..a962fb2d2e 100644
--- a/tests/qapi-schema/include-repetition.out
+++ b/tests/qapi-schema/include-repetition.out
@@ -2,3 +2,4 @@ enum QType ['none', 'qnull', 'qint', 'qstring', 'qdict', 'qlist', 'qfloat', 'qbo
prefix QTYPE
enum Status ['good', 'bad', 'ugly']
object q_empty
+doc symbol=Status expr=('enum', 'Status')
diff --git a/tests/qapi-schema/include-simple-sub.json b/tests/qapi-schema/include-simple-sub.json
index 4bd4af4162..b4bd8a23d7 100644
--- a/tests/qapi-schema/include-simple-sub.json
+++ b/tests/qapi-schema/include-simple-sub.json
@@ -1,2 +1,5 @@
+##
+# @Status:
+##
{ 'enum': 'Status',
'data': [ 'good', 'bad', 'ugly' ] }
diff --git a/tests/qapi-schema/include-simple.out b/tests/qapi-schema/include-simple.out
index 5d7c13cad1..a962fb2d2e 100644
--- a/tests/qapi-schema/include-simple.out
+++ b/tests/qapi-schema/include-simple.out
@@ -2,3 +2,4 @@ enum QType ['none', 'qnull', 'qint', 'qstring', 'qdict', 'qlist', 'qfloat', 'qbo
prefix QTYPE
enum Status ['good', 'bad', 'ugly']
object q_empty
+doc symbol=Status expr=('enum', 'Status')
diff --git a/tests/qapi-schema/indented-expr.json b/tests/qapi-schema/indented-expr.json
index 7115d3131e..d759be1877 100644
--- a/tests/qapi-schema/indented-expr.json
+++ b/tests/qapi-schema/indented-expr.json
@@ -1,2 +1,8 @@
+##
+# @eins:
+##
{ 'command' : 'eins' }
+##
+# @zwei:
+##
{ 'command' : 'zwei' }
diff --git a/tests/qapi-schema/indented-expr.out b/tests/qapi-schema/indented-expr.out
index e8171c935f..285d052257 100644
--- a/tests/qapi-schema/indented-expr.out
+++ b/tests/qapi-schema/indented-expr.out
@@ -5,3 +5,5 @@ command eins None -> None
object q_empty
command zwei None -> None
gen=True success_response=True boxed=False
+doc symbol=eins expr=('command', 'eins')
+doc symbol=zwei expr=('command', 'zwei')
diff --git a/tests/qapi-schema/missing-type.err b/tests/qapi-schema/missing-type.err
index b3e7b14e42..74c4ef7324 100644
--- a/tests/qapi-schema/missing-type.err
+++ b/tests/qapi-schema/missing-type.err
@@ -1 +1 @@
-tests/qapi-schema/missing-type.json:2: Expression is missing metatype
+tests/qapi-schema/missing-type.json:6: Expression is missing metatype
diff --git a/tests/qapi-schema/missing-type.json b/tests/qapi-schema/missing-type.json
index ff5349d3fe..c2fc62d0af 100644
--- a/tests/qapi-schema/missing-type.json
+++ b/tests/qapi-schema/missing-type.json
@@ -1,2 +1,6 @@
# we reject an expression with missing metatype
+
+##
+# @foo:
+##
{ 'data': { } }
diff --git a/tests/qapi-schema/nested-struct-data.err b/tests/qapi-schema/nested-struct-data.err
index da767bade2..379bd1d3f4 100644
--- a/tests/qapi-schema/nested-struct-data.err
+++ b/tests/qapi-schema/nested-struct-data.err
@@ -1 +1 @@
-tests/qapi-schema/nested-struct-data.json:2: Member 'a' of 'data' for command 'foo' should be a type name
+tests/qapi-schema/nested-struct-data.json:6: Member 'a' of 'data' for command 'foo' should be a type name
diff --git a/tests/qapi-schema/nested-struct-data.json b/tests/qapi-schema/nested-struct-data.json
index efbe773ded..6106e15e86 100644
--- a/tests/qapi-schema/nested-struct-data.json
+++ b/tests/qapi-schema/nested-struct-data.json
@@ -1,3 +1,7 @@
# inline subtypes collide with our desired future use of defaults
+
+##
+# @foo:
+##
{ 'command': 'foo',
'data': { 'a' : { 'string' : 'str', 'integer': 'int' }, 'b' : 'str' } }
diff --git a/tests/qapi-schema/qapi-schema-test.json b/tests/qapi-schema/qapi-schema-test.json
index 17194637ba..f4d8cc4230 100644
--- a/tests/qapi-schema/qapi-schema-test.json
+++ b/tests/qapi-schema/qapi-schema-test.json
@@ -3,67 +3,153 @@
# This file is a stress test of supported qapi constructs that must
# parse and compile correctly.
+##
+# = Section
+# == subsection
+#
+# Some text foo with *strong* and _emphasis_
+# 1. with a list
+# 2. like that @foo
+#
+# And some code:
+# | $ echo foo
+# | -> do this
+# | <- get that
+#
+# Note: is not a meta
+##
+
+##
+# @TestStruct:
+#
+# body with @var
+#
+# @integer: foo
+# blah
+#
+# bao
+#
+# @boolean: bar
+# @string: baz
+#
+# Example:
+#
+# -> { "execute": ... }
+# <- { "return": ... }
+#
+# Since: 2.3
+# Note: a note
+#
+##
{ 'struct': 'TestStruct',
'data': { 'integer': 'int', 'boolean': 'bool', 'string': 'str' } }
+##
+# @NestedEnumsOne:
# for testing enums
+##
{ 'struct': 'NestedEnumsOne',
'data': { 'enum1': 'EnumOne', # Intentional forward reference
'*enum2': 'EnumOne', 'enum3': 'EnumOne', '*enum4': 'EnumOne' } }
+##
+# @MyEnum:
# An empty enum, although unusual, is currently acceptable
+##
{ 'enum': 'MyEnum', 'data': [ ] }
+##
+# @Empty1:
# Likewise for an empty struct, including an empty base
+##
{ 'struct': 'Empty1', 'data': { } }
+##
+# @Empty2:
+##
{ 'struct': 'Empty2', 'base': 'Empty1', 'data': { } }
+##
+# @user_def_cmd0:
+##
{ 'command': 'user_def_cmd0', 'data': 'Empty2', 'returns': 'Empty2' }
+##
+# @QEnumTwo:
# for testing override of default naming heuristic
+##
{ 'enum': 'QEnumTwo',
'prefix': 'QENUM_TWO',
'data': [ 'value1', 'value2' ] }
+##
+# @UserDefOne:
# for testing nested structs
+##
{ 'struct': 'UserDefOne',
'base': 'UserDefZero', # intentional forward reference
'data': { 'string': 'str',
'*enum1': 'EnumOne' } } # intentional forward reference
+##
+# @EnumOne:
+##
{ 'enum': 'EnumOne',
'data': [ 'value1', 'value2', 'value3' ] }
+##
+# @UserDefZero:
+##
{ 'struct': 'UserDefZero',
'data': { 'integer': 'int' } }
+##
+# @UserDefTwoDictDict:
+##
{ 'struct': 'UserDefTwoDictDict',
'data': { 'userdef': 'UserDefOne', 'string': 'str' } }
+##
+# @UserDefTwoDict:
+##
{ 'struct': 'UserDefTwoDict',
'data': { 'string1': 'str',
'dict2': 'UserDefTwoDictDict',
'*dict3': 'UserDefTwoDictDict' } }
+##
+# @UserDefTwo:
+##
{ 'struct': 'UserDefTwo',
'data': { 'string0': 'str',
'dict1': 'UserDefTwoDict' } }
+##
+# @ForceArrays:
# dummy struct to force generation of array types not otherwise mentioned
+##
{ 'struct': 'ForceArrays',
'data': { 'unused1':['UserDefOne'], 'unused2':['UserDefTwo'],
'unused3':['TestStruct'] } }
+##
+# @UserDefA:
# for testing unions
# Among other things, test that a name collision between branches does
# not cause any problems (since only one branch can be in use at a time),
# by intentionally using two branches that both have a C member 'a_b'
+##
{ 'struct': 'UserDefA',
'data': { 'boolean': 'bool', '*a_b': 'int' } }
+##
+# @UserDefB:
+##
{ 'struct': 'UserDefB',
'data': { 'intb': 'int', '*a-b': 'bool' } }
+##
+# @UserDefFlatUnion:
+##
{ 'union': 'UserDefFlatUnion',
'base': 'UserDefUnionBase', # intentional forward reference
'discriminator': 'enum1',
@@ -71,35 +157,71 @@
'value2' : 'UserDefB',
'value3' : 'UserDefB' } }
+##
+# @UserDefUnionBase:
+##
{ 'struct': 'UserDefUnionBase',
'base': 'UserDefZero',
'data': { 'string': 'str', 'enum1': 'EnumOne' } }
+##
+# @UserDefFlatUnion2:
# this variant of UserDefFlatUnion defaults to a union that uses members with
# allocated types to test corner cases in the cleanup/dealloc visitor
+##
{ 'union': 'UserDefFlatUnion2',
'base': { '*integer': 'int', 'string': 'str', 'enum1': 'QEnumTwo' },
'discriminator': 'enum1',
'data': { 'value1' : 'UserDefC', # intentional forward reference
'value2' : 'UserDefB' } }
+##
+# @WrapAlternate:
+##
{ 'struct': 'WrapAlternate',
'data': { 'alt': 'UserDefAlternate' } }
+##
+# @UserDefAlternate:
+##
{ 'alternate': 'UserDefAlternate',
'data': { 'udfu': 'UserDefFlatUnion', 's': 'str', 'i': 'int' } }
+##
+# @UserDefC:
+##
{ 'struct': 'UserDefC',
'data': { 'string1': 'str', 'string2': 'str' } }
# for testing use of 'number' within alternates
+##
+# @AltStrBool:
+##
{ 'alternate': 'AltStrBool', 'data': { 's': 'str', 'b': 'bool' } }
+##
+# @AltStrNum:
+##
{ 'alternate': 'AltStrNum', 'data': { 's': 'str', 'n': 'number' } }
+##
+# @AltNumStr:
+##
{ 'alternate': 'AltNumStr', 'data': { 'n': 'number', 's': 'str' } }
+##
+# @AltStrInt:
+##
{ 'alternate': 'AltStrInt', 'data': { 's': 'str', 'i': 'int' } }
+##
+# @AltIntNum:
+##
{ 'alternate': 'AltIntNum', 'data': { 'i': 'int', 'n': 'number' } }
+##
+# @AltNumInt:
+##
{ 'alternate': 'AltNumInt', 'data': { 'n': 'number', 'i': 'int' } }
+##
+# @UserDefNativeListUnion:
# for testing native lists
+##
{ 'union': 'UserDefNativeListUnion',
'data': { 'integer': ['int'],
's8': ['int8'],
@@ -117,19 +239,61 @@
'any': ['any'] } }
# testing commands
+##
+# @user_def_cmd:
+##
{ 'command': 'user_def_cmd', 'data': {} }
+##
+# @user_def_cmd1:
+##
{ 'command': 'user_def_cmd1', 'data': {'ud1a': 'UserDefOne'} }
+##
+# @user_def_cmd2:
+##
{ 'command': 'user_def_cmd2',
'data': {'ud1a': 'UserDefOne', '*ud1b': 'UserDefOne'},
'returns': 'UserDefTwo' }
+##
+# Another comment
+##
+
+##
+# @guest-get-time:
+#
+# @guest-get-time body
+#
+# @a: an integer
+# @b: #optional integer
+#
+# Returns: returns something
+#
+# Example:
+#
+# -> { "execute": "guest-get-time", ... }
+# <- { "return": "42" }
+#
+##
+
# Returning a non-dictionary requires a name from the whitelist
{ 'command': 'guest-get-time', 'data': {'a': 'int', '*b': 'int' },
'returns': 'int' }
+##
+# @guest-sync:
+##
{ 'command': 'guest-sync', 'data': { 'arg': 'any' }, 'returns': 'any' }
+##
+# @boxed-struct:
+##
{ 'command': 'boxed-struct', 'boxed': true, 'data': 'UserDefZero' }
+##
+# @boxed-union:
+##
{ 'command': 'boxed-union', 'data': 'UserDefNativeListUnion', 'boxed': true }
+##
+# @UserDefOptions:
+#
# For testing integer range flattening in opts-visitor. The following schema
# corresponds to the option format:
#
@@ -137,6 +301,7 @@
#
# For simplicity, this example doesn't use [type=]discriminator nor optargs
# specific to discriminator values.
+##
{ 'struct': 'UserDefOptions',
'data': {
'*i64' : [ 'int' ],
@@ -146,35 +311,83 @@
'*u64x': 'uint64' } }
# testing event
+##
+# @EventStructOne:
+##
{ 'struct': 'EventStructOne',
'data': { 'struct1': 'UserDefOne', 'string': 'str', '*enum2': 'EnumOne' } }
+##
+# @EVENT_A:
+##
{ 'event': 'EVENT_A' }
+##
+# @EVENT_B:
+##
{ 'event': 'EVENT_B',
'data': { } }
+##
+# @EVENT_C:
+##
{ 'event': 'EVENT_C',
'data': { '*a': 'int', '*b': 'UserDefOne', 'c': 'str' } }
+##
+# @EVENT_D:
+##
{ 'event': 'EVENT_D',
'data': { 'a' : 'EventStructOne', 'b' : 'str', '*c': 'str', '*enum3': 'EnumOne' } }
+##
+# @EVENT_E:
+##
{ 'event': 'EVENT_E', 'boxed': true, 'data': 'UserDefZero' }
+##
+# @EVENT_F:
+##
{ 'event': 'EVENT_F', 'boxed': true, 'data': 'UserDefAlternate' }
# test that we correctly compile downstream extensions, as well as munge
# ticklish names
+##
+# @__org.qemu_x-Enum:
+##
{ 'enum': '__org.qemu_x-Enum', 'data': [ '__org.qemu_x-value' ] }
+##
+# @__org.qemu_x-Base:
+##
{ 'struct': '__org.qemu_x-Base',
'data': { '__org.qemu_x-member1': '__org.qemu_x-Enum' } }
+##
+# @__org.qemu_x-Struct:
+##
{ 'struct': '__org.qemu_x-Struct', 'base': '__org.qemu_x-Base',
'data': { '__org.qemu_x-member2': 'str', '*wchar-t': 'int' } }
+##
+# @__org.qemu_x-Union1:
+##
{ 'union': '__org.qemu_x-Union1', 'data': { '__org.qemu_x-branch': 'str' } }
+##
+# @__org.qemu_x-Struct2:
+##
{ 'struct': '__org.qemu_x-Struct2',
'data': { 'array': ['__org.qemu_x-Union1'] } }
+##
+# @__org.qemu_x-Union2:
+##
{ 'union': '__org.qemu_x-Union2', 'base': '__org.qemu_x-Base',
'discriminator': '__org.qemu_x-member1',
'data': { '__org.qemu_x-value': '__org.qemu_x-Struct2' } }
+##
+# @__org.qemu_x-Alt:
+##
{ 'alternate': '__org.qemu_x-Alt',
'data': { '__org.qemu_x-branch': 'str', 'b': '__org.qemu_x-Base' } }
+##
+# @__ORG.QEMU_X-EVENT:
+##
{ 'event': '__ORG.QEMU_X-EVENT', 'data': '__org.qemu_x-Struct' }
+##
+# @__org.qemu_x-command:
+##
{ 'command': '__org.qemu_x-command',
'data': { 'a': ['__org.qemu_x-Enum'], 'b': ['__org.qemu_x-Struct'],
'c': '__org.qemu_x-Union2', 'd': '__org.qemu_x-Alt' },
diff --git a/tests/qapi-schema/qapi-schema-test.out b/tests/qapi-schema/qapi-schema-test.out
index 9d99c4eebb..bc8d496ff4 100644
--- a/tests/qapi-schema/qapi-schema-test.out
+++ b/tests/qapi-schema/qapi-schema-test.out
@@ -232,3 +232,133 @@ command user_def_cmd1 q_obj_user_def_cmd1-arg -> None
gen=True success_response=True boxed=False
command user_def_cmd2 q_obj_user_def_cmd2-arg -> UserDefTwo
gen=True success_response=True boxed=False
+doc freeform
+ body=
+= Section
+== subsection
+
+Some text foo with *strong* and _emphasis_
+1. with a list
+2. like that @foo
+
+And some code:
+| $ echo foo
+| -> do this
+| <- get that
+
+Note: is not a meta
+doc symbol=TestStruct expr=('struct', 'TestStruct')
+ arg=integer
+foo
+blah
+
+bao
+ arg=boolean
+bar
+ arg=string
+baz
+ section=Example
+-> { "execute": ... }
+<- { "return": ... }
+ section=Since
+2.3
+ section=Note
+a note
+ body=
+body with @var
+doc symbol=NestedEnumsOne expr=('struct', 'NestedEnumsOne')
+ body=
+for testing enums
+doc symbol=MyEnum expr=('enum', 'MyEnum')
+ body=
+An empty enum, although unusual, is currently acceptable
+doc symbol=Empty1 expr=('struct', 'Empty1')
+ body=
+Likewise for an empty struct, including an empty base
+doc symbol=Empty2 expr=('struct', 'Empty2')
+doc symbol=user_def_cmd0 expr=('command', 'user_def_cmd0')
+doc symbol=QEnumTwo expr=('enum', 'QEnumTwo')
+ body=
+for testing override of default naming heuristic
+doc symbol=UserDefOne expr=('struct', 'UserDefOne')
+ body=
+for testing nested structs
+doc symbol=EnumOne expr=('enum', 'EnumOne')
+doc symbol=UserDefZero expr=('struct', 'UserDefZero')
+doc symbol=UserDefTwoDictDict expr=('struct', 'UserDefTwoDictDict')
+doc symbol=UserDefTwoDict expr=('struct', 'UserDefTwoDict')
+doc symbol=UserDefTwo expr=('struct', 'UserDefTwo')
+doc symbol=ForceArrays expr=('struct', 'ForceArrays')
+ body=
+dummy struct to force generation of array types not otherwise mentioned
+doc symbol=UserDefA expr=('struct', 'UserDefA')
+ body=
+for testing unions
+Among other things, test that a name collision between branches does
+not cause any problems (since only one branch can be in use at a time),
+by intentionally using two branches that both have a C member 'a_b'
+doc symbol=UserDefB expr=('struct', 'UserDefB')
+doc symbol=UserDefFlatUnion expr=('union', 'UserDefFlatUnion')
+doc symbol=UserDefUnionBase expr=('struct', 'UserDefUnionBase')
+doc symbol=UserDefFlatUnion2 expr=('union', 'UserDefFlatUnion2')
+ body=
+this variant of UserDefFlatUnion defaults to a union that uses members with
+allocated types to test corner cases in the cleanup/dealloc visitor
+doc symbol=WrapAlternate expr=('struct', 'WrapAlternate')
+doc symbol=UserDefAlternate expr=('alternate', 'UserDefAlternate')
+doc symbol=UserDefC expr=('struct', 'UserDefC')
+doc symbol=AltStrBool expr=('alternate', 'AltStrBool')
+doc symbol=AltStrNum expr=('alternate', 'AltStrNum')
+doc symbol=AltNumStr expr=('alternate', 'AltNumStr')
+doc symbol=AltStrInt expr=('alternate', 'AltStrInt')
+doc symbol=AltIntNum expr=('alternate', 'AltIntNum')
+doc symbol=AltNumInt expr=('alternate', 'AltNumInt')
+doc symbol=UserDefNativeListUnion expr=('union', 'UserDefNativeListUnion')
+ body=
+for testing native lists
+doc symbol=user_def_cmd expr=('command', 'user_def_cmd')
+doc symbol=user_def_cmd1 expr=('command', 'user_def_cmd1')
+doc symbol=user_def_cmd2 expr=('command', 'user_def_cmd2')
+doc freeform
+ body=
+Another comment
+doc symbol=guest-get-time expr=('command', 'guest-get-time')
+ arg=a
+an integer
+ arg=b
+#optional integer
+ section=Returns
+returns something
+ section=Example
+-> { "execute": "guest-get-time", ... }
+<- { "return": "42" }
+ body=
+@guest-get-time body
+doc symbol=guest-sync expr=('command', 'guest-sync')
+doc symbol=boxed-struct expr=('command', 'boxed-struct')
+doc symbol=boxed-union expr=('command', 'boxed-union')
+doc symbol=UserDefOptions expr=('struct', 'UserDefOptions')
+ body=
+For testing integer range flattening in opts-visitor. The following schema
+corresponds to the option format:
+
+-userdef i64=3-6,i64=-5--1,u64=2,u16=1,u16=7-12
+
+For simplicity, this example doesn't use [type=]discriminator nor optargs
+specific to discriminator values.
+doc symbol=EventStructOne expr=('struct', 'EventStructOne')
+doc symbol=EVENT_A expr=('event', 'EVENT_A')
+doc symbol=EVENT_B expr=('event', 'EVENT_B')
+doc symbol=EVENT_C expr=('event', 'EVENT_C')
+doc symbol=EVENT_D expr=('event', 'EVENT_D')
+doc symbol=EVENT_E expr=('event', 'EVENT_E')
+doc symbol=EVENT_F expr=('event', 'EVENT_F')
+doc symbol=__org.qemu_x-Enum expr=('enum', '__org.qemu_x-Enum')
+doc symbol=__org.qemu_x-Base expr=('struct', '__org.qemu_x-Base')
+doc symbol=__org.qemu_x-Struct expr=('struct', '__org.qemu_x-Struct')
+doc symbol=__org.qemu_x-Union1 expr=('union', '__org.qemu_x-Union1')
+doc symbol=__org.qemu_x-Struct2 expr=('struct', '__org.qemu_x-Struct2')
+doc symbol=__org.qemu_x-Union2 expr=('union', '__org.qemu_x-Union2')
+doc symbol=__org.qemu_x-Alt expr=('alternate', '__org.qemu_x-Alt')
+doc symbol=__ORG.QEMU_X-EVENT expr=('event', '__ORG.QEMU_X-EVENT')
+doc symbol=__org.qemu_x-command expr=('command', '__org.qemu_x-command')
diff --git a/tests/qapi-schema/redefined-builtin.err b/tests/qapi-schema/redefined-builtin.err
index b2757225c4..ee0a2adf0b 100644
--- a/tests/qapi-schema/redefined-builtin.err
+++ b/tests/qapi-schema/redefined-builtin.err
@@ -1 +1 @@
-tests/qapi-schema/redefined-builtin.json:2: built-in 'size' is already defined
+tests/qapi-schema/redefined-builtin.json:6: built-in 'size' is already defined
diff --git a/tests/qapi-schema/redefined-builtin.json b/tests/qapi-schema/redefined-builtin.json
index 45b8a550ad..6d3a940d5e 100644
--- a/tests/qapi-schema/redefined-builtin.json
+++ b/tests/qapi-schema/redefined-builtin.json
@@ -1,2 +1,6 @@
# we reject types that duplicate builtin names
+
+##
+# @size:
+##
{ 'struct': 'size', 'data': { 'myint': 'size' } }
diff --git a/tests/qapi-schema/redefined-command.err b/tests/qapi-schema/redefined-command.err
index 82ae256e63..1e297c43ba 100644
--- a/tests/qapi-schema/redefined-command.err
+++ b/tests/qapi-schema/redefined-command.err
@@ -1 +1 @@
-tests/qapi-schema/redefined-command.json:3: command 'foo' is already defined
+tests/qapi-schema/redefined-command.json:10: command 'foo' is already defined
diff --git a/tests/qapi-schema/redefined-command.json b/tests/qapi-schema/redefined-command.json
index 247e401948..3a8cb9024c 100644
--- a/tests/qapi-schema/redefined-command.json
+++ b/tests/qapi-schema/redefined-command.json
@@ -1,3 +1,10 @@
# we reject commands defined more than once
+
+##
+# @foo:
+##
{ 'command': 'foo', 'data': { 'one': 'str' } }
+##
+# @foo:
+##
{ 'command': 'foo', 'data': { '*two': 'str' } }
diff --git a/tests/qapi-schema/redefined-event.err b/tests/qapi-schema/redefined-event.err
index 35429cb481..912c785119 100644
--- a/tests/qapi-schema/redefined-event.err
+++ b/tests/qapi-schema/redefined-event.err
@@ -1 +1 @@
-tests/qapi-schema/redefined-event.json:3: event 'EVENT_A' is already defined
+tests/qapi-schema/redefined-event.json:10: event 'EVENT_A' is already defined
diff --git a/tests/qapi-schema/redefined-event.json b/tests/qapi-schema/redefined-event.json
index 7717e91c18..ec7aeea0f0 100644
--- a/tests/qapi-schema/redefined-event.json
+++ b/tests/qapi-schema/redefined-event.json
@@ -1,3 +1,10 @@
# we reject duplicate events
+
+##
+# @EVENT_A:
+##
{ 'event': 'EVENT_A', 'data': { 'myint': 'int' } }
+##
+# @EVENT_A:
+##
{ 'event': 'EVENT_A', 'data': { 'myint': 'int' } }
diff --git a/tests/qapi-schema/redefined-type.err b/tests/qapi-schema/redefined-type.err
index 06ea78c478..28d87c098c 100644
--- a/tests/qapi-schema/redefined-type.err
+++ b/tests/qapi-schema/redefined-type.err
@@ -1 +1 @@
-tests/qapi-schema/redefined-type.json:3: struct 'foo' is already defined
+tests/qapi-schema/redefined-type.json:10: struct 'foo' is already defined
diff --git a/tests/qapi-schema/redefined-type.json b/tests/qapi-schema/redefined-type.json
index a09e768bae..7a8f3e1ec8 100644
--- a/tests/qapi-schema/redefined-type.json
+++ b/tests/qapi-schema/redefined-type.json
@@ -1,3 +1,10 @@
# we reject types defined more than once
+
+##
+# @foo:
+##
{ 'struct': 'foo', 'data': { 'one': 'str' } }
+##
+# @foo:
+##
{ 'enum': 'foo', 'data': [ 'two' ] }
diff --git a/tests/qapi-schema/reserved-command-q.err b/tests/qapi-schema/reserved-command-q.err
index f939e044eb..5e17f3169b 100644
--- a/tests/qapi-schema/reserved-command-q.err
+++ b/tests/qapi-schema/reserved-command-q.err
@@ -1 +1 @@
-tests/qapi-schema/reserved-command-q.json:5: 'command' uses invalid name 'q-unix'
+tests/qapi-schema/reserved-command-q.json:12: 'command' uses invalid name 'q-unix'
diff --git a/tests/qapi-schema/reserved-command-q.json b/tests/qapi-schema/reserved-command-q.json
index 99f8aae314..bba0860c99 100644
--- a/tests/qapi-schema/reserved-command-q.json
+++ b/tests/qapi-schema/reserved-command-q.json
@@ -1,5 +1,12 @@
# C entity name collision
# We reject names like 'q-unix', because they can collide with the mangled
# name for 'unix' in generated C.
+
+##
+# @unix:
+##
{ 'command': 'unix' }
+##
+# @q-unix:
+##
{ 'command': 'q-unix' }
diff --git a/tests/qapi-schema/reserved-enum-q.err b/tests/qapi-schema/reserved-enum-q.err
index e1c3480ee2..acb2df811d 100644
--- a/tests/qapi-schema/reserved-enum-q.err
+++ b/tests/qapi-schema/reserved-enum-q.err
@@ -1 +1 @@
-tests/qapi-schema/reserved-enum-q.json:4: Member of enum 'Foo' uses invalid name 'q-Unix'
+tests/qapi-schema/reserved-enum-q.json:8: Member of enum 'Foo' uses invalid name 'q-Unix'
diff --git a/tests/qapi-schema/reserved-enum-q.json b/tests/qapi-schema/reserved-enum-q.json
index 3593a765ea..6c7e7177c3 100644
--- a/tests/qapi-schema/reserved-enum-q.json
+++ b/tests/qapi-schema/reserved-enum-q.json
@@ -1,4 +1,8 @@
# C entity name collision
# We reject names like 'q-unix', because they can collide with the mangled
# name for 'unix' in generated C.
+
+##
+# @Foo:
+##
{ 'enum': 'Foo', 'data': [ 'unix', 'q-Unix' ] }
diff --git a/tests/qapi-schema/reserved-member-has.err b/tests/qapi-schema/reserved-member-has.err
index e755771446..9ace796055 100644
--- a/tests/qapi-schema/reserved-member-has.err
+++ b/tests/qapi-schema/reserved-member-has.err
@@ -1 +1 @@
-tests/qapi-schema/reserved-member-has.json:5: Member of 'data' for command 'oops' uses reserved name 'has-a'
+tests/qapi-schema/reserved-member-has.json:9: Member of 'data' for command 'oops' uses reserved name 'has-a'
diff --git a/tests/qapi-schema/reserved-member-has.json b/tests/qapi-schema/reserved-member-has.json
index 45b9109bdc..f0d8905ca2 100644
--- a/tests/qapi-schema/reserved-member-has.json
+++ b/tests/qapi-schema/reserved-member-has.json
@@ -2,4 +2,8 @@
# We reject names like 'has-a', because they can collide with the flag
# for an optional 'a' in generated C.
# TODO we could munge the optional flag name to avoid the collision.
+
+##
+# @oops:
+##
{ 'command': 'oops', 'data': { '*a': 'str', 'has-a': 'str' } }
diff --git a/tests/qapi-schema/reserved-member-q.err b/tests/qapi-schema/reserved-member-q.err
index f3d5dd7818..1709a88462 100644
--- a/tests/qapi-schema/reserved-member-q.err
+++ b/tests/qapi-schema/reserved-member-q.err
@@ -1 +1 @@
-tests/qapi-schema/reserved-member-q.json:4: Member of 'data' for struct 'Foo' uses invalid name 'q-unix'
+tests/qapi-schema/reserved-member-q.json:8: Member of 'data' for struct 'Foo' uses invalid name 'q-unix'
diff --git a/tests/qapi-schema/reserved-member-q.json b/tests/qapi-schema/reserved-member-q.json
index 62fed8fddf..f51e312917 100644
--- a/tests/qapi-schema/reserved-member-q.json
+++ b/tests/qapi-schema/reserved-member-q.json
@@ -1,4 +1,8 @@
# C member name collision
# We reject names like 'q-unix', because they can collide with the mangled
# name for 'unix' in generated C.
+
+##
+# @Foo:
+##
{ 'struct': 'Foo', 'data': { 'unix':'int', 'q-unix':'bool' } }
diff --git a/tests/qapi-schema/reserved-member-u.err b/tests/qapi-schema/reserved-member-u.err
index 87d42296cc..6ec69a712a 100644
--- a/tests/qapi-schema/reserved-member-u.err
+++ b/tests/qapi-schema/reserved-member-u.err
@@ -1 +1 @@
-tests/qapi-schema/reserved-member-u.json:7: Member of 'data' for struct 'Oops' uses reserved name 'u'
+tests/qapi-schema/reserved-member-u.json:11: Member of 'data' for struct 'Oops' uses reserved name 'u'
diff --git a/tests/qapi-schema/reserved-member-u.json b/tests/qapi-schema/reserved-member-u.json
index 1eaf0f301c..3a578e5b56 100644
--- a/tests/qapi-schema/reserved-member-u.json
+++ b/tests/qapi-schema/reserved-member-u.json
@@ -4,4 +4,8 @@
# This is true even for non-unions, because it is possible to convert a
# struct to flat union while remaining backwards compatible in QMP.
# TODO - we could munge the member name to 'q_u' to avoid the collision
+
+##
+# @Oops:
+##
{ 'struct': 'Oops', 'data': { 'u': 'str' } }
diff --git a/tests/qapi-schema/reserved-member-underscore.err b/tests/qapi-schema/reserved-member-underscore.err
index 65ff0da8ce..c9aefee3a8 100644
--- a/tests/qapi-schema/reserved-member-underscore.err
+++ b/tests/qapi-schema/reserved-member-underscore.err
@@ -1 +1 @@
-tests/qapi-schema/reserved-member-underscore.json:4: Member of 'data' for struct 'Oops' uses invalid name '_oops'
+tests/qapi-schema/reserved-member-underscore.json:8: Member of 'data' for struct 'Oops' uses invalid name '_oops'
diff --git a/tests/qapi-schema/reserved-member-underscore.json b/tests/qapi-schema/reserved-member-underscore.json
index 4a3a017638..cc34b54b02 100644
--- a/tests/qapi-schema/reserved-member-underscore.json
+++ b/tests/qapi-schema/reserved-member-underscore.json
@@ -1,4 +1,8 @@
# C member name collision
# We reject use of a single leading underscore in all names (names must
# begin with a letter or a downstream extension double-underscore prefix).
+
+##
+# @Oops:
+##
{ 'struct': 'Oops', 'data': { '_oops': 'str' } }
diff --git a/tests/qapi-schema/reserved-type-kind.err b/tests/qapi-schema/reserved-type-kind.err
index 0a38efaad8..8698073062 100644
--- a/tests/qapi-schema/reserved-type-kind.err
+++ b/tests/qapi-schema/reserved-type-kind.err
@@ -1 +1 @@
-tests/qapi-schema/reserved-type-kind.json:2: enum 'UnionKind' should not end in 'Kind'
+tests/qapi-schema/reserved-type-kind.json:6: enum 'UnionKind' should not end in 'Kind'
diff --git a/tests/qapi-schema/reserved-type-kind.json b/tests/qapi-schema/reserved-type-kind.json
index 9ecaba12bc..a094941561 100644
--- a/tests/qapi-schema/reserved-type-kind.json
+++ b/tests/qapi-schema/reserved-type-kind.json
@@ -1,2 +1,6 @@
# we reject types that would conflict with implicit union enum
+
+##
+# @UnionKind:
+##
{ 'enum': 'UnionKind', 'data': [ 'oops' ] }
diff --git a/tests/qapi-schema/reserved-type-list.err b/tests/qapi-schema/reserved-type-list.err
index 4510fa6d90..ec0531c4b9 100644
--- a/tests/qapi-schema/reserved-type-list.err
+++ b/tests/qapi-schema/reserved-type-list.err
@@ -1 +1 @@
-tests/qapi-schema/reserved-type-list.json:5: struct 'FooList' should not end in 'List'
+tests/qapi-schema/reserved-type-list.json:9: struct 'FooList' should not end in 'List'
diff --git a/tests/qapi-schema/reserved-type-list.json b/tests/qapi-schema/reserved-type-list.json
index 98d53bf808..6effb78e7f 100644
--- a/tests/qapi-schema/reserved-type-list.json
+++ b/tests/qapi-schema/reserved-type-list.json
@@ -2,4 +2,8 @@
# We reserve names ending in 'List' for use by array types.
# TODO - we could choose array names to avoid collision with user types,
# in order to let this compile
+
+##
+# @FooList:
+##
{ 'struct': 'FooList', 'data': { 's': 'str' } }
diff --git a/tests/qapi-schema/returns-alternate.err b/tests/qapi-schema/returns-alternate.err
index dfbb419cac..2b81623ca3 100644
--- a/tests/qapi-schema/returns-alternate.err
+++ b/tests/qapi-schema/returns-alternate.err
@@ -1 +1 @@
-tests/qapi-schema/returns-alternate.json:3: 'returns' for command 'oops' cannot use alternate type 'Alt'
+tests/qapi-schema/returns-alternate.json:10: 'returns' for command 'oops' cannot use alternate type 'Alt'
diff --git a/tests/qapi-schema/returns-alternate.json b/tests/qapi-schema/returns-alternate.json
index 972390c06b..005bf2d148 100644
--- a/tests/qapi-schema/returns-alternate.json
+++ b/tests/qapi-schema/returns-alternate.json
@@ -1,3 +1,10 @@
# we reject returns if it is an alternate type
+
+##
+# @Alt:
+##
{ 'alternate': 'Alt', 'data': { 'a': 'int', 'b': 'str' } }
+##
+# @oops:
+##
{ 'command': 'oops', 'returns': 'Alt' }
diff --git a/tests/qapi-schema/returns-array-bad.err b/tests/qapi-schema/returns-array-bad.err
index 138095ccde..b53bdb0ade 100644
--- a/tests/qapi-schema/returns-array-bad.err
+++ b/tests/qapi-schema/returns-array-bad.err
@@ -1 +1 @@
-tests/qapi-schema/returns-array-bad.json:2: 'returns' for command 'oops': array type must contain single type name
+tests/qapi-schema/returns-array-bad.json:6: 'returns' for command 'oops': array type must contain single type name
diff --git a/tests/qapi-schema/returns-array-bad.json b/tests/qapi-schema/returns-array-bad.json
index 09b0b1f182..30528fed29 100644
--- a/tests/qapi-schema/returns-array-bad.json
+++ b/tests/qapi-schema/returns-array-bad.json
@@ -1,2 +1,6 @@
# we reject an array return that is not a single type
+
+##
+# @oops:
+##
{ 'command': 'oops', 'returns': [ 'str', 'str' ] }
diff --git a/tests/qapi-schema/returns-dict.err b/tests/qapi-schema/returns-dict.err
index eb2d0c4661..1570a35d49 100644
--- a/tests/qapi-schema/returns-dict.err
+++ b/tests/qapi-schema/returns-dict.err
@@ -1 +1 @@
-tests/qapi-schema/returns-dict.json:2: 'returns' for command 'oops' should be a type name
+tests/qapi-schema/returns-dict.json:6: 'returns' for command 'oops' should be a type name
diff --git a/tests/qapi-schema/returns-dict.json b/tests/qapi-schema/returns-dict.json
index 1cfef3ede7..6a3ed0f34d 100644
--- a/tests/qapi-schema/returns-dict.json
+++ b/tests/qapi-schema/returns-dict.json
@@ -1,2 +1,6 @@
# we reject inline struct return type
+
+##
+# @oops:
+##
{ 'command': 'oops', 'returns': { 'a': 'str' } }
diff --git a/tests/qapi-schema/returns-unknown.err b/tests/qapi-schema/returns-unknown.err
index 1f43e3ac9f..d76bcfe455 100644
--- a/tests/qapi-schema/returns-unknown.err
+++ b/tests/qapi-schema/returns-unknown.err
@@ -1 +1 @@
-tests/qapi-schema/returns-unknown.json:2: 'returns' for command 'oops' uses unknown type 'NoSuchType'
+tests/qapi-schema/returns-unknown.json:6: 'returns' for command 'oops' uses unknown type 'NoSuchType'
diff --git a/tests/qapi-schema/returns-unknown.json b/tests/qapi-schema/returns-unknown.json
index 25bd498bff..3837f0e607 100644
--- a/tests/qapi-schema/returns-unknown.json
+++ b/tests/qapi-schema/returns-unknown.json
@@ -1,2 +1,6 @@
# we reject returns if it does not contain a known type
+
+##
+# @oops:
+##
{ 'command': 'oops', 'returns': 'NoSuchType' }
diff --git a/tests/qapi-schema/returns-whitelist.err b/tests/qapi-schema/returns-whitelist.err
index f47c1ee7ca..e77ea2da3f 100644
--- a/tests/qapi-schema/returns-whitelist.err
+++ b/tests/qapi-schema/returns-whitelist.err
@@ -1 +1 @@
-tests/qapi-schema/returns-whitelist.json:10: 'returns' for command 'no-way-this-will-get-whitelisted' cannot use built-in type 'int'
+tests/qapi-schema/returns-whitelist.json:26: 'returns' for command 'no-way-this-will-get-whitelisted' cannot use built-in type 'int'
diff --git a/tests/qapi-schema/returns-whitelist.json b/tests/qapi-schema/returns-whitelist.json
index e8b3cea396..0bc952db87 100644
--- a/tests/qapi-schema/returns-whitelist.json
+++ b/tests/qapi-schema/returns-whitelist.json
@@ -1,11 +1,27 @@
# we enforce that 'returns' be a dict or array of dict unless whitelisted
+
+##
+# @human-monitor-command:
+##
{ 'command': 'human-monitor-command',
'data': {'command-line': 'str', '*cpu-index': 'int'},
'returns': 'str' }
+##
+# @TpmModel:
+##
{ 'enum': 'TpmModel', 'data': [ 'tpm-tis' ] }
+##
+# @query-tpm-models:
+##
{ 'command': 'query-tpm-models', 'returns': ['TpmModel'] }
+##
+# @guest-get-time:
+##
{ 'command': 'guest-get-time',
'returns': 'int' }
+##
+# @no-way-this-will-get-whitelisted:
+##
{ 'command': 'no-way-this-will-get-whitelisted',
'returns': [ 'int' ] }
diff --git a/tests/qapi-schema/struct-base-clash-deep.err b/tests/qapi-schema/struct-base-clash-deep.err
index e2d7943f21..1b7c0e9d12 100644
--- a/tests/qapi-schema/struct-base-clash-deep.err
+++ b/tests/qapi-schema/struct-base-clash-deep.err
@@ -1 +1 @@
-tests/qapi-schema/struct-base-clash-deep.json:10: 'name' (member of Sub) collides with 'name' (member of Base)
+tests/qapi-schema/struct-base-clash-deep.json:20: 'name' (member of Sub) collides with 'name' (member of Base)
diff --git a/tests/qapi-schema/struct-base-clash-deep.json b/tests/qapi-schema/struct-base-clash-deep.json
index fa873ab5d4..646d680ad6 100644
--- a/tests/qapi-schema/struct-base-clash-deep.json
+++ b/tests/qapi-schema/struct-base-clash-deep.json
@@ -2,11 +2,21 @@
# Here, 'name' would have to appear twice on the wire, locally and
# indirectly for the grandparent base; the collision doesn't care that
# one instance is optional.
+
+##
+# @Base:
+##
{ 'struct': 'Base',
'data': { 'name': 'str' } }
+##
+# @Mid:
+##
{ 'struct': 'Mid',
'base': 'Base',
'data': { 'value': 'int' } }
+##
+# @Sub:
+##
{ 'struct': 'Sub',
'base': 'Mid',
'data': { '*name': 'str' } }
diff --git a/tests/qapi-schema/struct-base-clash.err b/tests/qapi-schema/struct-base-clash.err
index c52f33d27b..5fe6393efa 100644
--- a/tests/qapi-schema/struct-base-clash.err
+++ b/tests/qapi-schema/struct-base-clash.err
@@ -1 +1 @@
-tests/qapi-schema/struct-base-clash.json:5: 'name' (member of Sub) collides with 'name' (member of Base)
+tests/qapi-schema/struct-base-clash.json:12: 'name' (member of Sub) collides with 'name' (member of Base)
diff --git a/tests/qapi-schema/struct-base-clash.json b/tests/qapi-schema/struct-base-clash.json
index 11aec80fe5..a8539958b5 100644
--- a/tests/qapi-schema/struct-base-clash.json
+++ b/tests/qapi-schema/struct-base-clash.json
@@ -1,7 +1,14 @@
# Reject attempts to duplicate QMP members
# Here, 'name' would have to appear twice on the wire, locally and for base.
+
+##
+# @Base:
+##
{ 'struct': 'Base',
'data': { 'name': 'str' } }
+##
+# @Sub:
+##
{ 'struct': 'Sub',
'base': 'Base',
'data': { 'name': 'str' } }
diff --git a/tests/qapi-schema/struct-data-invalid.err b/tests/qapi-schema/struct-data-invalid.err
index 6644f4c2ad..27163355bd 100644
--- a/tests/qapi-schema/struct-data-invalid.err
+++ b/tests/qapi-schema/struct-data-invalid.err
@@ -1 +1 @@
-tests/qapi-schema/struct-data-invalid.json:1: 'data' for struct 'foo' should be a dictionary or type name
+tests/qapi-schema/struct-data-invalid.json:4: 'data' for struct 'foo' should be a dictionary or type name
diff --git a/tests/qapi-schema/struct-data-invalid.json b/tests/qapi-schema/struct-data-invalid.json
index 9adbc3bb6b..aa817bda34 100644
--- a/tests/qapi-schema/struct-data-invalid.json
+++ b/tests/qapi-schema/struct-data-invalid.json
@@ -1,2 +1,5 @@
+##
+# @foo:
+##
{ 'struct': 'foo',
'data': false }
diff --git a/tests/qapi-schema/struct-member-invalid.err b/tests/qapi-schema/struct-member-invalid.err
index 69a326d450..f2b105ba88 100644
--- a/tests/qapi-schema/struct-member-invalid.err
+++ b/tests/qapi-schema/struct-member-invalid.err
@@ -1 +1 @@
-tests/qapi-schema/struct-member-invalid.json:1: Member 'a' of 'data' for struct 'foo' should be a type name
+tests/qapi-schema/struct-member-invalid.json:4: Member 'a' of 'data' for struct 'foo' should be a type name
diff --git a/tests/qapi-schema/struct-member-invalid.json b/tests/qapi-schema/struct-member-invalid.json
index 8f172f7a87..10c74262d3 100644
--- a/tests/qapi-schema/struct-member-invalid.json
+++ b/tests/qapi-schema/struct-member-invalid.json
@@ -1,2 +1,5 @@
+##
+# @foo:
+##
{ 'struct': 'foo',
'data': { 'a': false } }
diff --git a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py
index ef74e2c4c8..b4cde4ff4f 100644
--- a/tests/qapi-schema/test-qapi.py
+++ b/tests/qapi-schema/test-qapi.py
@@ -55,3 +55,17 @@ class QAPISchemaTestVisitor(QAPISchemaVisitor):
schema = QAPISchema(sys.argv[1])
schema.visit(QAPISchemaTestVisitor())
+
+for doc in schema.docs:
+ if doc.symbol:
+ print 'doc symbol=%s expr=%s' % \
+ (doc.symbol, doc.expr.items()[0])
+ else:
+ print 'doc freeform'
+ for arg, section in doc.args.iteritems():
+ print ' arg=%s\n%s' % (arg, section)
+ for section in doc.sections:
+ print ' section=%s\n%s' % (section.name, section)
+ body = str(doc.body)
+ if body:
+ print ' body=\n%s' % body
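The loop added to test-qapi.py above walks schema.docs and prints each documentation block in the format that appears in the updated .out files (doc symbol=.../doc freeform, followed by arg=, section= and body= entries). For reference, here is a minimal standalone sketch of that traversal; the Doc and Section classes below are hypothetical stand-ins for the parser's doc objects, not the real QAPI classes, and the sketch uses Python 3 print() while the in-tree script is Python 2.

```python
# Minimal sketch, assuming stand-in objects that expose the same
# attributes the test visitor reads (symbol, expr, body, args, sections).
class Section:
    def __init__(self, name, text):
        self.name, self.text = name, text

    def __str__(self):
        return self.text


class Doc:
    def __init__(self, symbol=None, expr=None, body='',
                 args=None, sections=None):
        self.symbol, self.expr, self.body = symbol, expr, body
        self.args = args or {}          # argument name -> Section
        self.sections = sections or []  # tagged sections (Example, Since, ...)


# Two sample blocks: one tied to a definition, one free-form comment.
docs = [
    Doc(symbol='eins', expr=('command', 'eins')),
    Doc(body='Another comment'),
]

for doc in docs:
    if doc.symbol:
        print('doc symbol=%s expr=%s' % (doc.symbol, doc.expr))
    else:
        print('doc freeform')
    for arg, section in doc.args.items():
        print(' arg=%s\n%s' % (arg, section))
    for section in doc.sections:
        print(' section=%s\n%s' % (section.name, section))
    if str(doc.body):
        print(' body=\n%s' % doc.body)
```

Run on the sample list, this prints "doc symbol=eins expr=('command', 'eins')" followed by "doc freeform" and its body, matching the lines appended to indented-expr.out and qapi-schema-test.out above.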
diff --git a/tests/qapi-schema/type-bypass-bad-gen.err b/tests/qapi-schema/type-bypass-bad-gen.err
index a83c3c655d..bd5431f60b 100644
--- a/tests/qapi-schema/type-bypass-bad-gen.err
+++ b/tests/qapi-schema/type-bypass-bad-gen.err
@@ -1 +1 @@
-tests/qapi-schema/type-bypass-bad-gen.json:2: 'gen' of command 'foo' should only use false value
+tests/qapi-schema/type-bypass-bad-gen.json:6: 'gen' of command 'foo' should only use false value
diff --git a/tests/qapi-schema/type-bypass-bad-gen.json b/tests/qapi-schema/type-bypass-bad-gen.json
index e8dec34249..7162c1a0ca 100644
--- a/tests/qapi-schema/type-bypass-bad-gen.json
+++ b/tests/qapi-schema/type-bypass-bad-gen.json
@@ -1,2 +1,6 @@
# 'gen' should only appear with value false
+
+##
+# @foo:
+##
{ 'command': 'foo', 'gen': 'whatever' }
diff --git a/tests/qapi-schema/unicode-str.err b/tests/qapi-schema/unicode-str.err
index f621cd6448..92ee277370 100644
--- a/tests/qapi-schema/unicode-str.err
+++ b/tests/qapi-schema/unicode-str.err
@@ -1 +1 @@
-tests/qapi-schema/unicode-str.json:2: 'command' uses invalid name 'é'
+tests/qapi-schema/unicode-str.json:6: 'command' uses invalid name 'é'
diff --git a/tests/qapi-schema/unicode-str.json b/tests/qapi-schema/unicode-str.json
index 5253a1b9f3..75a08b3d93 100644
--- a/tests/qapi-schema/unicode-str.json
+++ b/tests/qapi-schema/unicode-str.json
@@ -1,2 +1,6 @@
# we don't support full Unicode strings, yet
+
+##
+# @e:
+##
{ 'command': 'é' }
diff --git a/tests/qapi-schema/union-base-no-discriminator.err b/tests/qapi-schema/union-base-no-discriminator.err
index 8b7a24260f..ca6ee92357 100644
--- a/tests/qapi-schema/union-base-no-discriminator.err
+++ b/tests/qapi-schema/union-base-no-discriminator.err
@@ -1 +1 @@
-tests/qapi-schema/union-base-no-discriminator.json:11: Simple union 'TestUnion' must not have a base
+tests/qapi-schema/union-base-no-discriminator.json:23: Simple union 'TestUnion' must not have a base
diff --git a/tests/qapi-schema/union-base-no-discriminator.json b/tests/qapi-schema/union-base-no-discriminator.json
index 1409cf5c9e..cc6bac1424 100644
--- a/tests/qapi-schema/union-base-no-discriminator.json
+++ b/tests/qapi-schema/union-base-no-discriminator.json
@@ -1,13 +1,25 @@
+##
+# @TestTypeA:
+##
# we reject simple unions with a base (or flat unions without discriminator)
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @Base:
+##
{ 'struct': 'Base',
'data': { 'string': 'str' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'Base',
'data': { 'value1': 'TestTypeA',
diff --git a/tests/qapi-schema/union-branch-case.err b/tests/qapi-schema/union-branch-case.err
index 11521901d8..9095bae565 100644
--- a/tests/qapi-schema/union-branch-case.err
+++ b/tests/qapi-schema/union-branch-case.err
@@ -1 +1 @@
-tests/qapi-schema/union-branch-case.json:2: 'Branch' (branch of NoWayThisWillGetWhitelisted) should not use uppercase
+tests/qapi-schema/union-branch-case.json:6: 'Branch' (branch of NoWayThisWillGetWhitelisted) should not use uppercase
diff --git a/tests/qapi-schema/union-branch-case.json b/tests/qapi-schema/union-branch-case.json
index e6565dc3b3..6de131548c 100644
--- a/tests/qapi-schema/union-branch-case.json
+++ b/tests/qapi-schema/union-branch-case.json
@@ -1,2 +1,6 @@
# Branch names should be 'lower-case' unless the union is whitelisted
+
+##
+# @NoWayThisWillGetWhitelisted:
+##
{ 'union': 'NoWayThisWillGetWhitelisted', 'data': { 'Branch': 'int' } }
diff --git a/tests/qapi-schema/union-clash-branches.err b/tests/qapi-schema/union-clash-branches.err
index e5b21135bb..640caeab8c 100644
--- a/tests/qapi-schema/union-clash-branches.err
+++ b/tests/qapi-schema/union-clash-branches.err
@@ -1 +1 @@
-tests/qapi-schema/union-clash-branches.json:4: 'a_b' (branch of TestUnion) collides with 'a-b' (branch of TestUnion)
+tests/qapi-schema/union-clash-branches.json:8: 'a_b' (branch of TestUnion) collides with 'a-b' (branch of TestUnion)
diff --git a/tests/qapi-schema/union-clash-branches.json b/tests/qapi-schema/union-clash-branches.json
index 3bece8c948..6615665dfe 100644
--- a/tests/qapi-schema/union-clash-branches.json
+++ b/tests/qapi-schema/union-clash-branches.json
@@ -1,5 +1,9 @@
# Union branch name collision
# Reject a union that would result in a collision in generated C names (this
# would try to generate two members 'a_b').
+
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'data': { 'a-b': 'int', 'a_b': 'str' } }
diff --git a/tests/qapi-schema/union-empty.err b/tests/qapi-schema/union-empty.err
index 12c20221bd..749bc76fc5 100644
--- a/tests/qapi-schema/union-empty.err
+++ b/tests/qapi-schema/union-empty.err
@@ -1 +1 @@
-tests/qapi-schema/union-empty.json:2: Union 'Union' cannot have empty 'data'
+tests/qapi-schema/union-empty.json:6: Union 'Union' cannot have empty 'data'
diff --git a/tests/qapi-schema/union-empty.json b/tests/qapi-schema/union-empty.json
index 1f0b13ca21..c9b0a1ef33 100644
--- a/tests/qapi-schema/union-empty.json
+++ b/tests/qapi-schema/union-empty.json
@@ -1,2 +1,6 @@
# unions cannot be empty
+
+##
+# @Union:
+##
{ 'union': 'Union', 'data': { } }
diff --git a/tests/qapi-schema/union-invalid-base.err b/tests/qapi-schema/union-invalid-base.err
index 03d7b97a93..41e238f453 100644
--- a/tests/qapi-schema/union-invalid-base.err
+++ b/tests/qapi-schema/union-invalid-base.err
@@ -1 +1 @@
-tests/qapi-schema/union-invalid-base.json:8: 'base' for union 'TestUnion' cannot use built-in type 'int'
+tests/qapi-schema/union-invalid-base.json:18: 'base' for union 'TestUnion' cannot use built-in type 'int'
diff --git a/tests/qapi-schema/union-invalid-base.json b/tests/qapi-schema/union-invalid-base.json
index 92be39df69..fd837cb80b 100644
--- a/tests/qapi-schema/union-invalid-base.json
+++ b/tests/qapi-schema/union-invalid-base.json
@@ -1,10 +1,20 @@
# a union base type must be a struct
+
+##
+# @TestTypeA:
+##
{ 'struct': 'TestTypeA',
'data': { 'string': 'str' } }
+##
+# @TestTypeB:
+##
{ 'struct': 'TestTypeB',
'data': { 'integer': 'int' } }
+##
+# @TestUnion:
+##
{ 'union': 'TestUnion',
'base': 'int',
'discriminator': 'int',
diff --git a/tests/qapi-schema/union-optional-branch.err b/tests/qapi-schema/union-optional-branch.err
index 3ada1334dc..60523c07e4 100644
--- a/tests/qapi-schema/union-optional-branch.err
+++ b/tests/qapi-schema/union-optional-branch.err
@@ -1 +1 @@
-tests/qapi-schema/union-optional-branch.json:2: Member of union 'Union' does not allow optional name '*a'
+tests/qapi-schema/union-optional-branch.json:6: Member of union 'Union' does not allow optional name '*a'
diff --git a/tests/qapi-schema/union-optional-branch.json b/tests/qapi-schema/union-optional-branch.json
index 591615fc68..7d2ee4c730 100644
--- a/tests/qapi-schema/union-optional-branch.json
+++ b/tests/qapi-schema/union-optional-branch.json
@@ -1,2 +1,6 @@
# union branches cannot be optional
+
+##
+# @Union:
+##
{ 'union': 'Union', 'data': { '*a': 'int', 'b': 'str' } }
diff --git a/tests/qapi-schema/union-unknown.err b/tests/qapi-schema/union-unknown.err
index 54fe456f9c..5568302205 100644
--- a/tests/qapi-schema/union-unknown.err
+++ b/tests/qapi-schema/union-unknown.err
@@ -1 +1 @@
-tests/qapi-schema/union-unknown.json:2: Member 'unknown' of union 'Union' uses unknown type 'MissingType'
+tests/qapi-schema/union-unknown.json:6: Member 'unknown' of union 'Union' uses unknown type 'MissingType'
diff --git a/tests/qapi-schema/union-unknown.json b/tests/qapi-schema/union-unknown.json
index aa7e8143d8..5042b23197 100644
--- a/tests/qapi-schema/union-unknown.json
+++ b/tests/qapi-schema/union-unknown.json
@@ -1,3 +1,7 @@
# we reject a union with unknown type in branch
+
+##
+# @Union:
+##
{ 'union': 'Union',
'data': { 'unknown': 'MissingType' } }
diff --git a/tests/qapi-schema/unknown-escape.err b/tests/qapi-schema/unknown-escape.err
index 000e30ddf3..1a4ead632b 100644
--- a/tests/qapi-schema/unknown-escape.err
+++ b/tests/qapi-schema/unknown-escape.err
@@ -1 +1 @@
-tests/qapi-schema/unknown-escape.json:3:21: Unknown escape \x
+tests/qapi-schema/unknown-escape.json:7:21: Unknown escape \x
diff --git a/tests/qapi-schema/unknown-escape.json b/tests/qapi-schema/unknown-escape.json
index 8e6891e52a..e3ae6793f2 100644
--- a/tests/qapi-schema/unknown-escape.json
+++ b/tests/qapi-schema/unknown-escape.json
@@ -1,3 +1,7 @@
# we only recognize JSON escape sequences, plus our \' extension (no \x)
+
+##
+# @foo:
+##
# { 'command': 'foo', 'data': {} }
{ 'command': 'foo', 'dat\x61':{} }
diff --git a/tests/qapi-schema/unknown-expr-key.err b/tests/qapi-schema/unknown-expr-key.err
index 12f5ed5b43..b19a668bd6 100644
--- a/tests/qapi-schema/unknown-expr-key.err
+++ b/tests/qapi-schema/unknown-expr-key.err
@@ -1 +1 @@
-tests/qapi-schema/unknown-expr-key.json:2: Unknown key 'bogus' in struct 'bar'
+tests/qapi-schema/unknown-expr-key.json:6: Unknown key 'bogus' in struct 'bar'
diff --git a/tests/qapi-schema/unknown-expr-key.json b/tests/qapi-schema/unknown-expr-key.json
index 3b2be00cc4..1b764c7b9d 100644
--- a/tests/qapi-schema/unknown-expr-key.json
+++ b/tests/qapi-schema/unknown-expr-key.json
@@ -1,2 +1,6 @@
# we reject an expression with unknown top-level keys
+
+##
+# @bar:
+##
{ 'struct': 'bar', 'data': { 'string': 'str'}, 'bogus': { } }
diff --git a/tests/qemu-iotests/071.out b/tests/qemu-iotests/071.out
index 8ff423f56b..dd879f1212 100644
--- a/tests/qemu-iotests/071.out
+++ b/tests/qemu-iotests/071.out
@@ -12,7 +12,7 @@ read 512/512 bytes at offset 229376
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-blkverify: read sector_num=0 nb_sectors=1 contents mismatch in sector 0
+blkverify: read offset=0 bytes=512 contents mismatch at offset 0
=== Testing blkverify through file blockref ===
@@ -26,7 +26,7 @@ read 512/512 bytes at offset 229376
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
wrote 512/512 bytes at offset 0
512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-blkverify: read sector_num=0 nb_sectors=1 contents mismatch in sector 0
+blkverify: read offset=0 bytes=512 contents mismatch at offset 0
=== Testing blkdebug through filename ===
@@ -56,7 +56,7 @@ QMP_VERSION
{"return": {}}
{"return": {}}
{"return": {}}
-blkverify: read sector_num=0 nb_sectors=1 contents mismatch in sector 0
+blkverify: read offset=0 bytes=512 contents mismatch at offset 0
=== Testing blkverify on existing raw block device ===
@@ -66,7 +66,7 @@ QMP_VERSION
{"return": {}}
{"return": {}}
{"return": {}}
-blkverify: read sector_num=0 nb_sectors=1 contents mismatch in sector 0
+blkverify: read offset=0 bytes=512 contents mismatch at offset 0
=== Testing blkdebug's set-state through QMP ===
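The expected-output change above tracks blkverify's switch from sector-based to byte-based request reporting: one 512-byte request at sector 0 is now described as offset=0 bytes=512, and the mismatch position is given as a byte offset. A minimal standalone sketch of that mapping (BDRV_SECTOR_SIZE assumed to be the usual 512; the formatting below is only an illustration, not blkverify's actual code):

    #include <inttypes.h>
    #include <stdio.h>

    #define BDRV_SECTOR_SIZE 512   /* assumption: standard 512-byte sectors */

    int main(void)
    {
        int64_t sector_num = 0, nb_sectors = 1;   /* old-style request */

        /* New-style, byte-based wording of the same request. */
        printf("blkverify: read offset=%" PRId64 " bytes=%" PRId64
               " contents mismatch at offset %" PRId64 "\n",
               sector_num * BDRV_SECTOR_SIZE,
               nb_sectors * BDRV_SECTOR_SIZE,
               sector_num * BDRV_SECTOR_SIZE);
        return 0;
    }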
diff --git a/tests/test-aio.c b/tests/test-aio.c
index 5be99f8287..2754f154ce 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -128,7 +128,7 @@ static void *test_acquire_thread(void *opaque)
static void set_event_notifier(AioContext *ctx, EventNotifier *notifier,
EventNotifierHandler *handler)
{
- aio_set_event_notifier(ctx, notifier, false, handler);
+ aio_set_event_notifier(ctx, notifier, false, handler, NULL);
}
static void dummy_notifier_read(EventNotifier *n)
@@ -388,7 +388,7 @@ static void test_aio_external_client(void)
for (i = 1; i < 3; i++) {
EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true };
event_notifier_init(&data.e, false);
- aio_set_event_notifier(ctx, &data.e, true, event_ready_cb);
+ aio_set_event_notifier(ctx, &data.e, true, event_ready_cb, NULL);
event_notifier_set(&data.e);
for (j = 0; j < i; j++) {
aio_disable_external(ctx);
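The extra NULL argument in these test-aio.c hunks appears to be the new AioPollFn parameter that aio_set_event_notifier() grew alongside AioContext polling; passing NULL opts out of polling and keeps the old behaviour. A hedged sketch of both registration styles (the handler names are illustrative, and the assumption that the notifier is handed back as the poll callback's opaque is mine):

    #include "qemu/osdep.h"
    #include "block/aio.h"

    static void my_read_handler(EventNotifier *n)      /* illustrative */
    {
        event_notifier_test_and_clear(n);
    }

    static bool my_poll_handler(void *opaque)          /* illustrative */
    {
        EventNotifier *n = opaque;
        return event_notifier_test_and_clear(n);       /* true = made progress */
    }

    static void register_notifier(AioContext *ctx, EventNotifier *n)
    {
        /* With a poll callback: */
        aio_set_event_notifier(ctx, n, false, my_read_handler, my_poll_handler);
        /* Without polling, as the tests above do: */
        /* aio_set_event_notifier(ctx, n, false, my_read_handler, NULL); */
    }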
diff --git a/tests/test-bitcnt.c b/tests/test-bitcnt.c
new file mode 100644
index 0000000000..e153dcb8a2
--- /dev/null
+++ b/tests/test-bitcnt.c
@@ -0,0 +1,140 @@
+/*
+ * Test bit count routines
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+
+struct bitcnt_test_data {
+ /* value to count */
+ union {
+ uint8_t w8;
+ uint16_t w16;
+ uint32_t w32;
+ uint64_t w64;
+ } value;
+ /* expected result */
+ int popct;
+};
+
+struct bitcnt_test_data eight_bit_data[] = {
+ { { .w8 = 0x00 }, .popct=0 },
+ { { .w8 = 0x01 }, .popct=1 },
+ { { .w8 = 0x03 }, .popct=2 },
+ { { .w8 = 0x04 }, .popct=1 },
+ { { .w8 = 0x0f }, .popct=4 },
+ { { .w8 = 0x3f }, .popct=6 },
+ { { .w8 = 0x40 }, .popct=1 },
+ { { .w8 = 0xf0 }, .popct=4 },
+ { { .w8 = 0x7f }, .popct=7 },
+ { { .w8 = 0x80 }, .popct=1 },
+ { { .w8 = 0xf1 }, .popct=5 },
+ { { .w8 = 0xfe }, .popct=7 },
+ { { .w8 = 0xff }, .popct=8 },
+};
+
+static void test_ctpop8(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eight_bit_data); i++) {
+ struct bitcnt_test_data *d = &eight_bit_data[i];
+ g_assert(ctpop8(d->value.w8)==d->popct);
+ }
+}
+
+struct bitcnt_test_data sixteen_bit_data[] = {
+ { { .w16 = 0x0000 }, .popct=0 },
+ { { .w16 = 0x0001 }, .popct=1 },
+ { { .w16 = 0x0003 }, .popct=2 },
+ { { .w16 = 0x000f }, .popct=4 },
+ { { .w16 = 0x003f }, .popct=6 },
+ { { .w16 = 0x00f0 }, .popct=4 },
+ { { .w16 = 0x0f0f }, .popct=8 },
+ { { .w16 = 0x1f1f }, .popct=10 },
+ { { .w16 = 0x4000 }, .popct=1 },
+ { { .w16 = 0x4001 }, .popct=2 },
+ { { .w16 = 0x7000 }, .popct=3 },
+ { { .w16 = 0x7fff }, .popct=15 },
+};
+
+static void test_ctpop16(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sixteen_bit_data); i++) {
+ struct bitcnt_test_data *d = &sixteen_bit_data[i];
+ g_assert(ctpop16(d->value.w16)==d->popct);
+ }
+}
+
+struct bitcnt_test_data thirtytwo_bit_data[] = {
+ { { .w32 = 0x00000000 }, .popct=0 },
+ { { .w32 = 0x00000001 }, .popct=1 },
+ { { .w32 = 0x0000000f }, .popct=4 },
+ { { .w32 = 0x00000f0f }, .popct=8 },
+ { { .w32 = 0x00001f1f }, .popct=10 },
+ { { .w32 = 0x00004001 }, .popct=2 },
+ { { .w32 = 0x00007000 }, .popct=3 },
+ { { .w32 = 0x00007fff }, .popct=15 },
+ { { .w32 = 0x55555555 }, .popct=16 },
+ { { .w32 = 0xaaaaaaaa }, .popct=16 },
+ { { .w32 = 0xff000000 }, .popct=8 },
+ { { .w32 = 0xc0c0c0c0 }, .popct=8 },
+ { { .w32 = 0x0ffffff0 }, .popct=24 },
+ { { .w32 = 0x80000000 }, .popct=1 },
+ { { .w32 = 0xffffffff }, .popct=32 },
+};
+
+static void test_ctpop32(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(thirtytwo_bit_data); i++) {
+ struct bitcnt_test_data *d = &thirtytwo_bit_data[i];
+ g_assert(ctpop32(d->value.w32)==d->popct);
+ }
+}
+
+struct bitcnt_test_data sixtyfour_bit_data[] = {
+ { { .w64 = 0x0000000000000000ULL }, .popct=0 },
+ { { .w64 = 0x0000000000000001ULL }, .popct=1 },
+ { { .w64 = 0x000000000000000fULL }, .popct=4 },
+ { { .w64 = 0x0000000000000f0fULL }, .popct=8 },
+ { { .w64 = 0x0000000000001f1fULL }, .popct=10 },
+ { { .w64 = 0x0000000000004001ULL }, .popct=2 },
+ { { .w64 = 0x0000000000007000ULL }, .popct=3 },
+ { { .w64 = 0x0000000000007fffULL }, .popct=15 },
+ { { .w64 = 0x0000005500555555ULL }, .popct=16 },
+ { { .w64 = 0x00aa0000aaaa00aaULL }, .popct=16 },
+ { { .w64 = 0x000f000000f00000ULL }, .popct=8 },
+ { { .w64 = 0x0c0c0000c0c0c0c0ULL }, .popct=12 },
+ { { .w64 = 0xf00f00f0f0f0f000ULL }, .popct=24 },
+ { { .w64 = 0x8000000000000000ULL }, .popct=1 },
+ { { .w64 = 0xf0f0f0f0f0f0f0f0ULL }, .popct=32 },
+ { { .w64 = 0xffffffffffffffffULL }, .popct=64 },
+};
+
+static void test_ctpop64(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sixtyfour_bit_data); i++) {
+ struct bitcnt_test_data *d = &sixtyfour_bit_data[i];
+ g_assert(ctpop64(d->value.w64)==d->popct);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ g_test_add_func("/bitcnt/ctpop8", test_ctpop8);
+ g_test_add_func("/bitcnt/ctpop16", test_ctpop16);
+ g_test_add_func("/bitcnt/ctpop32", test_ctpop32);
+ g_test_add_func("/bitcnt/ctpop64", test_ctpop64);
+ return g_test_run();
+}
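The new test drives the ctpop8/16/32/64 helpers from qemu/host-utils.h against hand-written tables. For context, a classic portable population count looks like the sketch below; this only illustrates what the helpers compute, not QEMU's implementation (which may simply defer to __builtin_popcount()):

    #include <assert.h>
    #include <stdint.h>

    /* Parallel bit count: sum bits in pairs, nibbles, bytes, then add bytes. */
    static int ctpop32_fallback(uint32_t v)
    {
        v = v - ((v >> 1) & 0x55555555u);
        v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);
        v = (v + (v >> 4)) & 0x0f0f0f0fu;
        return (int)((v * 0x01010101u) >> 24);
    }

    int main(void)
    {
        assert(ctpop32_fallback(0x00000000u) == 0);
        assert(ctpop32_fallback(0x55555555u) == 16);
        assert(ctpop32_fallback(0xffffffffu) == 32);
        return 0;
    }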
diff --git a/tests/test-io-channel-socket.c b/tests/test-io-channel-socket.c
index aa88c3cf45..aaa9116fb7 100644
--- a/tests/test-io-channel-socket.c
+++ b/tests/test-io-channel-socket.c
@@ -156,12 +156,11 @@ struct TestIOChannelData {
};
-static void test_io_channel_complete(Object *src,
- Error *err,
+static void test_io_channel_complete(QIOTask *task,
gpointer opaque)
{
struct TestIOChannelData *data = opaque;
- data->err = err != NULL;
+ data->err = qio_task_propagate_error(task, NULL);
g_main_loop_quit(data->loop);
}
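These hunks follow the QIOTask API change: completion callbacks now receive the task itself rather than a (source Object, Error) pair, and fetch any error with qio_task_propagate_error(). A hedged sketch of the new-style callback shape (the opaque handling here is illustrative):

    #include "qemu/osdep.h"
    #include "qemu/error-report.h"
    #include "qapi/error.h"
    #include "io/task.h"

    /* New-style completion callback: the QIOTask carries source and error. */
    static void my_complete_cb(QIOTask *task, gpointer opaque)
    {
        bool *failed = opaque;                  /* illustrative user data */
        Error *err = NULL;

        *failed = qio_task_propagate_error(task, &err);
        if (err) {
            error_report_err(err);              /* report and free the error */
        }
    }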
diff --git a/tests/test-io-channel-tls.c b/tests/test-io-channel-tls.c
index bd3ae2bf7a..8eaa208e1b 100644
--- a/tests/test-io-channel-tls.c
+++ b/tests/test-io-channel-tls.c
@@ -53,14 +53,13 @@ struct QIOChannelTLSHandshakeData {
bool failed;
};
-static void test_tls_handshake_done(Object *source,
- Error *err,
+static void test_tls_handshake_done(QIOTask *task,
gpointer opaque)
{
struct QIOChannelTLSHandshakeData *data = opaque;
data->finished = true;
- data->failed = err != NULL;
+ data->failed = qio_task_propagate_error(task, NULL);
}
diff --git a/tests/test-io-task.c b/tests/test-io-task.c
index e091c12e10..ff62272d5f 100644
--- a/tests/test-io-task.c
+++ b/tests/test-io-task.c
@@ -50,14 +50,13 @@ struct TestTaskData {
};
-static void task_callback(Object *source,
- Error *err,
+static void task_callback(QIOTask *task,
gpointer opaque)
{
struct TestTaskData *data = opaque;
- data->source = source;
- data->err = err;
+ data->source = qio_task_get_source(task);
+ qio_task_propagate_error(task, &data->err);
}
@@ -76,7 +75,6 @@ static void test_task_complete(void)
g_assert(obj == src);
object_unref(obj);
- object_unref(src);
g_assert(data.source == obj);
g_assert(data.err == NULL);
@@ -121,9 +119,9 @@ static void test_task_failure(void)
error_setg(&err, "Some error");
- qio_task_abort(task, err);
+ qio_task_set_error(task, err);
+ qio_task_complete(task);
- error_free(err);
object_unref(obj);
g_assert(data.source == obj);
@@ -142,31 +140,28 @@ struct TestThreadWorkerData {
GMainLoop *loop;
};
-static int test_task_thread_worker(QIOTask *task,
- Error **errp,
- gpointer opaque)
+static void test_task_thread_worker(QIOTask *task,
+ gpointer opaque)
{
struct TestThreadWorkerData *data = opaque;
data->worker = g_thread_self();
if (data->fail) {
- error_setg(errp, "Testing fail");
- return -1;
+ Error *err = NULL;
+ error_setg(&err, "Testing fail");
+ qio_task_set_error(task, err);
}
-
- return 0;
}
-static void test_task_thread_callback(Object *source,
- Error *err,
+static void test_task_thread_callback(QIOTask *task,
gpointer opaque)
{
struct TestThreadWorkerData *data = opaque;
- data->source = source;
- data->err = err;
+ data->source = qio_task_get_source(task);
+ qio_task_propagate_error(task, &data->err);
data->complete = g_thread_self();
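The thread-worker conversion in test-io-task.c goes the same way: a worker no longer returns an int and fills an Error **; it attaches any error to the task with qio_task_set_error() and simply returns. A minimal hedged sketch (names are illustrative):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "io/task.h"

    /* New-style worker: void return; failure is signalled through the task. */
    static void my_worker(QIOTask *task, gpointer opaque)
    {
        bool *should_fail = opaque;             /* illustrative user data */

        if (*should_fail) {
            Error *err = NULL;
            error_setg(&err, "worker failed");
            qio_task_set_error(task, err);      /* the task takes ownership */
        }
        /* on success, simply return; the task completes without an error */
    }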
diff --git a/tests/test-vmstate.c b/tests/test-vmstate.c
index d2f529b831..9d87faf12b 100644
--- a/tests/test-vmstate.c
+++ b/tests/test-vmstate.c
@@ -544,6 +544,150 @@ static void test_arr_ptr_str_no0_load(void)
}
}
+/* test QTAILQ migration */
+typedef struct TestQtailqElement TestQtailqElement;
+
+struct TestQtailqElement {
+ bool b;
+ uint8_t u8;
+ QTAILQ_ENTRY(TestQtailqElement) next;
+};
+
+typedef struct TestQtailq {
+ int16_t i16;
+ QTAILQ_HEAD(TestQtailqHead, TestQtailqElement) q;
+ int32_t i32;
+} TestQtailq;
+
+static const VMStateDescription vmstate_q_element = {
+ .name = "test/queue-element",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(b, TestQtailqElement),
+ VMSTATE_UINT8(u8, TestQtailqElement),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_q = {
+ .name = "test/queue",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT16(i16, TestQtailq),
+ VMSTATE_QTAILQ_V(q, TestQtailq, 1, vmstate_q_element, TestQtailqElement,
+ next),
+ VMSTATE_INT32(i32, TestQtailq),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+uint8_t wire_q[] = {
+ /* i16 */ 0xfe, 0x0,
+ /* start of element 0 of q */ 0x01,
+ /* .b */ 0x01,
+ /* .u8 */ 0x82,
+ /* start of element 1 of q */ 0x01,
+ /* b */ 0x00,
+ /* u8 */ 0x41,
+ /* end of q */ 0x00,
+ /* i32 */ 0x00, 0x01, 0x11, 0x70,
+ QEMU_VM_EOF, /* just to ensure we won't get EOF reported prematurely */
+};
+
+static void test_save_q(void)
+{
+ TestQtailq obj_q = {
+ .i16 = -512,
+ .i32 = 70000,
+ };
+
+ TestQtailqElement obj_qe1 = {
+ .b = true,
+ .u8 = 130,
+ };
+
+ TestQtailqElement obj_qe2 = {
+ .b = false,
+ .u8 = 65,
+ };
+
+ QTAILQ_INIT(&obj_q.q);
+ QTAILQ_INSERT_TAIL(&obj_q.q, &obj_qe1, next);
+ QTAILQ_INSERT_TAIL(&obj_q.q, &obj_qe2, next);
+
+ save_vmstate(&vmstate_q, &obj_q);
+ compare_vmstate(wire_q, sizeof(wire_q));
+}
+
+static void test_load_q(void)
+{
+ TestQtailq obj_q = {
+ .i16 = -512,
+ .i32 = 70000,
+ };
+
+ TestQtailqElement obj_qe1 = {
+ .b = true,
+ .u8 = 130,
+ };
+
+ TestQtailqElement obj_qe2 = {
+ .b = false,
+ .u8 = 65,
+ };
+
+ QTAILQ_INIT(&obj_q.q);
+ QTAILQ_INSERT_TAIL(&obj_q.q, &obj_qe1, next);
+ QTAILQ_INSERT_TAIL(&obj_q.q, &obj_qe2, next);
+
+ QEMUFile *fsave = open_test_file(true);
+
+ qemu_put_buffer(fsave, wire_q, sizeof(wire_q));
+ g_assert(!qemu_file_get_error(fsave));
+ qemu_fclose(fsave);
+
+ QEMUFile *fload = open_test_file(false);
+ TestQtailq tgt;
+
+ QTAILQ_INIT(&tgt.q);
+ vmstate_load_state(fload, &vmstate_q, &tgt, 1);
+ char eof = qemu_get_byte(fload);
+ g_assert(!qemu_file_get_error(fload));
+ g_assert_cmpint(tgt.i16, ==, obj_q.i16);
+ g_assert_cmpint(tgt.i32, ==, obj_q.i32);
+ g_assert_cmpint(eof, ==, QEMU_VM_EOF);
+
+ TestQtailqElement *qele_from = QTAILQ_FIRST(&obj_q.q);
+ TestQtailqElement *qlast_from = QTAILQ_LAST(&obj_q.q, TestQtailqHead);
+ TestQtailqElement *qele_to = QTAILQ_FIRST(&tgt.q);
+ TestQtailqElement *qlast_to = QTAILQ_LAST(&tgt.q, TestQtailqHead);
+
+ while (1) {
+ g_assert_cmpint(qele_to->b, ==, qele_from->b);
+ g_assert_cmpint(qele_to->u8, ==, qele_from->u8);
+ if ((qele_from == qlast_from) || (qele_to == qlast_to)) {
+ break;
+ }
+ qele_from = QTAILQ_NEXT(qele_from, next);
+ qele_to = QTAILQ_NEXT(qele_to, next);
+ }
+
+ g_assert_cmpint((uintptr_t) qele_from, ==, (uintptr_t) qlast_from);
+ g_assert_cmpint((uintptr_t) qele_to, ==, (uintptr_t) qlast_to);
+
+ /* clean up */
+ TestQtailqElement *qele;
+ while (!QTAILQ_EMPTY(&tgt.q)) {
+ qele = QTAILQ_LAST(&tgt.q, TestQtailqHead);
+ QTAILQ_REMOVE(&tgt.q, qele, next);
+ free(qele);
+ qele = NULL;
+ }
+ qemu_fclose(fload);
+}
+
int main(int argc, char **argv)
{
temp_fd = mkstemp(temp_file);
@@ -562,6 +706,9 @@ int main(int argc, char **argv)
test_arr_ptr_str_no0_save);
g_test_add_func("/vmstate/array/ptr/str/no0/load",
test_arr_ptr_str_no0_load);
+ g_test_add_func("/vmstate/qtailq/save/saveq", test_save_q);
+ g_test_add_func("/vmstate/qtailq/load/loadq", test_load_q);
+
g_test_run();
close(temp_fd);
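Reading wire_q back against the vmstate descriptions above, the QTAILQ framing appears to be: the leading scalar field, then for each element a 0x01 marker followed by the element's fields in declaration order, a single 0x00 after the last element, and finally the trailing scalar field, all in vmstate's usual big-endian byte order. A hedged sketch of a save loop that would produce exactly that layout, reusing the TestQtailq types from the test above and the ordinary qemu-file put helpers (an illustration, not the real VMSTATE_QTAILQ machinery):

    /* Assumes the TestQtailq/TestQtailqElement types defined in the test and
     * the qemu-file helpers qemu_put_byte()/qemu_put_be16()/qemu_put_be32(). */
    static void save_qtailq_sketch(QEMUFile *f, TestQtailq *obj)
    {
        TestQtailqElement *e;

        qemu_put_be16(f, obj->i16);        /* leading scalar: 0xfe 0x00 */
        QTAILQ_FOREACH(e, &obj->q, next) {
            qemu_put_byte(f, 1);           /* "another element follows" */
            qemu_put_byte(f, e->b);        /* VMSTATE_BOOL as one byte */
            qemu_put_byte(f, e->u8);
        }
        qemu_put_byte(f, 0);               /* end-of-queue marker */
        qemu_put_be32(f, obj->i32);        /* trailing scalar: 0x00 0x01 0x11 0x70 */
    }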
diff --git a/tests/vhost-user-bridge.c b/tests/vhost-user-bridge.c
index 775e031069..8618c20d53 100644
--- a/tests/vhost-user-bridge.c
+++ b/tests/vhost-user-bridge.c
@@ -30,17 +30,9 @@
#define _FILE_OFFSET_BITS 64
#include "qemu/osdep.h"
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <sys/unistd.h>
-#include <sys/eventfd.h>
-#include <arpa/inet.h>
-#include <netdb.h>
-#include <linux/vhost.h>
-
-#include "qemu/atomic.h"
+#include "qemu/iov.h"
#include "standard-headers/linux/virtio_net.h"
-#include "standard-headers/linux/virtio_ring.h"
+#include "contrib/libvhost-user/libvhost-user.h"
#define VHOST_USER_BRIDGE_DEBUG 1
@@ -64,6 +56,17 @@ typedef struct Dispatcher {
Event events[FD_SETSIZE];
} Dispatcher;
+typedef struct VubrDev {
+ VuDev vudev;
+ Dispatcher dispatcher;
+ int backend_udp_sock;
+ struct sockaddr_in backend_udp_dest;
+ int hdrlen;
+ int sock;
+ int ready;
+ int quit;
+} VubrDev;
+
static void
vubr_die(const char *s)
{
@@ -101,8 +104,6 @@ dispatcher_add(Dispatcher *dispr, int sock, void *ctx, CallbackFunc cb)
return 0;
}
-/* dispatcher_remove() is not currently in use but may be useful
- * in the future. */
static int
dispatcher_remove(Dispatcher *dispr, int sock)
{
@@ -157,1039 +158,313 @@ dispatcher_wait(Dispatcher *dispr, uint32_t timeout)
return 0;
}
-typedef struct VubrVirtq {
- int call_fd;
- int kick_fd;
- uint32_t size;
- uint16_t last_avail_index;
- uint16_t last_used_index;
- struct vring_desc *desc;
- struct vring_avail *avail;
- struct vring_used *used;
- uint64_t log_guest_addr;
- int enable;
-} VubrVirtq;
-
-/* Based on qemu/hw/virtio/vhost-user.c */
-
-#define VHOST_MEMORY_MAX_NREGIONS 8
-#define VHOST_USER_F_PROTOCOL_FEATURES 30
-/* v1.0 compliant. */
-#define VIRTIO_F_VERSION_1 32
-
-#define VHOST_LOG_PAGE 4096
-
-enum VhostUserProtocolFeature {
- VHOST_USER_PROTOCOL_F_MQ = 0,
- VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
- VHOST_USER_PROTOCOL_F_RARP = 2,
-
- VHOST_USER_PROTOCOL_F_MAX
-};
-
-#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
-
-typedef enum VhostUserRequest {
- VHOST_USER_NONE = 0,
- VHOST_USER_GET_FEATURES = 1,
- VHOST_USER_SET_FEATURES = 2,
- VHOST_USER_SET_OWNER = 3,
- VHOST_USER_RESET_OWNER = 4,
- VHOST_USER_SET_MEM_TABLE = 5,
- VHOST_USER_SET_LOG_BASE = 6,
- VHOST_USER_SET_LOG_FD = 7,
- VHOST_USER_SET_VRING_NUM = 8,
- VHOST_USER_SET_VRING_ADDR = 9,
- VHOST_USER_SET_VRING_BASE = 10,
- VHOST_USER_GET_VRING_BASE = 11,
- VHOST_USER_SET_VRING_KICK = 12,
- VHOST_USER_SET_VRING_CALL = 13,
- VHOST_USER_SET_VRING_ERR = 14,
- VHOST_USER_GET_PROTOCOL_FEATURES = 15,
- VHOST_USER_SET_PROTOCOL_FEATURES = 16,
- VHOST_USER_GET_QUEUE_NUM = 17,
- VHOST_USER_SET_VRING_ENABLE = 18,
- VHOST_USER_SEND_RARP = 19,
- VHOST_USER_MAX
-} VhostUserRequest;
-
-typedef struct VhostUserMemoryRegion {
- uint64_t guest_phys_addr;
- uint64_t memory_size;
- uint64_t userspace_addr;
- uint64_t mmap_offset;
-} VhostUserMemoryRegion;
-
-typedef struct VhostUserMemory {
- uint32_t nregions;
- uint32_t padding;
- VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
-} VhostUserMemory;
-
-typedef struct VhostUserLog {
- uint64_t mmap_size;
- uint64_t mmap_offset;
-} VhostUserLog;
-
-typedef struct VhostUserMsg {
- VhostUserRequest request;
-
-#define VHOST_USER_VERSION_MASK (0x3)
-#define VHOST_USER_REPLY_MASK (0x1<<2)
- uint32_t flags;
- uint32_t size; /* the following payload size */
- union {
-#define VHOST_USER_VRING_IDX_MASK (0xff)
-#define VHOST_USER_VRING_NOFD_MASK (0x1<<8)
- uint64_t u64;
- struct vhost_vring_state state;
- struct vhost_vring_addr addr;
- VhostUserMemory memory;
- VhostUserLog log;
- } payload;
- int fds[VHOST_MEMORY_MAX_NREGIONS];
- int fd_num;
-} QEMU_PACKED VhostUserMsg;
-
-#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
-
-/* The version of the protocol we support */
-#define VHOST_USER_VERSION (0x1)
-
-#define MAX_NR_VIRTQUEUE (8)
-
-typedef struct VubrDevRegion {
- /* Guest Physical address. */
- uint64_t gpa;
- /* Memory region size. */
- uint64_t size;
- /* QEMU virtual address (userspace). */
- uint64_t qva;
- /* Starting offset in our mmaped space. */
- uint64_t mmap_offset;
- /* Start address of mmaped space. */
- uint64_t mmap_addr;
-} VubrDevRegion;
-
-typedef struct VubrDev {
- int sock;
- Dispatcher dispatcher;
- uint32_t nregions;
- VubrDevRegion regions[VHOST_MEMORY_MAX_NREGIONS];
- VubrVirtq vq[MAX_NR_VIRTQUEUE];
- int log_call_fd;
- uint64_t log_size;
- uint8_t *log_table;
- int backend_udp_sock;
- struct sockaddr_in backend_udp_dest;
- int ready;
- uint64_t features;
- int hdrlen;
-} VubrDev;
-
-static const char *vubr_request_str[] = {
- [VHOST_USER_NONE] = "VHOST_USER_NONE",
- [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
- [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
- [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
- [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
- [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
- [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
- [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
- [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
- [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
- [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
- [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
- [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
- [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
- [VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR",
- [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
- [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
- [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
- [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
- [VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
- [VHOST_USER_MAX] = "VHOST_USER_MAX",
-};
-
static void
-print_buffer(uint8_t *buf, size_t len)
+vubr_handle_tx(VuDev *dev, int qidx)
{
- int i;
- printf("Raw buffer:\n");
- for (i = 0; i < len; i++) {
- if (i % 16 == 0) {
- printf("\n");
- }
- if (i % 4 == 0) {
- printf(" ");
- }
- printf("%02x ", buf[i]);
- }
- printf("\n............................................................\n");
-}
+ VuVirtq *vq = vu_get_queue(dev, qidx);
+ VubrDev *vubr = container_of(dev, VubrDev, vudev);
+ int hdrlen = vubr->hdrlen;
+ VuVirtqElement *elem = NULL;
-/* Translate guest physical address to our virtual address. */
-static uint64_t
-gpa_to_va(VubrDev *dev, uint64_t guest_addr)
-{
- int i;
-
- /* Find matching memory region. */
- for (i = 0; i < dev->nregions; i++) {
- VubrDevRegion *r = &dev->regions[i];
-
- if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
- return guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
- }
- }
-
- assert(!"address not found in regions");
- return 0;
-}
-
-/* Translate qemu virtual address to our virtual address. */
-static uint64_t
-qva_to_va(VubrDev *dev, uint64_t qemu_addr)
-{
- int i;
+ assert(qidx % 2);
- /* Find matching memory region. */
- for (i = 0; i < dev->nregions; i++) {
- VubrDevRegion *r = &dev->regions[i];
+ for (;;) {
+ ssize_t ret;
+ unsigned int out_num;
+ struct iovec sg[VIRTQUEUE_MAX_SIZE], *out_sg;
- if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
- return qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
+ elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
+ if (!elem) {
+ break;
}
- }
-
- assert(!"address not found in regions");
- return 0;
-}
-static void
-vubr_message_read(int conn_fd, VhostUserMsg *vmsg)
-{
- char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
- struct iovec iov = {
- .iov_base = (char *)vmsg,
- .iov_len = VHOST_USER_HDR_SIZE,
- };
- struct msghdr msg = {
- .msg_iov = &iov,
- .msg_iovlen = 1,
- .msg_control = control,
- .msg_controllen = sizeof(control),
- };
- size_t fd_size;
- struct cmsghdr *cmsg;
- int rc;
-
- rc = recvmsg(conn_fd, &msg, 0);
-
- if (rc == 0) {
- vubr_die("recvmsg");
- fprintf(stderr, "Peer disconnected.\n");
- exit(1);
- }
- if (rc < 0) {
- vubr_die("recvmsg");
- }
-
- vmsg->fd_num = 0;
- for (cmsg = CMSG_FIRSTHDR(&msg);
- cmsg != NULL;
- cmsg = CMSG_NXTHDR(&msg, cmsg))
- {
- if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
- fd_size = cmsg->cmsg_len - CMSG_LEN(0);
- vmsg->fd_num = fd_size / sizeof(int);
- memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
+ out_num = elem->out_num;
+ out_sg = elem->out_sg;
+ if (out_num < 1) {
+ fprintf(stderr, "virtio-net header not in first element\n");
break;
}
- }
-
- if (vmsg->size > sizeof(vmsg->payload)) {
- fprintf(stderr,
- "Error: too big message request: %d, size: vmsg->size: %u, "
- "while sizeof(vmsg->payload) = %zu\n",
- vmsg->request, vmsg->size, sizeof(vmsg->payload));
- exit(1);
- }
-
- if (vmsg->size) {
- rc = read(conn_fd, &vmsg->payload, vmsg->size);
- if (rc == 0) {
- vubr_die("recvmsg");
- fprintf(stderr, "Peer disconnected.\n");
- exit(1);
+ if (VHOST_USER_BRIDGE_DEBUG) {
+ iov_hexdump(out_sg, out_num, stderr, "TX:", 1024);
}
- if (rc < 0) {
- vubr_die("recvmsg");
+
+ if (hdrlen) {
+ unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
+ out_sg, out_num,
+ hdrlen, -1);
+ out_num = sg_num;
+ out_sg = sg;
}
- assert(rc == vmsg->size);
- }
-}
+ struct msghdr msg = {
+ .msg_name = (struct sockaddr *) &vubr->backend_udp_dest,
+ .msg_namelen = sizeof(struct sockaddr_in),
+ .msg_iov = out_sg,
+ .msg_iovlen = out_num,
+ };
+ do {
+ ret = sendmsg(vubr->backend_udp_sock, &msg, 0);
+ } while (ret == -1 && (errno == EAGAIN || errno == EINTR));
-static void
-vubr_message_write(int conn_fd, VhostUserMsg *vmsg)
-{
- int rc;
+ if (ret == -1) {
+ vubr_die("sendmsg()");
+ }
- do {
- rc = write(conn_fd, vmsg, VHOST_USER_HDR_SIZE + vmsg->size);
- } while (rc < 0 && errno == EINTR);
+ vu_queue_push(dev, vq, elem, 0);
+ vu_queue_notify(dev, vq);
- if (rc < 0) {
- vubr_die("write");
+ free(elem);
+ elem = NULL;
}
-}
-static void
-vubr_backend_udp_sendbuf(VubrDev *dev, uint8_t *buf, size_t len)
-{
- int slen = sizeof(struct sockaddr_in);
-
- if (sendto(dev->backend_udp_sock, buf, len, 0,
- (struct sockaddr *) &dev->backend_udp_dest, slen) == -1) {
- vubr_die("sendto()");
- }
+ free(elem);
}
-static int
-vubr_backend_udp_recvbuf(VubrDev *dev, uint8_t *buf, size_t buflen)
+static void
+iov_restore_front(struct iovec *front, struct iovec *iov, size_t bytes)
{
- int slen = sizeof(struct sockaddr_in);
- int rc;
+ struct iovec *cur;
- rc = recvfrom(dev->backend_udp_sock, buf, buflen, 0,
- (struct sockaddr *) &dev->backend_udp_dest,
- (socklen_t *)&slen);
- if (rc == -1) {
- vubr_die("recvfrom()");
+    for (cur = front; cur != iov; cur++) {
+ bytes -= cur->iov_len;
}
- return rc;
+ cur->iov_base -= bytes;
+ cur->iov_len += bytes;
}
static void
-vubr_consume_raw_packet(VubrDev *dev, uint8_t *buf, uint32_t len)
+iov_truncate(struct iovec *iov, unsigned iovc, size_t bytes)
{
- int hdrlen = dev->hdrlen;
- DPRINT(" hdrlen = %d\n", dev->hdrlen);
+ unsigned i;
- if (VHOST_USER_BRIDGE_DEBUG) {
- print_buffer(buf, len);
- }
- vubr_backend_udp_sendbuf(dev, buf + hdrlen, len - hdrlen);
-}
+ for (i = 0; i < iovc; i++, iov++) {
+ if (bytes < iov->iov_len) {
+ iov->iov_len = bytes;
+ return;
+ }
-/* Kick the log_call_fd if required. */
-static void
-vubr_log_kick(VubrDev *dev)
-{
- if (dev->log_call_fd != -1) {
- DPRINT("Kicking the QEMU's log...\n");
- eventfd_write(dev->log_call_fd, 1);
+ bytes -= iov->iov_len;
}
-}
-/* Kick the guest if necessary. */
-static void
-vubr_virtqueue_kick(VubrVirtq *vq)
-{
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
- DPRINT("Kicking the guest...\n");
- eventfd_write(vq->call_fd, 1);
- }
+ assert(!"couldn't truncate iov");
}
static void
-vubr_log_page(uint8_t *log_table, uint64_t page)
+vubr_backend_recv_cb(int sock, void *ctx)
{
- DPRINT("Logged dirty guest page: %"PRId64"\n", page);
- atomic_or(&log_table[page / 8], 1 << (page % 8));
-}
+ VubrDev *vubr = (VubrDev *) ctx;
+ VuDev *dev = &vubr->vudev;
+ VuVirtq *vq = vu_get_queue(dev, 0);
+ VuVirtqElement *elem = NULL;
+ struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
+ struct virtio_net_hdr_mrg_rxbuf mhdr;
+ unsigned mhdr_cnt = 0;
+ int hdrlen = vubr->hdrlen;
+ int i = 0;
+ struct virtio_net_hdr hdr = {
+ .flags = 0,
+ .gso_type = VIRTIO_NET_HDR_GSO_NONE
+ };
-static void
-vubr_log_write(VubrDev *dev, uint64_t address, uint64_t length)
-{
- uint64_t page;
+ DPRINT("\n\n *** IN UDP RECEIVE CALLBACK ***\n\n");
+ DPRINT(" hdrlen = %d\n", hdrlen);
- if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
- !dev->log_table || !length) {
+ if (!vu_queue_enabled(dev, vq) ||
+ !vu_queue_avail_bytes(dev, vq, hdrlen, 0)) {
+ DPRINT("Got UDP packet, but no available descriptors on RX virtq.\n");
return;
}
- assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));
-
- page = address / VHOST_LOG_PAGE;
- while (page * VHOST_LOG_PAGE < address + length) {
- vubr_log_page(dev->log_table, page);
- page += VHOST_LOG_PAGE;
- }
- vubr_log_kick(dev);
-}
-
-static void
-vubr_post_buffer(VubrDev *dev, VubrVirtq *vq, uint8_t *buf, int32_t len)
-{
- struct vring_desc *desc = vq->desc;
- struct vring_avail *avail = vq->avail;
- struct vring_used *used = vq->used;
- uint64_t log_guest_addr = vq->log_guest_addr;
- int32_t remaining_len = len;
-
- unsigned int size = vq->size;
-
- uint16_t avail_index = atomic_mb_read(&avail->idx);
-
- /* We check the available descriptors before posting the
- * buffer, so here we assume that enough available
- * descriptors. */
- assert(vq->last_avail_index != avail_index);
- uint16_t a_index = vq->last_avail_index % size;
- uint16_t u_index = vq->last_used_index % size;
- uint16_t d_index = avail->ring[a_index];
-
- int i = d_index;
- uint32_t written_len = 0;
-
do {
- DPRINT("Post packet to guest on vq:\n");
- DPRINT(" size = %d\n", vq->size);
- DPRINT(" last_avail_index = %d\n", vq->last_avail_index);
- DPRINT(" last_used_index = %d\n", vq->last_used_index);
- DPRINT(" a_index = %d\n", a_index);
- DPRINT(" u_index = %d\n", u_index);
- DPRINT(" d_index = %d\n", d_index);
- DPRINT(" desc[%d].addr = 0x%016"PRIx64"\n", i, desc[i].addr);
- DPRINT(" desc[%d].len = %d\n", i, desc[i].len);
- DPRINT(" desc[%d].flags = %d\n", i, desc[i].flags);
- DPRINT(" avail->idx = %d\n", avail_index);
- DPRINT(" used->idx = %d\n", used->idx);
-
- if (!(desc[i].flags & VRING_DESC_F_WRITE)) {
- /* FIXME: we should find writable descriptor. */
- fprintf(stderr, "Error: descriptor is not writable. Exiting.\n");
- exit(1);
- }
-
- void *chunk_start = (void *)(uintptr_t)gpa_to_va(dev, desc[i].addr);
- uint32_t chunk_len = desc[i].len;
- uint32_t chunk_write_len = MIN(remaining_len, chunk_len);
+ struct iovec *sg;
+ ssize_t ret, total = 0;
+ unsigned int num;
- memcpy(chunk_start, buf + written_len, chunk_write_len);
- vubr_log_write(dev, desc[i].addr, chunk_write_len);
- remaining_len -= chunk_write_len;
- written_len += chunk_write_len;
-
- if ((remaining_len == 0) || !(desc[i].flags & VRING_DESC_F_NEXT)) {
+ elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
+ if (!elem) {
break;
}
- i = desc[i].next;
- } while (1);
-
- if (remaining_len > 0) {
- fprintf(stderr,
- "Too long packet for RX, remaining_len = %d, Dropping...\n",
- remaining_len);
- return;
- }
-
- /* Add descriptor to the used ring. */
- used->ring[u_index].id = d_index;
- used->ring[u_index].len = len;
- vubr_log_write(dev,
- log_guest_addr + offsetof(struct vring_used, ring[u_index]),
- sizeof(used->ring[u_index]));
-
- vq->last_avail_index++;
- vq->last_used_index++;
-
- atomic_mb_set(&used->idx, vq->last_used_index);
- vubr_log_write(dev,
- log_guest_addr + offsetof(struct vring_used, idx),
- sizeof(used->idx));
-
- /* Kick the guest if necessary. */
- vubr_virtqueue_kick(vq);
-}
-
-static int
-vubr_process_desc(VubrDev *dev, VubrVirtq *vq)
-{
- struct vring_desc *desc = vq->desc;
- struct vring_avail *avail = vq->avail;
- struct vring_used *used = vq->used;
- uint64_t log_guest_addr = vq->log_guest_addr;
-
- unsigned int size = vq->size;
-
- uint16_t a_index = vq->last_avail_index % size;
- uint16_t u_index = vq->last_used_index % size;
- uint16_t d_index = avail->ring[a_index];
-
- uint32_t i, len = 0;
- size_t buf_size = 4096;
- uint8_t buf[4096];
-
- DPRINT("Chunks: ");
- i = d_index;
- do {
- void *chunk_start = (void *)(uintptr_t)gpa_to_va(dev, desc[i].addr);
- uint32_t chunk_len = desc[i].len;
-
- assert(!(desc[i].flags & VRING_DESC_F_WRITE));
-
- if (len + chunk_len < buf_size) {
- memcpy(buf + len, chunk_start, chunk_len);
- DPRINT("%d ", chunk_len);
- } else {
- fprintf(stderr, "Error: too long packet. Dropping...\n");
+ if (elem->in_num < 1) {
+ fprintf(stderr, "virtio-net contains no in buffers\n");
break;
}
- len += chunk_len;
-
- if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
- break;
+ sg = elem->in_sg;
+ num = elem->in_num;
+ if (i == 0) {
+ if (hdrlen == 12) {
+ mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
+ sg, elem->in_num,
+ offsetof(typeof(mhdr), num_buffers),
+ sizeof(mhdr.num_buffers));
+ }
+ iov_from_buf(sg, elem->in_num, 0, &hdr, sizeof hdr);
+ total += hdrlen;
+ assert(iov_discard_front(&sg, &num, hdrlen) == hdrlen);
}
- i = desc[i].next;
- } while (1);
- DPRINT("\n");
-
- if (!len) {
- return -1;
- }
-
- /* Add descriptor to the used ring. */
- used->ring[u_index].id = d_index;
- used->ring[u_index].len = len;
- vubr_log_write(dev,
- log_guest_addr + offsetof(struct vring_used, ring[u_index]),
- sizeof(used->ring[u_index]));
-
- vubr_consume_raw_packet(dev, buf, len);
-
- return 0;
-}
+ struct msghdr msg = {
+ .msg_name = (struct sockaddr *) &vubr->backend_udp_dest,
+ .msg_namelen = sizeof(struct sockaddr_in),
+ .msg_iov = sg,
+ .msg_iovlen = elem->in_num,
+ .msg_flags = MSG_DONTWAIT,
+ };
+ do {
+ ret = recvmsg(vubr->backend_udp_sock, &msg, 0);
+ } while (ret == -1 && (errno == EINTR));
-static void
-vubr_process_avail(VubrDev *dev, VubrVirtq *vq)
-{
- struct vring_avail *avail = vq->avail;
- struct vring_used *used = vq->used;
- uint64_t log_guest_addr = vq->log_guest_addr;
-
- while (vq->last_avail_index != atomic_mb_read(&avail->idx)) {
- vubr_process_desc(dev, vq);
- vq->last_avail_index++;
- vq->last_used_index++;
- }
+ if (i == 0) {
+ iov_restore_front(elem->in_sg, sg, hdrlen);
+ }
- atomic_mb_set(&used->idx, vq->last_used_index);
- vubr_log_write(dev,
- log_guest_addr + offsetof(struct vring_used, idx),
- sizeof(used->idx));
-}
+ if (ret == -1) {
+ if (errno == EWOULDBLOCK) {
+ vu_queue_rewind(dev, vq, 1);
+ break;
+ }
-static void
-vubr_backend_recv_cb(int sock, void *ctx)
-{
- VubrDev *dev = (VubrDev *) ctx;
- VubrVirtq *rx_vq = &dev->vq[0];
- uint8_t buf[4096];
- struct virtio_net_hdr_v1 *hdr = (struct virtio_net_hdr_v1 *)buf;
- int hdrlen = dev->hdrlen;
- int buflen = sizeof(buf);
- int len;
-
- if (!dev->ready) {
- return;
- }
+ vubr_die("recvmsg()");
+ }
- DPRINT("\n\n *** IN UDP RECEIVE CALLBACK ***\n\n");
- DPRINT(" hdrlen = %d\n", hdrlen);
+ total += ret;
+ iov_truncate(elem->in_sg, elem->in_num, total);
+ vu_queue_fill(dev, vq, elem, total, i++);
- uint16_t avail_index = atomic_mb_read(&rx_vq->avail->idx);
+ free(elem);
+ elem = NULL;
+ } while (false); /* could loop if DONTWAIT worked? */
- /* If there is no available descriptors, just do nothing.
- * The buffer will be handled by next arrived UDP packet,
- * or next kick on receive virtq. */
- if (rx_vq->last_avail_index == avail_index) {
- DPRINT("Got UDP packet, but no available descriptors on RX virtq.\n");
- return;
+ if (mhdr_cnt) {
+ mhdr.num_buffers = i;
+ iov_from_buf(mhdr_sg, mhdr_cnt,
+ 0,
+ &mhdr.num_buffers, sizeof mhdr.num_buffers);
}
- memset(buf, 0, hdrlen);
- /* TODO: support mergeable buffers. */
- if (hdrlen == 12)
- hdr->num_buffers = 1;
- len = vubr_backend_udp_recvbuf(dev, buf + hdrlen, buflen - hdrlen);
+ vu_queue_flush(dev, vq, i);
+ vu_queue_notify(dev, vq);
- vubr_post_buffer(dev, rx_vq, buf, len + hdrlen);
+ free(elem);
}
static void
-vubr_kick_cb(int sock, void *ctx)
+vubr_receive_cb(int sock, void *ctx)
{
- VubrDev *dev = (VubrDev *) ctx;
- eventfd_t kick_data;
- ssize_t rc;
+ VubrDev *vubr = (VubrDev *)ctx;
- rc = eventfd_read(sock, &kick_data);
- if (rc == -1) {
- vubr_die("eventfd_read()");
- } else {
- DPRINT("Got kick_data: %016"PRIx64"\n", kick_data);
- vubr_process_avail(dev, &dev->vq[1]);
+ if (!vu_dispatch(&vubr->vudev)) {
+ fprintf(stderr, "Error while dispatching\n");
}
}
-static int
-vubr_none_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- DPRINT("Function %s() not implemented yet.\n", __func__);
- return 0;
-}
+typedef struct WatchData {
+ VuDev *dev;
+ vu_watch_cb cb;
+ void *data;
+} WatchData;
-static int
-vubr_get_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
+static void
+watch_cb(int sock, void *ctx)
{
- vmsg->payload.u64 =
- ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
- (1ULL << VHOST_F_LOG_ALL) |
- (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
- (1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
-
- vmsg->size = sizeof(vmsg->payload.u64);
+ struct WatchData *wd = ctx;
- DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
-
- /* Reply */
- return 1;
+ wd->cb(wd->dev, VU_WATCH_IN, wd->data);
}
-static int
-vubr_set_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
+static void
+vubr_set_watch(VuDev *dev, int fd, int condition,
+ vu_watch_cb cb, void *data)
{
- DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
-
- dev->features = vmsg->payload.u64;
- if ((dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
- (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))) {
- dev->hdrlen = 12;
- } else {
- dev->hdrlen = 10;
- }
+ VubrDev *vubr = container_of(dev, VubrDev, vudev);
+ static WatchData watches[FD_SETSIZE];
+ struct WatchData *wd = &watches[fd];
- return 0;
-}
-
-static int
-vubr_set_owner_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- return 0;
+ wd->cb = cb;
+ wd->data = data;
+ wd->dev = dev;
+ dispatcher_add(&vubr->dispatcher, fd, wd, watch_cb);
}
static void
-vubr_close_log(VubrDev *dev)
+vubr_remove_watch(VuDev *dev, int fd)
{
- if (dev->log_table) {
- if (munmap(dev->log_table, dev->log_size) != 0) {
- vubr_die("munmap()");
- }
+ VubrDev *vubr = container_of(dev, VubrDev, vudev);
- dev->log_table = 0;
- }
- if (dev->log_call_fd != -1) {
- close(dev->log_call_fd);
- dev->log_call_fd = -1;
- }
-}
-
-static int
-vubr_reset_device_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- vubr_close_log(dev);
- dev->ready = 0;
- dev->features = 0;
- return 0;
+ dispatcher_remove(&vubr->dispatcher, fd);
}
static int
-vubr_set_mem_table_exec(VubrDev *dev, VhostUserMsg *vmsg)
+vubr_send_rarp_exec(VuDev *dev, VhostUserMsg *vmsg)
{
- int i;
- VhostUserMemory *memory = &vmsg->payload.memory;
- dev->nregions = memory->nregions;
-
- DPRINT("Nregions: %d\n", memory->nregions);
- for (i = 0; i < dev->nregions; i++) {
- void *mmap_addr;
- VhostUserMemoryRegion *msg_region = &memory->regions[i];
- VubrDevRegion *dev_region = &dev->regions[i];
-
- DPRINT("Region %d\n", i);
- DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
- msg_region->guest_phys_addr);
- DPRINT(" memory_size: 0x%016"PRIx64"\n",
- msg_region->memory_size);
- DPRINT(" userspace_addr 0x%016"PRIx64"\n",
- msg_region->userspace_addr);
- DPRINT(" mmap_offset 0x%016"PRIx64"\n",
- msg_region->mmap_offset);
-
- dev_region->gpa = msg_region->guest_phys_addr;
- dev_region->size = msg_region->memory_size;
- dev_region->qva = msg_region->userspace_addr;
- dev_region->mmap_offset = msg_region->mmap_offset;
-
- /* We don't use offset argument of mmap() since the
- * mapped address has to be page aligned, and we use huge
- * pages. */
- mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- vmsg->fds[i], 0);
-
- if (mmap_addr == MAP_FAILED) {
- vubr_die("mmap");
- }
- dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
- DPRINT(" mmap_addr: 0x%016"PRIx64"\n", dev_region->mmap_addr);
-
- close(vmsg->fds[i]);
- }
-
+ DPRINT("Function %s() not implemented yet.\n", __func__);
return 0;
}
static int
-vubr_set_log_base_exec(VubrDev *dev, VhostUserMsg *vmsg)
+vubr_process_msg(VuDev *dev, VhostUserMsg *vmsg, int *do_reply)
{
- int fd;
- uint64_t log_mmap_size, log_mmap_offset;
- void *rc;
-
- assert(vmsg->fd_num == 1);
- fd = vmsg->fds[0];
-
- assert(vmsg->size == sizeof(vmsg->payload.log));
- log_mmap_offset = vmsg->payload.log.mmap_offset;
- log_mmap_size = vmsg->payload.log.mmap_size;
- DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
- DPRINT("Log mmap_size: %"PRId64"\n", log_mmap_size);
-
- rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
- log_mmap_offset);
- if (rc == MAP_FAILED) {
- vubr_die("mmap");
+ switch (vmsg->request) {
+ case VHOST_USER_SEND_RARP:
+ *do_reply = vubr_send_rarp_exec(dev, vmsg);
+ return 1;
+ default:
+ /* let the library handle the rest */
+ return 0;
}
- dev->log_table = rc;
- dev->log_size = log_mmap_size;
- vmsg->size = sizeof(vmsg->payload.u64);
- /* Reply */
- return 1;
-}
-
-static int
-vubr_set_log_fd_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- assert(vmsg->fd_num == 1);
- dev->log_call_fd = vmsg->fds[0];
- DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);
return 0;
}
-static int
-vubr_set_vring_num_exec(VubrDev *dev, VhostUserMsg *vmsg)
+static void
+vubr_set_features(VuDev *dev, uint64_t features)
{
- unsigned int index = vmsg->payload.state.index;
- unsigned int num = vmsg->payload.state.num;
-
- DPRINT("State.index: %d\n", index);
- DPRINT("State.num: %d\n", num);
- dev->vq[index].size = num;
- return 0;
-}
+ VubrDev *vubr = container_of(dev, VubrDev, vudev);
-static int
-vubr_set_vring_addr_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- struct vhost_vring_addr *vra = &vmsg->payload.addr;
- unsigned int index = vra->index;
- VubrVirtq *vq = &dev->vq[index];
-
- DPRINT("vhost_vring_addr:\n");
- DPRINT(" index: %d\n", vra->index);
- DPRINT(" flags: %d\n", vra->flags);
- DPRINT(" desc_user_addr: 0x%016llx\n", vra->desc_user_addr);
- DPRINT(" used_user_addr: 0x%016llx\n", vra->used_user_addr);
- DPRINT(" avail_user_addr: 0x%016llx\n", vra->avail_user_addr);
- DPRINT(" log_guest_addr: 0x%016llx\n", vra->log_guest_addr);
-
- vq->desc = (struct vring_desc *)(uintptr_t)qva_to_va(dev, vra->desc_user_addr);
- vq->used = (struct vring_used *)(uintptr_t)qva_to_va(dev, vra->used_user_addr);
- vq->avail = (struct vring_avail *)(uintptr_t)qva_to_va(dev, vra->avail_user_addr);
- vq->log_guest_addr = vra->log_guest_addr;
-
- DPRINT("Setting virtq addresses:\n");
- DPRINT(" vring_desc at %p\n", vq->desc);
- DPRINT(" vring_used at %p\n", vq->used);
- DPRINT(" vring_avail at %p\n", vq->avail);
-
- vq->last_used_index = vq->used->idx;
-
- if (vq->last_avail_index != vq->used->idx) {
- DPRINT("Last avail index != used index: %d != %d, resuming",
- vq->last_avail_index, vq->used->idx);
- vq->last_avail_index = vq->used->idx;
+ if ((features & (1ULL << VIRTIO_F_VERSION_1)) ||
+ (features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))) {
+ vubr->hdrlen = 12;
+ } else {
+ vubr->hdrlen = 10;
}
-
- return 0;
}
-static int
-vubr_set_vring_base_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- unsigned int index = vmsg->payload.state.index;
- unsigned int num = vmsg->payload.state.num;
-
- DPRINT("State.index: %d\n", index);
- DPRINT("State.num: %d\n", num);
- dev->vq[index].last_avail_index = num;
-
- return 0;
-}
-
-static int
-vubr_get_vring_base_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- unsigned int index = vmsg->payload.state.index;
-
- DPRINT("State.index: %d\n", index);
- vmsg->payload.state.num = dev->vq[index].last_avail_index;
- vmsg->size = sizeof(vmsg->payload.state);
- /* FIXME: this is a work-around for a bug in QEMU enabling
- * too early vrings. When protocol features are enabled,
- * we have to respect * VHOST_USER_SET_VRING_ENABLE request. */
- dev->ready = 0;
-
- if (dev->vq[index].call_fd != -1) {
- close(dev->vq[index].call_fd);
- dispatcher_remove(&dev->dispatcher, dev->vq[index].call_fd);
- dev->vq[index].call_fd = -1;
- }
- if (dev->vq[index].kick_fd != -1) {
- close(dev->vq[index].kick_fd);
- dispatcher_remove(&dev->dispatcher, dev->vq[index].kick_fd);
- dev->vq[index].kick_fd = -1;
- }
-
- /* Reply */
- return 1;
-}
-
-static int
-vubr_set_vring_kick_exec(VubrDev *dev, VhostUserMsg *vmsg)
+static uint64_t
+vubr_get_features(VuDev *dev)
{
- uint64_t u64_arg = vmsg->payload.u64;
- int index = u64_arg & VHOST_USER_VRING_IDX_MASK;
-
- DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
-
- assert((u64_arg & VHOST_USER_VRING_NOFD_MASK) == 0);
- assert(vmsg->fd_num == 1);
-
- if (dev->vq[index].kick_fd != -1) {
- close(dev->vq[index].kick_fd);
- dispatcher_remove(&dev->dispatcher, dev->vq[index].kick_fd);
- }
- dev->vq[index].kick_fd = vmsg->fds[0];
- DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);
-
- if (index % 2 == 1) {
- /* TX queue. */
- dispatcher_add(&dev->dispatcher, dev->vq[index].kick_fd,
- dev, vubr_kick_cb);
-
- DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
- dev->vq[index].kick_fd, index);
- }
- /* We temporarily use this hack to determine that both TX and RX
- * queues are set up and ready for processing.
- * FIXME: we need to rely in VHOST_USER_SET_VRING_ENABLE and
- * actual kicks. */
- if (dev->vq[0].kick_fd != -1 &&
- dev->vq[1].kick_fd != -1) {
- dev->ready = 1;
- DPRINT("vhost-user-bridge is ready for processing queues.\n");
- }
- return 0;
-
+ return 1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE |
+ 1ULL << VIRTIO_NET_F_MRG_RXBUF;
}
-static int
-vubr_set_vring_call_exec(VubrDev *dev, VhostUserMsg *vmsg)
+static void
+vubr_queue_set_started(VuDev *dev, int qidx, bool started)
{
- uint64_t u64_arg = vmsg->payload.u64;
- int index = u64_arg & VHOST_USER_VRING_IDX_MASK;
+ VuVirtq *vq = vu_get_queue(dev, qidx);
- DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
- assert((u64_arg & VHOST_USER_VRING_NOFD_MASK) == 0);
- assert(vmsg->fd_num == 1);
-
- if (dev->vq[index].call_fd != -1) {
- close(dev->vq[index].call_fd);
- dispatcher_remove(&dev->dispatcher, dev->vq[index].call_fd);
+ if (qidx % 2 == 1) {
+ vu_set_queue_handler(dev, vq, started ? vubr_handle_tx : NULL);
}
- dev->vq[index].call_fd = vmsg->fds[0];
- DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);
-
- return 0;
-}
-
-static int
-vubr_set_vring_err_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
- return 0;
-}
-
-static int
-vubr_get_protocol_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- vmsg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
- DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
- vmsg->size = sizeof(vmsg->payload.u64);
-
- /* Reply */
- return 1;
}
-static int
-vubr_set_protocol_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- /* FIXME: unimplented */
- DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
- return 0;
-}
-
-static int
-vubr_get_queue_num_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- DPRINT("Function %s() not implemented yet.\n", __func__);
- return 0;
-}
-
-static int
-vubr_set_vring_enable_exec(VubrDev *dev, VhostUserMsg *vmsg)
+static void
+vubr_panic(VuDev *dev, const char *msg)
{
- unsigned int index = vmsg->payload.state.index;
- unsigned int enable = vmsg->payload.state.num;
+ VubrDev *vubr = container_of(dev, VubrDev, vudev);
- DPRINT("State.index: %d\n", index);
- DPRINT("State.enable: %d\n", enable);
- dev->vq[index].enable = enable;
- return 0;
-}
+ fprintf(stderr, "PANIC: %s\n", msg);
-static int
-vubr_send_rarp_exec(VubrDev *dev, VhostUserMsg *vmsg)
-{
- DPRINT("Function %s() not implemented yet.\n", __func__);
- return 0;
+ dispatcher_remove(&vubr->dispatcher, dev->sock);
+ vubr->quit = 1;
}
-static int
-vubr_execute_request(VubrDev *dev, VhostUserMsg *vmsg)
-{
- /* Print out generic part of the request. */
- DPRINT(
- "================== Vhost user message from QEMU ==================\n");
- DPRINT("Request: %s (%d)\n", vubr_request_str[vmsg->request],
- vmsg->request);
- DPRINT("Flags: 0x%x\n", vmsg->flags);
- DPRINT("Size: %d\n", vmsg->size);
-
- if (vmsg->fd_num) {
- int i;
- DPRINT("Fds:");
- for (i = 0; i < vmsg->fd_num; i++) {
- DPRINT(" %d", vmsg->fds[i]);
- }
- DPRINT("\n");
- }
-
- switch (vmsg->request) {
- case VHOST_USER_NONE:
- return vubr_none_exec(dev, vmsg);
- case VHOST_USER_GET_FEATURES:
- return vubr_get_features_exec(dev, vmsg);
- case VHOST_USER_SET_FEATURES:
- return vubr_set_features_exec(dev, vmsg);
- case VHOST_USER_SET_OWNER:
- return vubr_set_owner_exec(dev, vmsg);
- case VHOST_USER_RESET_OWNER:
- return vubr_reset_device_exec(dev, vmsg);
- case VHOST_USER_SET_MEM_TABLE:
- return vubr_set_mem_table_exec(dev, vmsg);
- case VHOST_USER_SET_LOG_BASE:
- return vubr_set_log_base_exec(dev, vmsg);
- case VHOST_USER_SET_LOG_FD:
- return vubr_set_log_fd_exec(dev, vmsg);
- case VHOST_USER_SET_VRING_NUM:
- return vubr_set_vring_num_exec(dev, vmsg);
- case VHOST_USER_SET_VRING_ADDR:
- return vubr_set_vring_addr_exec(dev, vmsg);
- case VHOST_USER_SET_VRING_BASE:
- return vubr_set_vring_base_exec(dev, vmsg);
- case VHOST_USER_GET_VRING_BASE:
- return vubr_get_vring_base_exec(dev, vmsg);
- case VHOST_USER_SET_VRING_KICK:
- return vubr_set_vring_kick_exec(dev, vmsg);
- case VHOST_USER_SET_VRING_CALL:
- return vubr_set_vring_call_exec(dev, vmsg);
- case VHOST_USER_SET_VRING_ERR:
- return vubr_set_vring_err_exec(dev, vmsg);
- case VHOST_USER_GET_PROTOCOL_FEATURES:
- return vubr_get_protocol_features_exec(dev, vmsg);
- case VHOST_USER_SET_PROTOCOL_FEATURES:
- return vubr_set_protocol_features_exec(dev, vmsg);
- case VHOST_USER_GET_QUEUE_NUM:
- return vubr_get_queue_num_exec(dev, vmsg);
- case VHOST_USER_SET_VRING_ENABLE:
- return vubr_set_vring_enable_exec(dev, vmsg);
- case VHOST_USER_SEND_RARP:
- return vubr_send_rarp_exec(dev, vmsg);
-
- case VHOST_USER_MAX:
- assert(vmsg->request != VHOST_USER_MAX);
- }
- return 0;
-}
-
-static void
-vubr_receive_cb(int sock, void *ctx)
-{
- VubrDev *dev = (VubrDev *) ctx;
- VhostUserMsg vmsg;
- int reply_requested;
-
- vubr_message_read(sock, &vmsg);
- reply_requested = vubr_execute_request(dev, &vmsg);
- if (reply_requested) {
- /* Set the version in the flags when sending the reply */
- vmsg.flags &= ~VHOST_USER_VERSION_MASK;
- vmsg.flags |= VHOST_USER_VERSION;
- vmsg.flags |= VHOST_USER_REPLY_MASK;
- vubr_message_write(sock, &vmsg);
- }
-}
+static const VuDevIface vuiface = {
+ .get_features = vubr_get_features,
+ .set_features = vubr_set_features,
+ .process_msg = vubr_process_msg,
+ .queue_set_started = vubr_queue_set_started,
+};
static void
vubr_accept_cb(int sock, void *ctx)
@@ -1204,36 +479,26 @@ vubr_accept_cb(int sock, void *ctx)
vubr_die("accept()");
}
DPRINT("Got connection from remote peer on sock %d\n", conn_fd);
+
+ vu_init(&dev->vudev,
+ conn_fd,
+ vubr_panic,
+ vubr_set_watch,
+ vubr_remove_watch,
+ &vuiface);
+
dispatcher_add(&dev->dispatcher, conn_fd, ctx, vubr_receive_cb);
+ dispatcher_remove(&dev->dispatcher, sock);
}
static VubrDev *
vubr_new(const char *path, bool client)
{
VubrDev *dev = (VubrDev *) calloc(1, sizeof(VubrDev));
- dev->nregions = 0;
- int i;
struct sockaddr_un un;
CallbackFunc cb;
size_t len;
- for (i = 0; i < MAX_NR_VIRTQUEUE; i++) {
- dev->vq[i] = (VubrVirtq) {
- .call_fd = -1, .kick_fd = -1,
- .size = 0,
- .last_avail_index = 0, .last_used_index = 0,
- .desc = 0, .avail = 0, .used = 0,
- .enable = 0,
- };
- }
-
- /* Init log */
- dev->log_call_fd = -1;
- dev->log_size = 0;
- dev->log_table = 0;
- dev->ready = 0;
- dev->features = 0;
-
/* Get a UNIX socket. */
dev->sock = socket(AF_UNIX, SOCK_STREAM, 0);
if (dev->sock == -1) {
@@ -1261,10 +526,17 @@ vubr_new(const char *path, bool client)
if (connect(dev->sock, (struct sockaddr *)&un, len) == -1) {
vubr_die("connect");
}
+ vu_init(&dev->vudev,
+ dev->sock,
+ vubr_panic,
+ vubr_set_watch,
+ vubr_remove_watch,
+ &vuiface);
cb = vubr_receive_cb;
}
dispatcher_init(&dev->dispatcher);
+
dispatcher_add(&dev->dispatcher, dev->sock, (void *)dev, cb);
return dev;
@@ -1345,7 +617,7 @@ vubr_backend_udp_setup(VubrDev *dev,
static void
vubr_run(VubrDev *dev)
{
- while (1) {
+ while (!dev->quit) {
/* timeout 200ms */
dispatcher_wait(&dev->dispatcher, 200000);
/* Here one can try polling strategy. */
@@ -1421,6 +693,9 @@ main(int argc, char *argv[])
vubr_backend_udp_setup(dev, lhost, lport, rhost, rport);
vubr_run(dev);
+
+ vu_deinit(&dev->vudev);
+
return 0;
out:
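With the protocol plumbing delegated to contrib/libvhost-user, the bridge keeps only its own state in VubrDev and embeds the library's VuDev inside it; every callback then recovers the enclosing device with container_of(). The pattern in isolation (ExampleCore/ExampleDev are illustrative stand-ins, not the real types):

    #include <stddef.h>

    #ifndef container_of
    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))
    #endif

    typedef struct ExampleCore { int fd; } ExampleCore;  /* stands in for VuDev */

    typedef struct ExampleDev {
        ExampleCore core;    /* embedded library object */
        int quit;            /* bridge-private state */
    } ExampleDev;

    /* A callback handed only the embedded object can reach the whole device. */
    static void example_panic(ExampleCore *core, const char *msg)
    {
        ExampleDev *dev = container_of(core, ExampleDev, core);

        (void)msg;
        dev->quit = 1;       /* mirrors vubr_panic() setting vubr->quit */
    }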
diff --git a/tests/virtio-9p-test.c b/tests/virtio-9p-test.c
index 9c4f6cb406..060407b20e 100644
--- a/tests/virtio-9p-test.c
+++ b/tests/virtio-9p-test.c
@@ -16,61 +16,53 @@
#include "libqos/virtio-pci.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_pci.h"
+#include "hw/9pfs/9p.h"
static const char mount_tag[] = "qtest";
-static char *test_share;
+typedef struct {
+ QVirtioDevice *dev;
+ QOSState *qs;
+ QVirtQueue *vq;
+ char *test_share;
+ uint16_t p9_req_tag;
+} QVirtIO9P;
-static QOSState *qvirtio_9p_start(void)
+static QVirtIO9P *qvirtio_9p_start(const char *driver)
{
const char *arch = qtest_get_arch();
const char *cmd = "-fsdev local,id=fsdev0,security_model=none,path=%s "
- "-device virtio-9p-pci,fsdev=fsdev0,mount_tag=%s";
+ "-device %s,fsdev=fsdev0,mount_tag=%s";
+ QVirtIO9P *v9p = g_new0(QVirtIO9P, 1);
- test_share = g_strdup("/tmp/qtest.XXXXXX");
- g_assert_nonnull(mkdtemp(test_share));
+ v9p->test_share = g_strdup("/tmp/qtest.XXXXXX");
+ g_assert_nonnull(mkdtemp(v9p->test_share));
if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
- return qtest_pc_boot(cmd, test_share, mount_tag);
- }
- if (strcmp(arch, "ppc64") == 0) {
- return qtest_spapr_boot(cmd, test_share, mount_tag);
+ v9p->qs = qtest_pc_boot(cmd, v9p->test_share, driver, mount_tag);
+ } else if (strcmp(arch, "ppc64") == 0) {
+ v9p->qs = qtest_spapr_boot(cmd, v9p->test_share, driver, mount_tag);
+ } else {
+ g_printerr("virtio-9p tests are only available on x86 or ppc64\n");
+ exit(EXIT_FAILURE);
}
- g_printerr("virtio-9p tests are only available on x86 or ppc64\n");
- exit(EXIT_FAILURE);
-}
-
-static void qvirtio_9p_stop(QOSState *qs)
-{
- qtest_shutdown(qs);
- rmdir(test_share);
- g_free(test_share);
+ return v9p;
}
-static void pci_nop(void)
+static void qvirtio_9p_stop(QVirtIO9P *v9p)
{
- QOSState *qs;
-
- qs = qvirtio_9p_start();
- qvirtio_9p_stop(qs);
+ qtest_shutdown(v9p->qs);
+ rmdir(v9p->test_share);
+ g_free(v9p->test_share);
+ g_free(v9p);
}
-typedef struct {
- QVirtioDevice *dev;
- QOSState *qs;
- QVirtQueue *vq;
-} QVirtIO9P;
-
-static QVirtIO9P *qvirtio_9p_pci_init(QOSState *qs)
+static QVirtIO9P *qvirtio_9p_pci_start(void)
{
- QVirtIO9P *v9p;
- QVirtioPCIDevice *dev;
-
- v9p = g_new0(QVirtIO9P, 1);
-
- v9p->qs = qs;
- dev = qvirtio_pci_device_find(v9p->qs->pcibus, VIRTIO_ID_9P);
+ QVirtIO9P *v9p = qvirtio_9p_start("virtio-9p-pci");
+ QVirtioPCIDevice *dev = qvirtio_pci_device_find(v9p->qs->pcibus,
+ VIRTIO_ID_9P);
g_assert_nonnull(dev);
g_assert_cmphex(dev->vdev.device_type, ==, VIRTIO_ID_9P);
v9p->dev = (QVirtioDevice *) dev;
@@ -84,26 +76,20 @@ static QVirtIO9P *qvirtio_9p_pci_init(QOSState *qs)
return v9p;
}
-static void qvirtio_9p_pci_free(QVirtIO9P *v9p)
+static void qvirtio_9p_pci_stop(QVirtIO9P *v9p)
{
qvirtqueue_cleanup(v9p->dev->bus, v9p->vq, v9p->qs->alloc);
qvirtio_pci_device_disable(container_of(v9p->dev, QVirtioPCIDevice, vdev));
g_free(v9p->dev);
- g_free(v9p);
+ qvirtio_9p_stop(v9p);
}
-static void pci_basic_config(void)
+static void pci_config(QVirtIO9P *v9p)
{
- QVirtIO9P *v9p;
- size_t tag_len;
+ size_t tag_len = qvirtio_config_readw(v9p->dev, 0);
char *tag;
int i;
- QOSState *qs;
- qs = qvirtio_9p_start();
- v9p = qvirtio_9p_pci_init(qs);
-
- tag_len = qvirtio_config_readw(v9p->dev, 0);
g_assert_cmpint(tag_len, ==, strlen(mount_tag));
tag = g_malloc(tag_len);
@@ -112,16 +98,406 @@ static void pci_basic_config(void)
}
g_assert_cmpmem(tag, tag_len, mount_tag, tag_len);
g_free(tag);
+}
+
+#define P9_MAX_SIZE 4096 /* Max size of a T-message or R-message */
+
+typedef struct {
+ QVirtIO9P *v9p;
+ uint16_t tag;
+ uint64_t t_msg;
+ uint32_t t_size;
+ uint64_t r_msg;
+ /* No r_size, it is hardcoded to P9_MAX_SIZE */
+ size_t t_off;
+ size_t r_off;
+} P9Req;
+
+static void v9fs_memwrite(P9Req *req, const void *addr, size_t len)
+{
+ memwrite(req->t_msg + req->t_off, addr, len);
+ req->t_off += len;
+}
+
+static void v9fs_memskip(P9Req *req, size_t len)
+{
+ req->r_off += len;
+}
+
+static void v9fs_memrewind(P9Req *req, size_t len)
+{
+ req->r_off -= len;
+}
+
+static void v9fs_memread(P9Req *req, void *addr, size_t len)
+{
+ memread(req->r_msg + req->r_off, addr, len);
+ req->r_off += len;
+}
+
+static void v9fs_uint16_write(P9Req *req, uint16_t val)
+{
+ uint16_t le_val = cpu_to_le16(val);
+
+ v9fs_memwrite(req, &le_val, 2);
+}
+
+static void v9fs_uint16_read(P9Req *req, uint16_t *val)
+{
+ v9fs_memread(req, val, 2);
+ le16_to_cpus(val);
+}
+
+static void v9fs_uint32_write(P9Req *req, uint32_t val)
+{
+ uint32_t le_val = cpu_to_le32(val);
+
+ v9fs_memwrite(req, &le_val, 4);
+}
+
+static void v9fs_uint32_read(P9Req *req, uint32_t *val)
+{
+ v9fs_memread(req, val, 4);
+ le32_to_cpus(val);
+}
+
+/* len[2] string[len] */
+static uint16_t v9fs_string_size(const char *string)
+{
+ size_t len = strlen(string);
+
+ g_assert_cmpint(len, <=, UINT16_MAX);
+
+ return 2 + len;
+}
+
+static void v9fs_string_write(P9Req *req, const char *string)
+{
+ int len = strlen(string);
+
+ g_assert_cmpint(len, <=, UINT16_MAX);
+
+ v9fs_uint16_write(req, (uint16_t) len);
+ v9fs_memwrite(req, string, len);
+}
+
+static void v9fs_string_read(P9Req *req, uint16_t *len, char **string)
+{
+ uint16_t local_len;
+
+ v9fs_uint16_read(req, &local_len);
+ if (len) {
+ *len = local_len;
+ }
+ if (string) {
+ *string = g_malloc(local_len);
+ v9fs_memread(req, *string, local_len);
+ } else {
+ v9fs_memskip(req, local_len);
+ }
+}
+
+ typedef struct {
+ uint32_t size;
+ uint8_t id;
+ uint16_t tag;
+} QEMU_PACKED P9Hdr;
+
+static P9Req *v9fs_req_init(QVirtIO9P *v9p, uint32_t size, uint8_t id,
+ uint16_t tag)
+{
+ P9Req *req = g_new0(P9Req, 1);
+ uint32_t t_size = 7 + size; /* 9P header has well-known size of 7 bytes */
+ P9Hdr hdr = {
+ .size = cpu_to_le32(t_size),
+ .id = id,
+ .tag = cpu_to_le16(tag)
+ };
+
+ g_assert_cmpint(t_size, <=, P9_MAX_SIZE);
- qvirtio_9p_pci_free(v9p);
- qvirtio_9p_stop(qs);
+ req->v9p = v9p;
+ req->t_size = t_size;
+ req->t_msg = guest_alloc(v9p->qs->alloc, req->t_size);
+ v9fs_memwrite(req, &hdr, 7);
+ req->tag = tag;
+ return req;
+}
+
+static void v9fs_req_send(P9Req *req)
+{
+ QVirtIO9P *v9p = req->v9p;
+ uint32_t free_head;
+
+ req->r_msg = guest_alloc(v9p->qs->alloc, P9_MAX_SIZE);
+ free_head = qvirtqueue_add(v9p->vq, req->t_msg, req->t_size, false, true);
+ qvirtqueue_add(v9p->vq, req->r_msg, P9_MAX_SIZE, true, false);
+ qvirtqueue_kick(v9p->dev, v9p->vq, free_head);
+ req->t_off = 0;
+}
+
+static void v9fs_req_recv(P9Req *req, uint8_t id)
+{
+ QVirtIO9P *v9p = req->v9p;
+ P9Hdr hdr;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ qvirtio_wait_queue_isr(v9p->dev, v9p->vq, 1000 * 1000);
+
+ v9fs_memread(req, &hdr, 7);
+ le32_to_cpus(&hdr.size);
+ le16_to_cpus(&hdr.tag);
+ if (hdr.size >= 7) {
+ break;
+ }
+ v9fs_memrewind(req, 7);
+ }
+
+ g_assert_cmpint(hdr.size, >=, 7);
+ g_assert_cmpint(hdr.size, <=, P9_MAX_SIZE);
+ g_assert_cmpint(hdr.tag, ==, req->tag);
+
+ if (hdr.id != id && hdr.id == P9_RLERROR) {
+ uint32_t err;
+ v9fs_uint32_read(req, &err);
+ g_printerr("Received Rlerror (%d) instead of Response %d\n", err, id);
+ g_assert_not_reached();
+ }
+ g_assert_cmpint(hdr.id, ==, id);
+}
+
+static void v9fs_req_free(P9Req *req)
+{
+ QVirtIO9P *v9p = req->v9p;
+
+ guest_free(v9p->qs->alloc, req->t_msg);
+ guest_free(v9p->qs->alloc, req->r_msg);
+ g_free(req);
+}
+
+/* size[4] Rlerror tag[2] ecode[4] */
+static void v9fs_rlerror(P9Req *req, uint32_t *err)
+{
+ v9fs_req_recv(req, P9_RLERROR);
+ v9fs_uint32_read(req, err);
+ v9fs_req_free(req);
+}
+
+/* size[4] Tversion tag[2] msize[4] version[s] */
+static P9Req *v9fs_tversion(QVirtIO9P *v9p, uint32_t msize, const char *version)
+{
+ P9Req *req = v9fs_req_init(v9p, 4 + v9fs_string_size(version), P9_TVERSION,
+ P9_NOTAG);
+
+ v9fs_uint32_write(req, msize);
+ v9fs_string_write(req, version);
+ v9fs_req_send(req);
+ return req;
+}
+
+/* size[4] Rversion tag[2] msize[4] version[s] */
+static void v9fs_rversion(P9Req *req, uint16_t *len, char **version)
+{
+ uint32_t msize;
+
+ v9fs_req_recv(req, P9_RVERSION);
+ v9fs_uint32_read(req, &msize);
+
+ g_assert_cmpint(msize, ==, P9_MAX_SIZE);
+
+ if (len || version) {
+ v9fs_string_read(req, len, version);
+ }
+
+ v9fs_req_free(req);
+}
+
+/* size[4] Tattach tag[2] fid[4] afid[4] uname[s] aname[s] n_uname[4] */
+static P9Req *v9fs_tattach(QVirtIO9P *v9p, uint32_t fid, uint32_t n_uname)
+{
+ const char *uname = ""; /* ignored by QEMU */
+ const char *aname = ""; /* ignored by QEMU */
+ P9Req *req = v9fs_req_init(v9p, 4 + 4 + 2 + 2 + 4, P9_TATTACH,
+ ++(v9p->p9_req_tag));
+
+ v9fs_uint32_write(req, fid);
+ v9fs_uint32_write(req, P9_NOFID);
+ v9fs_string_write(req, uname);
+ v9fs_string_write(req, aname);
+ v9fs_uint32_write(req, n_uname);
+ v9fs_req_send(req);
+ return req;
+}
+
+typedef char v9fs_qid[13];
+
+/* size[4] Rattach tag[2] qid[13] */
+static void v9fs_rattach(P9Req *req, v9fs_qid *qid)
+{
+ v9fs_req_recv(req, P9_RATTACH);
+ if (qid) {
+ v9fs_memread(req, qid, 13);
+ }
+ v9fs_req_free(req);
+}
+
+/* size[4] Twalk tag[2] fid[4] newfid[4] nwname[2] nwname*(wname[s]) */
+static P9Req *v9fs_twalk(QVirtIO9P *v9p, uint32_t fid, uint32_t newfid,
+ uint16_t nwname, char *const wnames[])
+{
+ P9Req *req;
+ int i;
+ uint32_t size = 4 + 4 + 2;
+
+ for (i = 0; i < nwname; i++) {
+ size += v9fs_string_size(wnames[i]);
+ }
+ req = v9fs_req_init(v9p, size, P9_TWALK, ++(v9p->p9_req_tag));
+ v9fs_uint32_write(req, fid);
+ v9fs_uint32_write(req, newfid);
+ v9fs_uint16_write(req, nwname);
+ for (i = 0; i < nwname; i++) {
+ v9fs_string_write(req, wnames[i]);
+ }
+ v9fs_req_send(req);
+ return req;
+}
+
+/* size[4] Rwalk tag[2] nwqid[2] nwqid*(wqid[13]) */
+static void v9fs_rwalk(P9Req *req, uint16_t *nwqid, v9fs_qid **wqid)
+{
+ uint16_t local_nwqid;
+
+ v9fs_req_recv(req, P9_RWALK);
+ v9fs_uint16_read(req, &local_nwqid);
+ if (nwqid) {
+ *nwqid = local_nwqid;
+ }
+ if (wqid) {
+ *wqid = g_malloc(local_nwqid * 13);
+ v9fs_memread(req, *wqid, local_nwqid * 13);
+ }
+ v9fs_req_free(req);
+}
+
+static void fs_version(QVirtIO9P *v9p)
+{
+ const char *version = "9P2000.L";
+ uint16_t server_len;
+ char *server_version;
+ P9Req *req;
+
+ req = v9fs_tversion(v9p, P9_MAX_SIZE, version);
+ v9fs_rversion(req, &server_len, &server_version);
+
+ g_assert_cmpmem(server_version, server_len, version, strlen(version));
+
+ g_free(server_version);
+}
+
+static void fs_attach(QVirtIO9P *v9p)
+{
+ P9Req *req;
+
+ fs_version(v9p);
+ req = v9fs_tattach(v9p, 0, getuid());
+ v9fs_rattach(req, NULL);
+}
+
+static void fs_walk(QVirtIO9P *v9p)
+{
+ char *wnames[P9_MAXWELEM], *paths[P9_MAXWELEM];
+ char *last_path = v9p->test_share;
+ uint16_t nwqid;
+ v9fs_qid *wqid;
+ int i;
+ P9Req *req;
+
+ for (i = 0; i < P9_MAXWELEM; i++) {
+ wnames[i] = g_strdup_printf("%s%d", __func__, i);
+ last_path = paths[i] = g_strdup_printf("%s/%s", last_path, wnames[i]);
+ g_assert(!mkdir(paths[i], 0700));
+ }
+
+ fs_attach(v9p);
+ req = v9fs_twalk(v9p, 0, 1, P9_MAXWELEM, wnames);
+ v9fs_rwalk(req, &nwqid, &wqid);
+
+ g_assert_cmpint(nwqid, ==, P9_MAXWELEM);
+
+ for (i = 0; i < P9_MAXWELEM; i++) {
+ rmdir(paths[P9_MAXWELEM - i - 1]);
+ g_free(paths[P9_MAXWELEM - i - 1]);
+ g_free(wnames[i]);
+ }
+
+ g_free(wqid);
+}
+
+static void fs_walk_no_slash(QVirtIO9P *v9p)
+{
+ char *const wnames[] = { g_strdup(" /") };
+ P9Req *req;
+ uint32_t err;
+
+ fs_attach(v9p);
+ req = v9fs_twalk(v9p, 0, 1, 1, wnames);
+ v9fs_rlerror(req, &err);
+
+ g_assert_cmpint(err, ==, ENOENT);
+
+ g_free(wnames[0]);
+}
+
+static void fs_walk_dotdot(QVirtIO9P *v9p)
+{
+ char *const wnames[] = { g_strdup("..") };
+ v9fs_qid root_qid, *wqid;
+ P9Req *req;
+
+ fs_version(v9p);
+ req = v9fs_tattach(v9p, 0, getuid());
+ v9fs_rattach(req, &root_qid);
+
+ req = v9fs_twalk(v9p, 0, 1, 1, wnames);
+ v9fs_rwalk(req, NULL, &wqid); /* We know we'll get one qid */
+
+ g_assert_cmpmem(&root_qid, 13, wqid[0], 13);
+
+ g_free(wqid);
+ g_free(wnames[0]);
+}
+
+typedef void (*v9fs_test_fn)(QVirtIO9P *v9p);
+
+static void v9fs_run_pci_test(gconstpointer data)
+{
+ v9fs_test_fn fn = data;
+ QVirtIO9P *v9p = qvirtio_9p_pci_start();
+
+ if (fn) {
+ fn(v9p);
+ }
+ qvirtio_9p_pci_stop(v9p);
+}
+
+static void v9fs_qtest_pci_add(const char *path, v9fs_test_fn fn)
+{
+ qtest_add_data_func(path, fn, v9fs_run_pci_test);
}
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
- qtest_add_func("/virtio/9p/pci/nop", pci_nop);
- qtest_add_func("/virtio/9p/pci/basic/configuration", pci_basic_config);
+ v9fs_qtest_pci_add("/virtio/9p/pci/nop", NULL);
+ v9fs_qtest_pci_add("/virtio/9p/pci/config", pci_config);
+ v9fs_qtest_pci_add("/virtio/9p/pci/fs/version/basic", fs_version);
+ v9fs_qtest_pci_add("/virtio/9p/pci/fs/attach/basic", fs_attach);
+ v9fs_qtest_pci_add("/virtio/9p/pci/fs/walk/basic", fs_walk);
+ v9fs_qtest_pci_add("/virtio/9p/pci/fs/walk/no_slash", fs_walk_no_slash);
+ v9fs_qtest_pci_add("/virtio/9p/pci/fs/walk/dotdot_from_root",
+ fs_walk_dotdot);
return g_test_run();
}
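
As a quick orientation for the new helpers, a condensed sketch of one full request/response round trip; it reuses only the helpers and P9_* identifiers introduced above, with Tversion as the example message (other message types substitute their own id and payload writers):

/* Sketch: one T-message/R-message exchange with the helpers above.
 * Relies on the file-local P9Req helpers and hw/9pfs/9p.h constants. */
static void example_version_round_trip(QVirtIO9P *v9p)
{
    const char *version = "9P2000.L";
    uint32_t msize;
    /* body = msize[4] + version[s]; the 7-byte header is added for us */
    P9Req *req = v9fs_req_init(v9p, 4 + v9fs_string_size(version),
                               P9_TVERSION, P9_NOTAG);

    v9fs_uint32_write(req, P9_MAX_SIZE);   /* msize */
    v9fs_string_write(req, version);       /* version[s] */
    v9fs_req_send(req);                    /* queue T-buffer and R-buffer */

    v9fs_req_recv(req, P9_RVERSION);       /* wait, check size/tag/id */
    v9fs_uint32_read(req, &msize);         /* then read the reply fields */
    v9fs_req_free(req);                    /* release both guest buffers */
}
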
diff --git a/trace-events b/trace-events
index f74e1d3d22..839a9d0fba 100644
--- a/trace-events
+++ b/trace-events
@@ -25,6 +25,12 @@
#
# The <format-string> should be a sprintf()-compatible format string.
+# aio-posix.c
+run_poll_handlers_begin(void *ctx, int64_t max_ns) "ctx %p max_ns %"PRId64
+run_poll_handlers_end(void *ctx, bool progress) "ctx %p progress %d"
+poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
+poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
+
# thread-pool.c
thread_pool_submit(void *pool, void *req, void *opaque) "pool %p req %p opaque %p"
thread_pool_complete(void *pool, void *req, void *opaque, int ret) "pool %p req %p opaque %p ret %d"
@@ -47,7 +53,7 @@ qemu_system_shutdown_request(void) ""
qemu_system_powerdown_request(void) ""
# spice-qemu-char.c
-spice_vmc_write(ssize_t out, int len) "spice wrottn %zd of requested %d"
+spice_vmc_write(ssize_t out, int len) "spice wrote %zd of requested %d"
spice_vmc_read(int bytes, int len) "spice read %d of requested %d"
spice_vmc_register_interface(void *scd) "spice vmc registered interface %p"
spice_vmc_unregister_interface(void *scd) "spice vmc unregistered interface %p"
@@ -135,6 +141,12 @@ memory_region_ram_device_write(int cpu_index, void *mr, uint64_t addr, uint64_t
# Targets: all
vcpu guest_cpu_enter(void)
+# Hot-unplug a virtual (guest) CPU
+#
+# Mode: user, softmmu
+# Targets: all
+vcpu guest_cpu_exit(void)
+
# Reset the state of a virtual (guest) CPU
#
# Mode: user, softmmu
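
As a hedged reminder of how these declarations are consumed: tracetool generates a trace_<name>() helper per line with matching arguments, so the new aio-posix events above would be emitted roughly like this (illustrative call site only, not part of the patch):

/* Illustrative emitters for the events declared above; the trace_*()
 * prototypes come from the tracetool-generated "trace.h". */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "trace.h"

static void poll_time_adjust(AioContext *ctx, int64_t old_ns, int64_t new_ns)
{
    if (new_ns < old_ns) {
        trace_poll_shrink(ctx, old_ns, new_ns);
    } else if (new_ns > old_ns) {
        trace_poll_grow(ctx, old_ns, new_ns);
    }
}
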
diff --git a/trace/control-target.c b/trace/control-target.c
index 7ebf6e0bcb..e2e138a3f0 100644
--- a/trace/control-target.c
+++ b/trace/control-target.c
@@ -79,7 +79,7 @@ void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
}
}
-static bool adding_first_cpu(void)
+static bool adding_first_cpu1(void)
{
CPUState *cpu;
size_t count = 0;
@@ -92,6 +92,15 @@ static bool adding_first_cpu(void)
return true;
}
+static bool adding_first_cpu(void)
+{
+ bool res;
+ cpu_list_lock();
+ res = adding_first_cpu1();
+ cpu_list_unlock();
+ return res;
+}
+
void trace_init_vcpu(CPUState *vcpu)
{
TraceEventIter iter;
diff --git a/trace/control.c b/trace/control.c
index 1a7bee6ddc..56a2632584 100644
--- a/trace/control.c
+++ b/trace/control.c
@@ -26,6 +26,7 @@
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "monitor/monitor.h"
+#include "trace.h"
int trace_events_enabled_count;
@@ -259,6 +260,24 @@ void trace_init_file(const char *file)
#endif
}
+void trace_fini_vcpu(CPUState *vcpu)
+{
+ TraceEventIter iter;
+ TraceEvent *ev;
+
+ trace_guest_cpu_exit(vcpu);
+
+ trace_event_iter_init(&iter, NULL);
+ while ((ev = trace_event_iter_next(&iter)) != NULL) {
+ if (trace_event_is_vcpu(ev) &&
+ trace_event_get_state_static(ev) &&
+ trace_event_get_vcpu_state_dynamic(vcpu, ev)) {
+ /* must disable to affect the global counter */
+ trace_event_set_vcpu_state_dynamic(vcpu, ev, false);
+ }
+ }
+}
+
bool trace_init_backends(void)
{
#ifdef CONFIG_TRACE_SIMPLE
diff --git a/trace/control.h b/trace/control.h
index ccaeac8552..4ea53e2986 100644
--- a/trace/control.h
+++ b/trace/control.h
@@ -202,6 +202,14 @@ void trace_init_file(const char *file);
void trace_init_vcpu(CPUState *vcpu);
/**
+ * trace_fini_vcpu:
+ * @vcpu: Removed vCPU.
+ *
+ * Disable dynamic event state for a hot-unplugged vCPU.
+ */
+void trace_fini_vcpu(CPUState *vcpu);
+
+/**
* trace_list_events:
*
* List all available events.
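
A hedged usage sketch of the new hook, pairing it with the existing init path (the wrapper function below is illustrative; the real callers sit in the vCPU creation and hot-unplug code):

/* Sketch: per-vCPU trace state follows the vCPU's lifetime. */
static void vcpu_trace_lifecycle(CPUState *vcpu)
{
    trace_init_vcpu(vcpu);   /* on plug: set up dynamic per-vCPU state */
    /* ... vCPU runs ... */
    trace_fini_vcpu(vcpu);   /* on unplug: emit guest_cpu_exit and clear
                              * dynamic state so the global counters
                              * stay balanced */
}
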
diff --git a/translate-all.c b/translate-all.c
index 3dd9214904..20262938bb 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -753,7 +753,7 @@ static inline void *alloc_code_gen_buffer(void)
size_t size2;
void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
PROT_NONE, flags, -1, 0);
- switch (buf2 != MAP_FAILED) {
+ switch ((int)(buf2 != MAP_FAILED)) {
case 1:
if (!cross_256mb(buf2, size)) {
/* Success! Use the new buffer. */
diff --git a/ui/console.c b/ui/console.c
index ed888e55ea..b9575f2ee5 100644
--- a/ui/console.c
+++ b/ui/console.c
@@ -124,6 +124,7 @@ struct QemuConsole {
int dcls;
DisplayChangeListener *gl;
bool gl_block;
+ int window_id;
/* Graphic console state. */
Object *device;
@@ -273,6 +274,16 @@ void graphic_hw_gl_block(QemuConsole *con, bool block)
}
}
+int qemu_console_get_window_id(QemuConsole *con)
+{
+ return con->window_id;
+}
+
+void qemu_console_set_window_id(QemuConsole *con, int window_id)
+{
+ con->window_id = window_id;
+}
+
void graphic_hw_invalidate(QemuConsole *con)
{
if (!con) {
diff --git a/ui/curses.c b/ui/curses.c
index 2e132a7bfa..03cefdf470 100644
--- a/ui/curses.c
+++ b/ui/curses.c
@@ -22,7 +22,6 @@
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
-#include <curses.h>
#ifndef _WIN32
#include <sys/ioctl.h>
diff --git a/ui/egl-helpers.c b/ui/egl-helpers.c
index 79cee0503a..cd24568a5e 100644
--- a/ui/egl-helpers.c
+++ b/ui/egl-helpers.c
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2015-2016 Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
#include "qemu/osdep.h"
#include <glob.h>
#include <dirent.h>
diff --git a/ui/gtk.c b/ui/gtk.c
index a216216d8b..86368e38b7 100644
--- a/ui/gtk.c
+++ b/ui/gtk.c
@@ -90,6 +90,9 @@
#ifndef GDK_IS_X11_DISPLAY
#define GDK_IS_X11_DISPLAY(dpy) (dpy == dpy)
#endif
+#ifndef GDK_IS_WAYLAND_DISPLAY
+#define GDK_IS_WAYLAND_DISPLAY(dpy) (dpy == dpy)
+#endif
#ifndef GDK_IS_WIN32_DISPLAY
#define GDK_IS_WIN32_DISPLAY(dpy) (dpy == dpy)
#endif
@@ -1054,6 +1057,10 @@ static int gd_map_keycode(GtkDisplayState *s, GdkDisplay *dpy, int gdk_keycode)
qemu_keycode = translate_xfree86_keycode(gdk_keycode - 97);
}
#endif
+#ifdef GDK_WINDOWING_WAYLAND
+ } else if (GDK_IS_WAYLAND_DISPLAY(dpy) && gdk_keycode < 158) {
+ qemu_keycode = translate_evdev_keycode(gdk_keycode - 97);
+#endif
} else if (gdk_keycode == 208) { /* Hiragana_Katakana */
qemu_keycode = 0x70;
} else if (gdk_keycode == 211) { /* backslash */
@@ -1699,6 +1706,11 @@ static CharDriverState *gd_vc_handler(ChardevVC *vc, Error **errp)
ChardevCommon *common = qapi_ChardevVC_base(vc);
CharDriverState *chr;
+ if (nb_vcs == MAX_VCS) {
+ error_setg(errp, "Maximum number of consoles reached");
+ return NULL;
+ }
+
chr = qemu_chr_alloc(common, errp);
if (!chr) {
return NULL;
@@ -2171,6 +2183,8 @@ static gboolean gtkinit;
void gtk_display_init(DisplayState *ds, bool full_screen, bool grab_on_hover)
{
+ VirtualConsole *vc;
+
GtkDisplayState *s = g_malloc0(sizeof(*s));
char *filename;
GdkDisplay *window_display;
@@ -2249,9 +2263,11 @@ void gtk_display_init(DisplayState *ds, bool full_screen, bool grab_on_hover)
}
#endif
+ vc = gd_vc_find_current(s);
+ gtk_widget_set_sensitive(s->view_menu, vc != NULL);
#ifdef CONFIG_VTE
gtk_widget_set_sensitive(s->copy_item,
- gd_vc_find_current(s)->type == GD_VC_VTE);
+ vc && vc->type == GD_VC_VTE);
#endif
if (full_screen) {
diff --git a/ui/input-keymap.c b/ui/input-keymap.c
index f1e700d720..8a1476fc48 100644
--- a/ui/input-keymap.c
+++ b/ui/input-keymap.c
@@ -131,6 +131,9 @@ static const int qcode_to_number[] = {
[Q_KEY_CODE_DELETE] = 0xd3,
[Q_KEY_CODE_RO] = 0x73,
+ [Q_KEY_CODE_HIRAGANA] = 0x70,
+ [Q_KEY_CODE_HENKAN] = 0x79,
+ [Q_KEY_CODE_YEN] = 0x7d,
[Q_KEY_CODE_KP_COMMA] = 0x7e,
[Q_KEY_CODE__MAX] = 0,
diff --git a/ui/sdl.c b/ui/sdl.c
index d8cf5bcf74..19e8a848a7 100644
--- a/ui/sdl.c
+++ b/ui/sdl.c
@@ -947,6 +947,7 @@ void sdl_display_init(DisplayState *ds, int full_screen, int no_frame)
int flags;
uint8_t data = 0;
const SDL_VideoInfo *vi;
+ SDL_SysWMinfo info;
char *filename;
#if defined(__APPLE__)
@@ -1023,5 +1024,29 @@ void sdl_display_init(DisplayState *ds, int full_screen, int no_frame)
sdl_cursor_hidden = SDL_CreateCursor(&data, &data, 8, 1, 0, 0);
sdl_cursor_normal = SDL_GetCursor();
+ memset(&info, 0, sizeof(info));
+ SDL_VERSION(&info.version);
+ if (SDL_GetWMInfo(&info)) {
+ int i;
+ for (i = 0; ; i++) {
+ /* All consoles share the same window */
+ QemuConsole *con = qemu_console_lookup_by_index(i);
+ if (con) {
+#if defined(SDL_VIDEO_DRIVER_X11)
+ qemu_console_set_window_id(con, info.info.x11.wmwindow);
+#elif defined(SDL_VIDEO_DRIVER_NANOX) || \
+ defined(SDL_VIDEO_DRIVER_WINDIB) || defined(SDL_VIDEO_DRIVER_DDRAW) || \
+ defined(SDL_VIDEO_DRIVER_GAPI) || \
+ defined(SDL_VIDEO_DRIVER_RISCOS)
+ qemu_console_set_window_id(con, (int) (uintptr_t) info.window);
+#else
+ qemu_console_set_window_id(con, info.data);
+#endif
+ } else {
+ break;
+ }
+ }
+ }
+
atexit(sdl_cleanup);
}
diff --git a/ui/sdl2.c b/ui/sdl2.c
index 30d2a3c35d..91fb111aa5 100644
--- a/ui/sdl2.c
+++ b/ui/sdl2.c
@@ -761,6 +761,7 @@ void sdl_display_init(DisplayState *ds, int full_screen, int no_frame)
uint8_t data = 0;
char *filename;
int i;
+ SDL_SysWMinfo info;
if (no_frame) {
gui_noframe = 1;
@@ -786,6 +787,8 @@ void sdl_display_init(DisplayState *ds, int full_screen, int no_frame)
exit(1);
}
SDL_SetHint(SDL_HINT_GRAB_KEYBOARD, "1");
+ memset(&info, 0, sizeof(info));
+ SDL_VERSION(&info.version);
for (i = 0;; i++) {
QemuConsole *con = qemu_console_lookup_by_index(i);
@@ -813,6 +816,16 @@ void sdl_display_init(DisplayState *ds, int full_screen, int no_frame)
#endif
sdl2_console[i].dcl.con = con;
register_displaychangelistener(&sdl2_console[i].dcl);
+
+#if defined(SDL_VIDEO_DRIVER_WINDOWS) || defined(SDL_VIDEO_DRIVER_X11)
+ if (SDL_GetWindowWMInfo(sdl2_console[i].real_window, &info)) {
+#if defined(SDL_VIDEO_DRIVER_WINDOWS)
+ qemu_console_set_window_id(con, (uintptr_t)info.info.win.window);
+#elif defined(SDL_VIDEO_DRIVER_X11)
+ qemu_console_set_window_id(con, info.info.x11.window);
+#endif
+ }
+#endif
}
/* Load a 32x32x4 image. White pixels are transparent. */
diff --git a/ui/vnc-auth-vencrypt.c b/ui/vnc-auth-vencrypt.c
index c0c29a5119..ffaab57550 100644
--- a/ui/vnc-auth-vencrypt.c
+++ b/ui/vnc-auth-vencrypt.c
@@ -65,16 +65,17 @@ static void start_auth_vencrypt_subauth(VncState *vs)
}
}
-static void vnc_tls_handshake_done(Object *source,
- Error *err,
+static void vnc_tls_handshake_done(QIOTask *task,
gpointer user_data)
{
VncState *vs = user_data;
+ Error *err = NULL;
- if (err) {
+ if (qio_task_propagate_error(task, &err)) {
VNC_DEBUG("Handshake failed %s\n",
error_get_pretty(err));
vnc_client_error(vs);
+ error_free(err);
} else {
vs->ioc_tag = qio_channel_add_watch(
vs->ioc, G_IO_IN | G_IO_OUT, vnc_client_io, vs, NULL);
diff --git a/ui/vnc-ws.c b/ui/vnc-ws.c
index bffb484a8d..f530cd5474 100644
--- a/ui/vnc-ws.c
+++ b/ui/vnc-ws.c
@@ -24,15 +24,16 @@
#include "io/channel-websock.h"
#include "qemu/bswap.h"
-static void vncws_tls_handshake_done(Object *source,
- Error *err,
+static void vncws_tls_handshake_done(QIOTask *task,
gpointer user_data)
{
VncState *vs = user_data;
+ Error *err = NULL;
- if (err) {
+ if (qio_task_propagate_error(task, &err)) {
VNC_DEBUG("Handshake failed %s\n", error_get_pretty(err));
vnc_client_error(vs);
+ error_free(err);
} else {
VNC_DEBUG("TLS handshake complete, starting websocket handshake\n");
vs->ioc_tag = qio_channel_add_watch(
@@ -83,15 +84,16 @@ gboolean vncws_tls_handshake_io(QIOChannel *ioc G_GNUC_UNUSED,
}
-static void vncws_handshake_done(Object *source,
- Error *err,
+static void vncws_handshake_done(QIOTask *task,
gpointer user_data)
{
VncState *vs = user_data;
+ Error *err = NULL;
- if (err) {
+ if (qio_task_propagate_error(task, &err)) {
VNC_DEBUG("Websock handshake failed %s\n", error_get_pretty(err));
vnc_client_error(vs);
+ error_free(err);
} else {
VNC_DEBUG("Websock handshake complete, starting VNC protocol\n");
vnc_start_protocol(vs);
diff --git a/ui/vnc.c b/ui/vnc.c
index 2c28a59ff7..29aa9c4c97 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -2459,10 +2459,14 @@ static int protocol_client_init(VncState *vs, uint8_t *data, size_t len)
pixel_format_message(vs);
- if (qemu_name)
+ if (qemu_name) {
size = snprintf(buf, sizeof(buf), "QEMU (%s)", qemu_name);
- else
+ if (size > sizeof(buf)) {
+ size = sizeof(buf);
+ }
+ } else {
size = snprintf(buf, sizeof(buf), "QEMU");
+ }
vnc_write_u32(vs, size);
vnc_write(vs, buf, size);
diff --git a/user-exec-stub.c b/user-exec-stub.c
new file mode 100644
index 0000000000..dbcf1ade9c
--- /dev/null
+++ b/user-exec-stub.c
@@ -0,0 +1,34 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qom/cpu.h"
+#include "sysemu/replay.h"
+
+void cpu_resume(CPUState *cpu)
+{
+}
+
+void qemu_init_vcpu(CPUState *cpu)
+{
+}
+
+/* User mode emulation does not support record/replay yet. */
+
+bool replay_exception(void)
+{
+ return true;
+}
+
+bool replay_has_exception(void)
+{
+ return false;
+}
+
+bool replay_interrupt(void)
+{
+ return true;
+}
+
+bool replay_has_interrupt(void)
+{
+ return false;
+}
diff --git a/util/Makefile.objs b/util/Makefile.objs
index ad0f9c7fe4..c1f247d675 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -1,5 +1,6 @@
util-obj-y = osdep.o cutils.o unicode.o qemu-timer-common.o
util-obj-y += bufferiszero.o
+util-obj-y += lockcnt.o
util-obj-$(CONFIG_POSIX) += compatfd.o
util-obj-$(CONFIG_POSIX) += event_notifier-posix.o
util-obj-$(CONFIG_POSIX) += mmap-alloc.o
diff --git a/util/bitmap.c b/util/bitmap.c
index 43ed011720..c1a84ca5e3 100644
--- a/util/bitmap.c
+++ b/util/bitmap.c
@@ -164,6 +164,8 @@ void bitmap_set(unsigned long *map, long start, long nr)
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+ assert(start >= 0 && nr >= 0);
+
while (nr - bits_to_set >= 0) {
*p |= mask_to_set;
nr -= bits_to_set;
@@ -184,6 +186,8 @@ void bitmap_set_atomic(unsigned long *map, long start, long nr)
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
+ assert(start >= 0 && nr >= 0);
+
/* First word */
if (nr - bits_to_set > 0) {
atomic_or(p, mask_to_set);
@@ -221,6 +225,8 @@ void bitmap_clear(unsigned long *map, long start, long nr)
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+ assert(start >= 0 && nr >= 0);
+
while (nr - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
nr -= bits_to_clear;
@@ -243,6 +249,8 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
unsigned long dirty = 0;
unsigned long old_bits;
+ assert(start >= 0 && nr >= 0);
+
/* First word */
if (nr - bits_to_clear > 0) {
old_bits = atomic_fetch_and(p, ~mask_to_clear);
diff --git a/util/event_notifier-posix.c b/util/event_notifier-posix.c
index c1f0d79b34..7e40252ade 100644
--- a/util/event_notifier-posix.c
+++ b/util/event_notifier-posix.c
@@ -90,15 +90,6 @@ int event_notifier_get_fd(const EventNotifier *e)
return e->rfd;
}
-int event_notifier_set_handler(EventNotifier *e,
- bool is_external,
- EventNotifierHandler *handler)
-{
- aio_set_fd_handler(iohandler_get_aio_context(), e->rfd, is_external,
- (IOHandler *)handler, NULL, e);
- return 0;
-}
-
int event_notifier_set(EventNotifier *e)
{
static const uint64_t value = 1;
diff --git a/util/event_notifier-win32.c b/util/event_notifier-win32.c
index de87df02d6..519fb59123 100644
--- a/util/event_notifier-win32.c
+++ b/util/event_notifier-win32.c
@@ -32,18 +32,6 @@ HANDLE event_notifier_get_handle(EventNotifier *e)
return e->event;
}
-int event_notifier_set_handler(EventNotifier *e,
- bool is_external,
- EventNotifierHandler *handler)
-{
- if (handler) {
- return qemu_add_wait_object(e->event, (IOHandler *)handler, e);
- } else {
- qemu_del_wait_object(e->event, (IOHandler *)handler, e);
- return 0;
- }
-}
-
int event_notifier_set(EventNotifier *e)
{
SetEvent(e->event);
diff --git a/util/lockcnt.c b/util/lockcnt.c
new file mode 100644
index 0000000000..4f88dcf8b8
--- /dev/null
+++ b/util/lockcnt.c
@@ -0,0 +1,397 @@
+/*
+ * QemuLockCnt implementation
+ *
+ * Copyright Red Hat, Inc. 2017
+ *
+ * Author:
+ * Paolo Bonzini <pbonzini@redhat.com>
+ */
+#include "qemu/osdep.h"
+#include "qemu/thread.h"
+#include "qemu/atomic.h"
+#include "trace.h"
+
+#ifdef CONFIG_LINUX
+#include "qemu/futex.h"
+
+/* On Linux, bits 0-1 are a futex-based lock, bits 2-31 are the counter.
+ * For the mutex algorithm see Ulrich Drepper's "Futexes Are Tricky" (ok,
+ * this is not the most relaxing citation I could make...). It is similar
+ * to mutex2 in the paper.
+ */
+
+#define QEMU_LOCKCNT_STATE_MASK 3
+#define QEMU_LOCKCNT_STATE_FREE 0 /* free, uncontended */
+#define QEMU_LOCKCNT_STATE_LOCKED 1 /* locked, uncontended */
+#define QEMU_LOCKCNT_STATE_WAITING 2 /* locked, contended */
+
+#define QEMU_LOCKCNT_COUNT_STEP 4
+#define QEMU_LOCKCNT_COUNT_SHIFT 2
+
+void qemu_lockcnt_init(QemuLockCnt *lockcnt)
+{
+ lockcnt->count = 0;
+}
+
+void qemu_lockcnt_destroy(QemuLockCnt *lockcnt)
+{
+}
+
+/* *val is the current value of lockcnt->count.
+ *
+ * If the lock is free, try a cmpxchg from *val to new_if_free; return
+ * true and set *val to the old value found by the cmpxchg in
+ * lockcnt->count.
+ *
+ * If the lock is taken, wait for it to be released and return false
+ * *without trying again to take the lock*. Again, set *val to the
+ * new value of lockcnt->count.
+ *
+ * If *waited is true on return, new_if_free's bottom two bits must not
+ * be QEMU_LOCKCNT_STATE_LOCKED on subsequent calls, because the caller
+ * does not know if there are other waiters. Furthermore, after *waited
+ * is set the caller has effectively acquired the lock. If it returns
+ * with the lock not taken, it must wake another futex waiter.
+ */
+static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val,
+ int new_if_free, bool *waited)
+{
+ /* Fast path for when the lock is free. */
+ if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_FREE) {
+ int expected = *val;
+
+ trace_lockcnt_fast_path_attempt(lockcnt, expected, new_if_free);
+ *val = atomic_cmpxchg(&lockcnt->count, expected, new_if_free);
+ if (*val == expected) {
+ trace_lockcnt_fast_path_success(lockcnt, expected, new_if_free);
+ *val = new_if_free;
+ return true;
+ }
+ }
+
+ /* The slow path moves from locked to waiting if necessary, then
+ * does a futex wait. Both steps can be repeated ad nauseam,
+ * only getting out of the loop if we can have another shot at the
+ * fast path. Once we can, get out to compute the new destination
+ * value for the fast path.
+ */
+ while ((*val & QEMU_LOCKCNT_STATE_MASK) != QEMU_LOCKCNT_STATE_FREE) {
+ if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_LOCKED) {
+ int expected = *val;
+ int new = expected - QEMU_LOCKCNT_STATE_LOCKED + QEMU_LOCKCNT_STATE_WAITING;
+
+ trace_lockcnt_futex_wait_prepare(lockcnt, expected, new);
+ *val = atomic_cmpxchg(&lockcnt->count, expected, new);
+ if (*val == expected) {
+ *val = new;
+ }
+ continue;
+ }
+
+ if ((*val & QEMU_LOCKCNT_STATE_MASK) == QEMU_LOCKCNT_STATE_WAITING) {
+ *waited = true;
+ trace_lockcnt_futex_wait(lockcnt, *val);
+ qemu_futex_wait(&lockcnt->count, *val);
+ *val = atomic_read(&lockcnt->count);
+ trace_lockcnt_futex_wait_resume(lockcnt, *val);
+ continue;
+ }
+
+ abort();
+ }
+ return false;
+}
+
+static void lockcnt_wake(QemuLockCnt *lockcnt)
+{
+ trace_lockcnt_futex_wake(lockcnt);
+ qemu_futex_wake(&lockcnt->count, 1);
+}
+
+void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
+{
+ int val = atomic_read(&lockcnt->count);
+ bool waited = false;
+
+ for (;;) {
+ if (val >= QEMU_LOCKCNT_COUNT_STEP) {
+ int expected = val;
+ val = atomic_cmpxchg(&lockcnt->count, val, val + QEMU_LOCKCNT_COUNT_STEP);
+ if (val == expected) {
+ break;
+ }
+ } else {
+ /* The fast path is (0, unlocked)->(1, unlocked). */
+ if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, QEMU_LOCKCNT_COUNT_STEP,
+ &waited)) {
+ break;
+ }
+ }
+ }
+
+ /* If we were woken by another thread, we should also wake one because
+ * we are effectively releasing the lock that was given to us. This is
+ * the case where qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING
+ * in the low bits, and qemu_lockcnt_inc_and_unlock would find it and
+ * wake someone.
+ */
+ if (waited) {
+ lockcnt_wake(lockcnt);
+ }
+}
+
+void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
+{
+ atomic_sub(&lockcnt->count, QEMU_LOCKCNT_COUNT_STEP);
+}
+
+/* Decrement a counter, and return locked if it is decremented to zero.
+ * If the function returns true, it is impossible for the counter to
+ * become nonzero until the next qemu_lockcnt_unlock.
+ */
+bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
+{
+ int val = atomic_read(&lockcnt->count);
+ int locked_state = QEMU_LOCKCNT_STATE_LOCKED;
+ bool waited = false;
+
+ for (;;) {
+ if (val >= 2 * QEMU_LOCKCNT_COUNT_STEP) {
+ int expected = val;
+ val = atomic_cmpxchg(&lockcnt->count, val, val - QEMU_LOCKCNT_COUNT_STEP);
+ if (val == expected) {
+ break;
+ }
+ } else {
+ /* If count is going 1->0, take the lock. The fast path is
+ * (1, unlocked)->(0, locked) or (1, unlocked)->(0, waiting).
+ */
+ if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, locked_state, &waited)) {
+ return true;
+ }
+
+ if (waited) {
+ /* At this point we do not know if there are more waiters. Assume
+ * there are.
+ */
+ locked_state = QEMU_LOCKCNT_STATE_WAITING;
+ }
+ }
+ }
+
+ /* If we were woken by another thread, but we're returning in unlocked
+ * state, we should also wake a thread because we are effectively
+ * releasing the lock that was given to us. This is the case where
+ * qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING in the low
+ * bits, and qemu_lockcnt_unlock would find it and wake someone.
+ */
+ if (waited) {
+ lockcnt_wake(lockcnt);
+ }
+ return false;
+}
+
+/* If the counter is one, decrement it and return locked. Otherwise do
+ * nothing.
+ *
+ * If the function returns true, it is impossible for the counter to
+ * become nonzero until the next qemu_lockcnt_unlock.
+ */
+bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
+{
+ int val = atomic_read(&lockcnt->count);
+ int locked_state = QEMU_LOCKCNT_STATE_LOCKED;
+ bool waited = false;
+
+ while (val < 2 * QEMU_LOCKCNT_COUNT_STEP) {
+ /* If count is going 1->0, take the lock. The fast path is
+ * (1, unlocked)->(0, locked) or (1, unlocked)->(0, waiting).
+ */
+ if (qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, locked_state, &waited)) {
+ return true;
+ }
+
+ if (waited) {
+ /* At this point we do not know if there are more waiters. Assume
+ * there are.
+ */
+ locked_state = QEMU_LOCKCNT_STATE_WAITING;
+ }
+ }
+
+ /* If we were woken by another thread, but we're returning in unlocked
+ * state, we should also wake a thread because we are effectively
+ * releasing the lock that was given to us. This is the case where
+ * qemu_lockcnt_lock would leave QEMU_LOCKCNT_STATE_WAITING in the low
+ * bits, and qemu_lockcnt_inc_and_unlock would find it and wake someone.
+ */
+ if (waited) {
+ lockcnt_wake(lockcnt);
+ }
+ return false;
+}
+
+void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
+{
+ int val = atomic_read(&lockcnt->count);
+ int step = QEMU_LOCKCNT_STATE_LOCKED;
+ bool waited = false;
+
+ /* The third argument is only used if the low bits of val are 0
+ * (QEMU_LOCKCNT_STATE_FREE), so just blindly mix in the desired
+ * state.
+ */
+ while (!qemu_lockcnt_cmpxchg_or_wait(lockcnt, &val, val + step, &waited)) {
+ if (waited) {
+ /* At this point we do not know if there are more waiters. Assume
+ * there are.
+ */
+ step = QEMU_LOCKCNT_STATE_WAITING;
+ }
+ }
+}
+
+void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
+{
+ int expected, new, val;
+
+ val = atomic_read(&lockcnt->count);
+ do {
+ expected = val;
+ new = (val + QEMU_LOCKCNT_COUNT_STEP) & ~QEMU_LOCKCNT_STATE_MASK;
+ trace_lockcnt_unlock_attempt(lockcnt, val, new);
+ val = atomic_cmpxchg(&lockcnt->count, val, new);
+ } while (val != expected);
+
+ trace_lockcnt_unlock_success(lockcnt, val, new);
+ if (val & QEMU_LOCKCNT_STATE_WAITING) {
+ lockcnt_wake(lockcnt);
+ }
+}
+
+void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
+{
+ int expected, new, val;
+
+ val = atomic_read(&lockcnt->count);
+ do {
+ expected = val;
+ new = val & ~QEMU_LOCKCNT_STATE_MASK;
+ trace_lockcnt_unlock_attempt(lockcnt, val, new);
+ val = atomic_cmpxchg(&lockcnt->count, val, new);
+ } while (val != expected);
+
+ trace_lockcnt_unlock_success(lockcnt, val, new);
+ if (val & QEMU_LOCKCNT_STATE_WAITING) {
+ lockcnt_wake(lockcnt);
+ }
+}
+
+unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
+{
+ return atomic_read(&lockcnt->count) >> QEMU_LOCKCNT_COUNT_SHIFT;
+}
+#else
+void qemu_lockcnt_init(QemuLockCnt *lockcnt)
+{
+ qemu_mutex_init(&lockcnt->mutex);
+ lockcnt->count = 0;
+}
+
+void qemu_lockcnt_destroy(QemuLockCnt *lockcnt)
+{
+ qemu_mutex_destroy(&lockcnt->mutex);
+}
+
+void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
+{
+ int old;
+ for (;;) {
+ old = atomic_read(&lockcnt->count);
+ if (old == 0) {
+ qemu_lockcnt_lock(lockcnt);
+ qemu_lockcnt_inc_and_unlock(lockcnt);
+ return;
+ } else {
+ if (atomic_cmpxchg(&lockcnt->count, old, old + 1) == old) {
+ return;
+ }
+ }
+ }
+}
+
+void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
+{
+ atomic_dec(&lockcnt->count);
+}
+
+/* Decrement a counter, and return locked if it is decremented to zero.
+ * It is impossible for the counter to become nonzero while the mutex
+ * is taken.
+ */
+bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
+{
+ int val = atomic_read(&lockcnt->count);
+ while (val > 1) {
+ int old = atomic_cmpxchg(&lockcnt->count, val, val - 1);
+ if (old != val) {
+ val = old;
+ continue;
+ }
+
+ return false;
+ }
+
+ qemu_lockcnt_lock(lockcnt);
+ if (atomic_fetch_dec(&lockcnt->count) == 1) {
+ return true;
+ }
+
+ qemu_lockcnt_unlock(lockcnt);
+ return false;
+}
+
+/* Decrement a counter and return locked if it is decremented to zero.
+ * Otherwise do nothing.
+ *
+ * It is impossible for the counter to become nonzero while the mutex
+ * is taken.
+ */
+bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
+{
+ /* No need for acquire semantics if we return false. */
+ int val = atomic_read(&lockcnt->count);
+ if (val > 1) {
+ return false;
+ }
+
+ qemu_lockcnt_lock(lockcnt);
+ if (atomic_fetch_dec(&lockcnt->count) == 1) {
+ return true;
+ }
+
+ qemu_lockcnt_inc_and_unlock(lockcnt);
+ return false;
+}
+
+void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
+{
+ qemu_mutex_lock(&lockcnt->mutex);
+}
+
+void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
+{
+ atomic_inc(&lockcnt->count);
+ qemu_mutex_unlock(&lockcnt->mutex);
+}
+
+void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
+{
+ qemu_mutex_unlock(&lockcnt->mutex);
+}
+
+unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
+{
+ return atomic_read(&lockcnt->count);
+}
+#endif
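
To make the intended usage concrete, a hedged sketch of the visit/reclaim pattern this primitive supports (the list layout and helper names are illustrative; docs/lockcnt.txt in this series is the authoritative description):

/* Sketch: readers count themselves in and out; the last reader out wins
 * the lock and may reclaim entries that writers only marked as deleted.
 * Names other than the qemu_lockcnt_* API are illustrative. */
static QemuLockCnt handlers_lockcnt;   /* qemu_lockcnt_init() at setup */

static void visit_handlers(void)
{
    qemu_lockcnt_inc(&handlers_lockcnt);          /* block reclamation */
    /* ... walk the handler list, skipping entries marked deleted ... */
    if (qemu_lockcnt_dec_and_lock(&handlers_lockcnt)) {
        /* Count reached zero and we hold the lock: no new visitor can
         * start, so deleted entries can be unlinked and freed here. */
        qemu_lockcnt_unlock(&handlers_lockcnt);
    }
}

static void delete_handler(void)
{
    qemu_lockcnt_lock(&handlers_lockcnt);         /* exclude other writers */
    /* ... mark the entry deleted; do not free it while visitors run ... */
    qemu_lockcnt_unlock(&handlers_lockcnt);
}
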
diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index 5a85aa3c89..2f55f5e94f 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -12,6 +12,7 @@
#include "qemu/osdep.h"
#include "qemu/mmap-alloc.h"
+#include "qemu/host-utils.h"
#define HUGETLBFS_MAGIC 0x958458f6
@@ -61,18 +62,18 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
#else
void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif
- size_t offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
+ size_t offset;
void *ptr1;
if (ptr == MAP_FAILED) {
return MAP_FAILED;
}
- /* Make sure align is a power of 2 */
- assert(!(align & (align - 1)));
+ assert(is_power_of_2(align));
/* Always align to host page size */
assert(align >= getpagesize());
+ offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
ptr1 = mmap(ptr + offset, size, PROT_READ | PROT_WRITE,
MAP_FIXED |
(fd == -1 ? MAP_ANONYMOUS : 0) |
@@ -83,22 +84,20 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
return MAP_FAILED;
}
- ptr += offset;
- total -= offset;
-
if (offset > 0) {
- munmap(ptr - offset, offset);
+ munmap(ptr, offset);
}
/*
* Leave a single PROT_NONE page allocated after the RAM block, to serve as
* a guard page guarding against potential buffer overflows.
*/
+ total -= offset;
if (total > size + getpagesize()) {
- munmap(ptr + size + getpagesize(), total - size - getpagesize());
+ munmap(ptr1 + size + getpagesize(), total - size - getpagesize());
}
- return ptr;
+ return ptr1;
}
void qemu_ram_munmap(void *ptr, size_t size)
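
A hedged worked example of the head-trim arithmetic the hunk reorders (the address and alignment are made up; QEMU_ALIGN_UP rounds up to the next multiple of align):

/* Standalone illustration of the offset computed in qemu_ram_mmap(). */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint64_t ptr = 0x7f0000001000ULL;    /* start of PROT_NONE reservation */
    uint64_t align = 2 * 1024 * 1024;    /* 2 MiB alignment requirement */
    uint64_t offset = ALIGN_UP(ptr, align) - ptr;

    /* qemu_ram_mmap() munmap()s the first 'offset' bytes and returns the
     * aligned mapping at ptr + offset (ptr1 in the patch). */
    printf("offset = 0x%" PRIx64 "\n", offset);   /* prints 0x1ff000 */
    return 0;
}
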
diff --git a/util/oslib-win32.c b/util/oslib-win32.c
index d09863cc9d..0b1890fd33 100644
--- a/util/oslib-win32.c
+++ b/util/oslib-win32.c
@@ -327,6 +327,7 @@ char *qemu_get_exec_dir(void)
return g_strdup(exec_dir);
}
+#if !GLIB_CHECK_VERSION(2, 50, 0)
/*
* The original implementation of g_poll from glib has a problem on Windows
* when using timeouts < 10 ms.
@@ -530,6 +531,7 @@ gint g_poll(GPollFD *fds, guint nfds, gint timeout)
return retval;
}
+#endif
int getpagesize(void)
{
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index 737bffa984..a5d2f6c0c3 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -131,6 +131,13 @@ void qemu_coroutine_enter(Coroutine *co)
}
}
+void qemu_coroutine_enter_if_inactive(Coroutine *co)
+{
+ if (!qemu_coroutine_entered(co)) {
+ qemu_coroutine_enter(co);
+ }
+}
+
void coroutine_fn qemu_coroutine_yield(void)
{
Coroutine *self = qemu_coroutine_self();
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index fe1d07aaef..7c120c45ce 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -38,6 +38,10 @@
# define AI_V4MAPPED 0
#endif
+#ifndef AI_NUMERICSERV
+# define AI_NUMERICSERV 0
+#endif
+
static int inet_getport(struct addrinfo *e)
{
@@ -110,8 +114,8 @@ NetworkAddressFamily inet_netfamily(int family)
* outside scope of this method and not currently handled by
* callers at all.
*/
-static int inet_ai_family_from_address(InetSocketAddress *addr,
- Error **errp)
+int inet_ai_family_from_address(InetSocketAddress *addr,
+ Error **errp)
{
if (addr->has_ipv6 && addr->has_ipv4 &&
!addr->ipv6 && !addr->ipv4) {
@@ -141,6 +145,9 @@ static int inet_listen_saddr(InetSocketAddress *saddr,
memset(&ai,0, sizeof(ai));
ai.ai_flags = AI_PASSIVE;
+ if (saddr->has_numeric && saddr->numeric) {
+ ai.ai_flags |= AI_NUMERICHOST | AI_NUMERICSERV;
+ }
ai.ai_family = inet_ai_family_from_address(saddr, &err);
ai.ai_socktype = SOCK_STREAM;
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index d20cddec0c..73e3a0edf5 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -11,10 +11,6 @@
*
*/
#include "qemu/osdep.h"
-#ifdef __linux__
-#include <sys/syscall.h>
-#include <linux/futex.h>
-#endif
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"
@@ -294,28 +290,9 @@ void qemu_sem_wait(QemuSemaphore *sem)
}
#ifdef __linux__
-#define futex(...) syscall(__NR_futex, __VA_ARGS__)
-
-static inline void futex_wake(QemuEvent *ev, int n)
-{
- futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
-}
-
-static inline void futex_wait(QemuEvent *ev, unsigned val)
-{
- while (futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0)) {
- switch (errno) {
- case EWOULDBLOCK:
- return;
- case EINTR:
- break; /* get out of switch and retry */
- default:
- abort();
- }
- }
-}
+#include "qemu/futex.h"
#else
-static inline void futex_wake(QemuEvent *ev, int n)
+static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
pthread_mutex_lock(&ev->lock);
if (n == 1) {
@@ -326,7 +303,7 @@ static inline void futex_wake(QemuEvent *ev, int n)
pthread_mutex_unlock(&ev->lock);
}
-static inline void futex_wait(QemuEvent *ev, unsigned val)
+static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
pthread_mutex_lock(&ev->lock);
if (ev->value == val) {
@@ -338,7 +315,7 @@ static inline void futex_wait(QemuEvent *ev, unsigned val)
/* Valid transitions:
* - free->set, when setting the event
- * - busy->set, when setting the event, followed by futex_wake
+ * - busy->set, when setting the event, followed by qemu_futex_wake
* - set->free, when resetting the event
* - free->busy, when waiting
*
@@ -381,7 +358,7 @@ void qemu_event_set(QemuEvent *ev)
if (atomic_read(&ev->value) != EV_SET) {
if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
/* There were waiters, wake them up. */
- futex_wake(ev, INT_MAX);
+ qemu_futex_wake(ev, INT_MAX);
}
}
}
@@ -419,7 +396,7 @@ void qemu_event_wait(QemuEvent *ev)
return;
}
}
- futex_wait(ev, EV_BUSY);
+ qemu_futex_wait(ev, EV_BUSY);
}
}
@@ -481,12 +458,6 @@ void qemu_thread_create(QemuThread *thread, const char *name,
if (err) {
error_exit(err, __func__);
}
- if (mode == QEMU_THREAD_DETACHED) {
- err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
- if (err) {
- error_exit(err, __func__);
- }
- }
/* Leave signal handling to the iothread. */
sigfillset(&set);
@@ -499,6 +470,12 @@ void qemu_thread_create(QemuThread *thread, const char *name,
qemu_thread_set_name(thread, name);
}
+ if (mode == QEMU_THREAD_DETACHED) {
+ err = pthread_detach(thread->thread);
+ if (err) {
+ error_exit(err, __func__);
+ }
+ }
pthread_sigmask(SIG_SETMASK, &oldset, NULL);
pthread_attr_destroy(&attr);
diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index 728e76b5b2..29c3e4dd85 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -269,7 +269,7 @@ void qemu_sem_wait(QemuSemaphore *sem)
*
* Valid transitions:
* - free->set, when setting the event
- * - busy->set, when setting the event, followed by futex_wake
+ * - busy->set, when setting the event, followed by SetEvent
* - set->free, when resetting the event
* - free->busy, when waiting
*
@@ -497,8 +497,8 @@ HANDLE qemu_thread_get_handle(QemuThread *thread)
EnterCriticalSection(&data->cs);
if (!data->exited) {
- handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME, FALSE,
- thread->tid);
+ handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME |
+ THREAD_SET_CONTEXT, FALSE, thread->tid);
} else {
handle = NULL;
}
diff --git a/util/trace-events b/util/trace-events
index ed06aee2ec..2b8aa30739 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -30,3 +30,13 @@ qemu_anon_ram_free(void *ptr, size_t size) "ptr %p size %zu"
hbitmap_iter_skip_words(const void *hb, void *hbi, uint64_t pos, unsigned long cur) "hb %p hbi %p pos %"PRId64" cur 0x%lx"
hbitmap_reset(void *hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit) "hb %p items %"PRIu64",%"PRIu64" bits %"PRIu64"..%"PRIu64
hbitmap_set(void *hb, uint64_t start, uint64_t count, uint64_t sbit, uint64_t ebit) "hb %p items %"PRIu64",%"PRIu64" bits %"PRIu64"..%"PRIu64
+
+# util/lockcnt.c
+lockcnt_fast_path_attempt(const void *lockcnt, int expected, int new) "lockcnt %p fast path %d->%d"
+lockcnt_fast_path_success(const void *lockcnt, int expected, int new) "lockcnt %p fast path %d->%d succeeded"
+lockcnt_unlock_attempt(const void *lockcnt, int expected, int new) "lockcnt %p unlock %d->%d"
+lockcnt_unlock_success(const void *lockcnt, int expected, int new) "lockcnt %p unlock %d->%d succeeded"
+lockcnt_futex_wait_prepare(const void *lockcnt, int expected, int new) "lockcnt %p preparing slow path %d->%d"
+lockcnt_futex_wait(const void *lockcnt, int val) "lockcnt %p waiting on %d"
+lockcnt_futex_wait_resume(const void *lockcnt, int new) "lockcnt %p after wait: %d"
+lockcnt_futex_wake(const void *lockcnt) "lockcnt %p waking up one waiter"
diff --git a/util/uri.c b/util/uri.c
index 70a9cbcbd2..21b1828170 100644
--- a/util/uri.c
+++ b/util/uri.c
@@ -342,7 +342,7 @@ rfc3986_parse_port(URI *uri, const char **str)
* @uri: pointer to an URI structure
* @str: the string to analyze
*
- * Parse an user informations part and fills in the appropriate fields
+ * Parse a user information part and fill in the appropriate fields
* of the @uri structure
*
* userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
@@ -508,7 +508,7 @@ rfc3986_parse_authority(URI *uri, const char **str)
cur = *str;
/*
- * try to parse an userinfo and check for the trailing @
+ * try to parse a userinfo and check for the trailing @
*/
ret = rfc3986_parse_user_info(uri, &cur);
if ((ret != 0) || (*cur != '@'))
diff --git a/vl.c b/vl.c
index d77dd862f9..68e8c003d1 100644
--- a/vl.c
+++ b/vl.c
@@ -65,6 +65,7 @@ int main(int argc, char **argv)
#include "hw/bt.h"
#include "sysemu/watchdog.h"
#include "hw/smbios/smbios.h"
+#include "hw/acpi/acpi.h"
#include "hw/xen/xen.h"
#include "hw/qdev.h"
#include "hw/loader.h"
@@ -92,6 +93,7 @@ int main(int argc, char **argv)
#include "sysemu/cpus.h"
#include "migration/colo.h"
#include "sysemu/kvm.h"
+#include "sysemu/hax.h"
#include "qapi/qmp/qjson.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
@@ -180,6 +182,7 @@ bool boot_strict;
uint8_t *boot_splash_filedata;
size_t boot_splash_filedata_size;
uint8_t qemu_extra_params_fw[2];
+int only_migratable; /* turn it off unless user states otherwise */
int icount_align_option;
@@ -1277,11 +1280,6 @@ static void smp_parse(QemuOpts *opts)
max_cpus = qemu_opt_get_number(opts, "maxcpus", cpus);
- if (max_cpus > MAX_CPUMASK_BITS) {
- error_report("unsupported number of maxcpus");
- exit(1);
- }
-
if (max_cpus < cpus) {
error_report("maxcpus must be equal to or greater than smp");
exit(1);
@@ -1635,16 +1633,6 @@ void vm_state_notify(int running, RunState state)
}
}
-/* reset/shutdown handler */
-
-typedef struct QEMUResetEntry {
- QTAILQ_ENTRY(QEMUResetEntry) entry;
- QEMUResetHandler *func;
- void *opaque;
-} QEMUResetEntry;
-
-static QTAILQ_HEAD(reset_handlers, QEMUResetEntry) reset_handlers =
- QTAILQ_HEAD_INITIALIZER(reset_handlers);
static int reset_requested;
static int shutdown_requested, shutdown_signal = -1;
static pid_t shutdown_pid;
@@ -1734,38 +1722,6 @@ static int qemu_debug_requested(void)
return r;
}
-void qemu_register_reset(QEMUResetHandler *func, void *opaque)
-{
- QEMUResetEntry *re = g_malloc0(sizeof(QEMUResetEntry));
-
- re->func = func;
- re->opaque = opaque;
- QTAILQ_INSERT_TAIL(&reset_handlers, re, entry);
-}
-
-void qemu_unregister_reset(QEMUResetHandler *func, void *opaque)
-{
- QEMUResetEntry *re;
-
- QTAILQ_FOREACH(re, &reset_handlers, entry) {
- if (re->func == func && re->opaque == opaque) {
- QTAILQ_REMOVE(&reset_handlers, re, entry);
- g_free(re);
- return;
- }
- }
-}
-
-void qemu_devices_reset(void)
-{
- QEMUResetEntry *re, *nre;
-
- /* reset all devices */
- QTAILQ_FOREACH_SAFE(re, &reset_handlers, entry, nre) {
- re->func(re->opaque);
- }
-}
-
void qemu_system_reset(bool report)
{
MachineClass *mc;
@@ -1959,7 +1915,7 @@ static void main_loop(void)
int64_t ti;
#endif
do {
- nonblocking = !kvm_enabled() && !xen_enabled() && last_io > 0;
+ nonblocking = tcg_enabled() && last_io > 0;
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
@@ -2859,7 +2815,8 @@ static bool object_create_initial(const char *type)
g_str_equal(type, "filter-mirror") ||
g_str_equal(type, "filter-redirector") ||
g_str_equal(type, "colo-compare") ||
- g_str_equal(type, "filter-rewriter")) {
+ g_str_equal(type, "filter-rewriter") ||
+ g_str_equal(type, "filter-replay")) {
return false;
}
@@ -2994,6 +2951,18 @@ static int global_init_func(void *opaque, QemuOpts *opts, Error **errp)
return 0;
}
+static int qemu_read_default_config_file(void)
+{
+ int ret;
+
+ ret = qemu_read_config_file(CONFIG_QEMU_CONFDIR "/qemu.conf");
+ if (ret < 0 && ret != -ENOENT) {
+ return ret;
+ }
+
+ return 0;
+}
+
int main(int argc, char **argv, char **envp)
{
int i;
@@ -3121,10 +3090,8 @@ int main(int argc, char **argv, char **envp)
}
}
- if (defconfig) {
- int ret;
- ret = qemu_read_default_config_files(userconfig);
- if (ret < 0) {
+ if (defconfig && userconfig) {
+ if (qemu_read_default_config_file() < 0) {
exit(1);
}
}
@@ -3703,7 +3670,7 @@ int main(int argc, char **argv, char **envp)
if (!opts) {
exit(1);
}
- do_acpitable_option(opts);
+ acpi_table_add(opts, &error_fatal);
break;
case QEMU_OPTION_smbios:
opts = qemu_opts_parse_noisily(qemu_find_opts("smbios"),
@@ -3711,7 +3678,7 @@ int main(int argc, char **argv, char **envp)
if (!opts) {
exit(1);
}
- do_smbios_option(opts);
+ smbios_entry_add(opts, &error_fatal);
break;
case QEMU_OPTION_fwcfg:
opts = qemu_opts_parse_noisily(qemu_find_opts("fw_cfg"),
@@ -3724,6 +3691,10 @@ int main(int argc, char **argv, char **envp)
olist = qemu_find_opts("machine");
qemu_opts_parse_noisily(olist, "accel=kvm", false);
break;
+ case QEMU_OPTION_enable_hax:
+ olist = qemu_find_opts("machine");
+ qemu_opts_parse_noisily(olist, "accel=hax", false);
+ break;
case QEMU_OPTION_M:
case QEMU_OPTION_machine:
olist = qemu_find_opts("machine");
@@ -3914,6 +3885,9 @@ int main(int argc, char **argv, char **envp)
}
incoming = optarg;
break;
+ case QEMU_OPTION_only_migratable:
+ only_migratable = 1;
+ break;
case QEMU_OPTION_nodefaults:
has_defaults = 0;
break;
@@ -4418,8 +4392,8 @@ int main(int argc, char **argv, char **envp)
cpu_ticks_init();
if (icount_opts) {
- if (kvm_enabled() || xen_enabled()) {
- error_report("-icount is not allowed with kvm or xen");
+ if (!tcg_enabled()) {
+ error_report("-icount is not allowed with hardware virtualization");
exit(1);
}
configure_icount(icount_opts, &error_abort);
@@ -4553,7 +4527,9 @@ int main(int argc, char **argv, char **envp)
cpu_synchronize_all_post_init();
- numa_post_machine_init();
+ if (hax_enabled()) {
+ hax_sync_vcpus();
+ }
if (qemu_opts_foreach(qemu_find_opts("fw_cfg"),
parse_fw_cfg, fw_cfg_find(), NULL) != 0) {
@@ -4575,6 +4551,9 @@ int main(int argc, char **argv, char **envp)
device_init_func, NULL, NULL)) {
exit(1);
}
+
+ numa_post_machine_init();
+
rom_reset_order_override();
/* Did we create any drives that we failed to create a device for? */
diff --git a/xen-mapcache.c b/xen-mapcache.c
index 8f3a592013..31debdfb2c 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -163,6 +163,7 @@ static void xen_remap_bucket(MapCacheEntry *entry,
err = g_malloc0(nb_pfn * sizeof (int));
if (entry->vaddr_base != NULL) {
+ ram_block_notify_remove(entry->vaddr_base, entry->size);
if (munmap(entry->vaddr_base, entry->size) != 0) {
perror("unmap fails");
exit(-1);
@@ -188,6 +189,7 @@ static void xen_remap_bucket(MapCacheEntry *entry,
entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
BITS_TO_LONGS(size >> XC_PAGE_SHIFT));
+ ram_block_notify_add(entry->vaddr_base, entry->size);
bitmap_zero(entry->valid_mapping, nb_pfn);
for (i = 0; i < nb_pfn; i++) {
if (!err[i]) {
@@ -397,6 +399,7 @@ static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
}
pentry->next = entry->next;
+ ram_block_notify_remove(entry->vaddr_base, entry->size);
if (munmap(entry->vaddr_base, entry->size) != 0) {
perror("unmap fails");
exit(-1);
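
A hedged sketch of the pairing these hunks establish: whenever the map cache replaces a mapping, RAM-block notifier listeners are told about the removal before munmap() and about the new range once it exists (the notifier calls match those used above; everything else is illustrative):

/* Sketch of the notify/remap/notify order used above; error handling
 * and the actual foreign-memory mapping call are omitted. */
static void remap_bucket_sketch(void *old_base, size_t old_size,
                                void *new_base, size_t new_size)
{
    if (old_base != NULL) {
        ram_block_notify_remove(old_base, old_size);  /* listeners first */
        munmap(old_base, old_size);
    }

    /* ... establish the new mapping at new_base ... */

    ram_block_notify_add(new_base, new_size);         /* then re-announce */
}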