-rw-r--r--  .gitignore | 1
-rw-r--r--  MAINTAINERS | 20
-rw-r--r--  Makefile | 12
-rw-r--r--  VERSION | 2
-rw-r--r--  accel/stubs/Makefile.objs | 1
-rw-r--r--  accel/stubs/hvf-stub.c | 31
-rw-r--r--  accel/tcg/cpu-exec-common.c | 1
-rw-r--r--  accel/tcg/cpu-exec.c | 18
-rw-r--r--  accel/tcg/tcg-runtime.c | 5
-rw-r--r--  accel/tcg/translate-all.c | 3
-rw-r--r--  backends/tpm.c | 92
-rw-r--r--  block.c | 89
-rw-r--r--  block/backup.c | 118
-rw-r--r--  block/commit.c | 8
-rw-r--r--  block/curl.c | 24
-rw-r--r--  block/dirty-bitmap.c | 5
-rw-r--r--  block/dmg.h | 1
-rw-r--r--  block/io.c | 164
-rw-r--r--  block/iscsi.c | 51
-rw-r--r--  block/nbd.c | 1
-rw-r--r--  block/null.c | 3
-rw-r--r--  block/qcow2.c | 51
-rw-r--r--  block/qcow2.h | 3
-rw-r--r--  block/replication.c | 6
-rw-r--r--  block/sheepdog.c | 169
-rw-r--r--  blockdev-nbd.c | 50
-rw-r--r--  blockdev.c | 270
-rw-r--r--  blockjob.c | 52
-rw-r--r--  bsd-user/main.c | 1
-rw-r--r--  chardev/baum.c | 8
-rw-r--r--  chardev/char-mux.c | 10
-rw-r--r--  chardev/char-socket.c | 73
-rw-r--r--  chardev/char.c | 18
-rw-r--r--  chardev/wctablet.c | 4
-rwxr-xr-x  configure | 100
-rw-r--r--  contrib/systemd/qemu-guest-agent.service | 11
-rw-r--r--  contrib/systemd/qemu-pr-helper.service | 15
-rw-r--r--  contrib/systemd/qemu-pr-helper.socket | 9
-rw-r--r--  cpus.c | 123
-rw-r--r--  default-configs/arm-softmmu.mak | 2
-rw-r--r--  disas/arm.c | 2
-rw-r--r--  disas/nios2.c | 3
-rw-r--r--  docs/devel/multiple-iothreads.txt | 7
-rw-r--r--  docs/devel/qapi-code-gen.txt | 2
-rw-r--r--  dump.c | 7
-rw-r--r--  exec.c | 47
-rw-r--r--  fsdev/file-op-9p.h | 36
-rw-r--r--  fsdev/qemu-fsdev.c | 4
-rw-r--r--  gdbstub.c | 113
-rw-r--r--  hmp-commands.hx | 33
-rw-r--r--  hmp.c | 6
-rw-r--r--  hw/9pfs/9p-handle.c | 54
-rw-r--r--  hw/9pfs/9p-local.c | 36
-rw-r--r--  hw/9pfs/9p-proxy.c | 30
-rw-r--r--  hw/9pfs/9p-synth.c | 2
-rw-r--r--  hw/9pfs/9p-xattr.h | 5
-rw-r--r--  hw/9pfs/9p.c | 22
-rw-r--r--  hw/9pfs/9p.h | 14
-rw-r--r--  hw/9pfs/virtio-9p-device.c | 62
-rw-r--r--  hw/9pfs/xen-9p-backend.c | 2
-rw-r--r--  hw/acpi/core.c | 1
-rw-r--r--  hw/acpi/ipmi-stub.c | 1
-rw-r--r--  hw/alpha/dp264.c | 4
-rw-r--r--  hw/arm/spitz.c | 1
-rw-r--r--  hw/arm/xlnx-zcu102.c | 23
-rw-r--r--  hw/arm/xlnx-zynqmp.c | 26
-rw-r--r--  hw/audio/fmopl.c | 1
-rw-r--r--  hw/audio/fmopl.h | 1
-rw-r--r--  hw/audio/pcspk.c | 1
-rw-r--r--  hw/block/block.c | 15
-rw-r--r--  hw/block/dataplane/virtio-blk.c | 12
-rw-r--r--  hw/block/dataplane/virtio-blk.h | 2
-rw-r--r--  hw/block/fdc.c | 17
-rw-r--r--  hw/block/m25p80.c | 80
-rw-r--r--  hw/block/nvme.c | 372
-rw-r--r--  hw/block/trace-events | 100
-rw-r--r--  hw/block/virtio-blk.c | 30
-rw-r--r--  hw/block/xen_disk.c | 53
-rw-r--r--  hw/char/debugcon.c | 1
-rw-r--r--  hw/char/xen_console.c | 1
-rw-r--r--  hw/core/machine.c | 1
-rw-r--r--  hw/core/qdev-properties-system.c | 1
-rw-r--r--  hw/cpu/core.c | 1
-rw-r--r--  hw/display/cirrus_vga.c | 1
-rw-r--r--  hw/display/qxl.h | 1
-rw-r--r--  hw/display/tc6393xb.c | 1
-rw-r--r--  hw/display/vga-isa-mm.c | 4
-rw-r--r--  hw/display/vga-isa.c | 3
-rw-r--r--  hw/display/vga-pci.c | 1
-rw-r--r--  hw/display/vga.c | 5
-rw-r--r--  hw/display/vga_int.h | 3
-rw-r--r--  hw/display/vga_regs.h (renamed from hw/display/vga.h) | 0
-rw-r--r--  hw/display/virtio-vga.c | 1
-rw-r--r--  hw/display/vmware_vga.c | 1
-rw-r--r--  hw/display/xenfb.c | 294
-rw-r--r--  hw/i2c/pm_smbus.c | 1
-rw-r--r--  hw/i2c/smbus_ich9.c | 1
-rw-r--r--  hw/i386/Makefile.objs | 2
-rw-r--r--  hw/i386/acpi-build.c | 35
-rw-r--r--  hw/i386/amd_iommu.c | 5
-rw-r--r--  hw/i386/amd_iommu.h | 5
-rw-r--r--  hw/i386/kvm/i8259.c | 1
-rw-r--r--  hw/i386/pc.c | 5
-rw-r--r--  hw/i386/trace-events | 4
-rw-r--r--  hw/i386/vmmouse.c (renamed from hw/input/vmmouse.c) | 1
-rw-r--r--  hw/i386/vmport.c (renamed from hw/misc/vmport.c) | 24
-rw-r--r--  hw/i386/xen/xen-mapcache.c | 2
-rw-r--r--  hw/i386/xen/xen_platform.c | 1
-rw-r--r--  hw/ide/ahci.c | 1
-rw-r--r--  hw/ide/cmd646.c | 1
-rw-r--r--  hw/ide/core.c | 3
-rw-r--r--  hw/ide/ich.c | 1
-rw-r--r--  hw/ide/isa.c | 1
-rw-r--r--  hw/ide/microdrive.c | 1
-rw-r--r--  hw/ide/pci.c | 1
-rw-r--r--  hw/ide/piix.c | 2
-rw-r--r--  hw/ide/qdev.c | 12
-rw-r--r--  hw/ide/via.c | 1
-rw-r--r--  hw/input/Makefile.objs | 3
-rw-r--r--  hw/input/adb-internal.h | 49
-rw-r--r--  hw/input/adb-kbd.c | 400
-rw-r--r--  hw/input/adb-mouse.c | 254
-rw-r--r--  hw/input/adb.c | 622
-rw-r--r--  hw/input/hid.c | 8
-rw-r--r--  hw/input/trace-events | 8
-rw-r--r--  hw/intc/apic.c | 12
-rw-r--r--  hw/intc/arm_gicv3_its_common.c | 2
-rw-r--r--  hw/intc/arm_gicv3_its_kvm.c | 53
-rw-r--r--  hw/intc/armv7m_nvic.c | 100
-rw-r--r--  hw/intc/i8259.c | 86
-rw-r--r--  hw/intc/i8259_common.c | 49
-rw-r--r--  hw/intc/lm32_pic.c | 1
-rw-r--r--  hw/intc/openpic.c | 102
-rw-r--r--  hw/intc/trace-events | 11
-rw-r--r--  hw/intc/xics.c | 34
-rw-r--r--  hw/intc/xics_spapr.c | 116
-rw-r--r--  hw/ipmi/isa_ipmi_bt.c | 1
-rw-r--r--  hw/ipmi/isa_ipmi_kcs.c | 1
-rw-r--r--  hw/isa/i82378.c | 5
-rw-r--r--  hw/isa/vt82c686.c | 1
-rw-r--r--  hw/mem/pc-dimm.c | 2
-rw-r--r--  hw/mips/boston.c | 14
-rw-r--r--  hw/mips/mips_fulong2e.c | 4
-rw-r--r--  hw/mips/mips_jazz.c | 5
-rw-r--r--  hw/mips/mips_malta.c | 4
-rw-r--r--  hw/mips/mips_r4k.c | 5
-rw-r--r--  hw/misc/Makefile.objs | 2
-rw-r--r--  hw/misc/imx6_ccm.c | 2
-rw-r--r--  hw/misc/ivshmem.c | 1
-rw-r--r--  hw/misc/pvpanic.c | 12
-rw-r--r--  hw/misc/sga.c | 1
-rw-r--r--  hw/misc/vmcoreinfo.c | 3
-rw-r--r--  hw/moxie/moxiesim.c | 13
-rw-r--r--  hw/net/e1000.c | 92
-rw-r--r--  hw/net/e1000e.c | 4
-rw-r--r--  hw/net/e1000e_core.c | 16
-rw-r--r--  hw/net/e1000e_core.h | 2
-rw-r--r--  hw/net/e1000x_common.h | 2
-rw-r--r--  hw/net/eepro100.c | 32
-rw-r--r--  hw/net/ftgmac100.c | 2
-rw-r--r--  hw/net/lan9118.c | 3
-rw-r--r--  hw/net/ne2000-isa.c | 6
-rw-r--r--  hw/net/ne2000.c | 4
-rw-r--r--  hw/net/ne2000.h | 3
-rw-r--r--  hw/net/opencores_eth.c | 3
-rw-r--r--  hw/net/pcnet.c | 22
-rw-r--r--  hw/net/rtl8139.c | 2
-rw-r--r--  hw/net/sungem.c | 5
-rw-r--r--  hw/net/sunhme.c | 25
-rw-r--r--  hw/nios2/boot.c | 1
-rw-r--r--  hw/nvram/Makefile.objs | 1
-rw-r--r--  hw/nvram/eeprom_at24c.c | 205
-rw-r--r--  hw/pci-bridge/pci_expander_bridge.c | 1
-rw-r--r--  hw/pci-host/ppce500.c | 5
-rw-r--r--  hw/ppc/e500.c | 4
-rw-r--r--  hw/ppc/pnv.c | 2
-rw-r--r--  hw/ppc/pnv_core.c | 10
-rw-r--r--  hw/ppc/prep.c | 1
-rw-r--r--  hw/ppc/spapr.c | 222
-rw-r--r--  hw/ppc/spapr_cpu_core.c | 42
-rw-r--r--  hw/ppc/spapr_events.c | 22
-rw-r--r--  hw/ppc/spapr_hcall.c | 1
-rw-r--r--  hw/ppc/spapr_pci.c | 13
-rw-r--r--  hw/ppc/spapr_rtas.c | 21
-rw-r--r--  hw/ppc/spapr_vio.c | 5
-rw-r--r--  hw/ppc/trace-events | 4
-rw-r--r--  hw/s390x/3270-ccw.c | 2
-rw-r--r--  hw/s390x/css-bridge.c | 13
-rw-r--r--  hw/s390x/css.c | 35
-rw-r--r--  hw/s390x/s390-ccw.c | 2
-rw-r--r--  hw/s390x/s390-pci-bus.h | 1
-rw-r--r--  hw/s390x/s390-pci-inst.c | 337
-rw-r--r--  hw/s390x/s390-pci-inst.h | 22
-rw-r--r--  hw/s390x/s390-virtio-ccw.c | 59
-rw-r--r--  hw/s390x/virtio-ccw.c | 4
-rw-r--r--  hw/scsi/scsi-bus.c | 16
-rw-r--r--  hw/scsi/scsi-disk.c | 14
-rw-r--r--  hw/scsi/vhost-user-scsi.c | 1
-rw-r--r--  hw/smbios/smbios_type_38-stub.c | 1
-rw-r--r--  hw/sparc/sun4m.c | 1
-rw-r--r--  hw/ssi/aspeed_smc.c | 3
-rw-r--r--  hw/ssi/xilinx_spips.c | 928
-rw-r--r--  hw/timer/i8254.c | 1
-rw-r--r--  hw/timer/i8254_common.c | 1
-rw-r--r--  hw/timer/mc146818rtc.c | 2
-rw-r--r--  hw/tpm/Makefile.objs | 5
-rw-r--r--  hw/tpm/tpm_emulator.c | 116
-rw-r--r--  hw/tpm/tpm_int.h | 31
-rw-r--r--  hw/tpm/tpm_ioctl.h | 28
-rw-r--r--  hw/tpm/tpm_passthrough.c | 91
-rw-r--r--  hw/tpm/tpm_tis.c | 215
-rw-r--r--  hw/tpm/tpm_util.c | 229
-rw-r--r--  hw/tpm/tpm_util.h | 21
-rw-r--r--  hw/unicore32/puv3.c | 15
-rw-r--r--  hw/usb/bus.c | 22
-rw-r--r--  hw/usb/dev-storage.c | 29
-rw-r--r--  hw/vfio/ccw.c | 2
-rw-r--r--  hw/vfio/common.c | 8
-rw-r--r--  hw/vfio/pci.h | 2
-rw-r--r--  hw/virtio/vhost-vsock.c | 2
-rw-r--r--  hw/virtio/virtio-balloon.c | 2
-rw-r--r--  hw/watchdog/wdt_ib700.c | 1
-rw-r--r--  hw/xen/xen_pt.c | 1
-rw-r--r--  include/block/block.h | 15
-rw-r--r--  include/block/block_int.h | 6
-rw-r--r--  include/block/dirty-bitmap.h | 1
-rw-r--r--  include/chardev/char.h | 1
-rw-r--r--  include/exec/exec-all.h | 6
-rw-r--r--  include/exec/gen-icount.h | 9
-rw-r--r--  include/exec/helper-gen.h | 11
-rw-r--r--  include/exec/helper-head.h | 2
-rw-r--r--  include/exec/helper-proto.h | 5
-rw-r--r--  include/exec/helper-tcg.h | 7
-rw-r--r--  include/exec/memory.h | 12
-rw-r--r--  include/hw/acpi/acpi-defs.h | 7
-rw-r--r--  include/hw/acpi/acpi.h | 11
-rw-r--r--  include/hw/acpi/ich9.h | 2
-rw-r--r--  include/hw/acpi/ipmi.h | 1
-rw-r--r--  include/hw/arm/xlnx-zynqmp.h | 5
-rw-r--r--  include/hw/block/block.h | 4
-rw-r--r--  include/hw/compat.h | 3
-rw-r--r--  include/hw/cpu/core.h | 1
-rw-r--r--  include/hw/display/vga.h | 25
-rw-r--r--  include/hw/i2c/ppc4xx_i2c.h | 1
-rw-r--r--  include/hw/i386/apic.h | 1
-rw-r--r--  include/hw/i386/pc.h | 48
-rw-r--r--  include/hw/intc/armv7m_nvic.h | 4
-rw-r--r--  include/hw/isa/i8259_internal.h | 7
-rw-r--r--  include/hw/misc/pvpanic.h | 21
-rw-r--r--  include/hw/net/ne2000-isa.h | 33
-rw-r--r--  include/hw/pci-host/spapr.h | 2
-rw-r--r--  include/hw/ppc/spapr.h | 17
-rw-r--r--  include/hw/ppc/spapr_cpu_core.h | 2
-rw-r--r--  include/hw/ppc/spapr_vio.h | 2
-rw-r--r--  include/hw/ppc/xics.h | 8
-rw-r--r--  include/hw/qdev-properties.h | 1
-rw-r--r--  include/hw/registerfields.h | 15
-rw-r--r--  include/hw/s390x/css.h | 13
-rw-r--r--  include/hw/ssi/xilinx_spips.h | 74
-rw-r--r--  include/hw/timer/i8254.h | 5
-rw-r--r--  include/hw/timer/i8254_internal.h | 2
-rw-r--r--  include/hw/timer/mc146818rtc.h | 3
-rw-r--r--  include/hw/unicore32/puv3.h | 10
-rw-r--r--  include/hw/usb.h | 1
-rw-r--r--  include/hw/virtio/virtio-blk.h | 1
-rw-r--r--  include/io/net-listener.h | 174
-rw-r--r--  include/net/net.h | 5
-rw-r--r--  include/net/slirp.h | 2
-rw-r--r--  include/qemu/coroutine.h | 6
-rw-r--r--  include/qemu/hbitmap.h | 8
-rw-r--r--  include/qemu/option.h | 5
-rw-r--r--  include/qemu/osdep.h | 3
-rw-r--r--  include/qemu/qht.h | 6
-rw-r--r--  include/qemu/queue.h | 5
-rw-r--r--  include/qemu/sockets.h | 2
-rw-r--r--  include/qemu/typedefs.h | 1
-rw-r--r--  include/qemu/uuid.h | 2
-rw-r--r--  include/qom/cpu.h | 2
-rw-r--r--  include/scsi/utils.h | 9
-rw-r--r--  include/standard-headers/asm-s390/virtio-ccw.h | 1
-rw-r--r--  include/standard-headers/asm-x86/hyperv.h | 394
-rw-r--r--  include/standard-headers/linux/input-event-codes.h | 2
-rw-r--r--  include/standard-headers/linux/input.h | 1
-rw-r--r--  include/standard-headers/linux/pci_regs.h | 45
-rw-r--r--  include/sysemu/hax.h | 1
-rw-r--r--  include/sysemu/hvf.h | 107
-rw-r--r--  include/sysemu/iothread.h | 4
-rw-r--r--  include/sysemu/numa.h | 10
-rw-r--r--  include/sysemu/sysemu.h | 2
-rw-r--r--  include/sysemu/tpm.h | 48
-rw-r--r--  include/sysemu/tpm_backend.h | 50
-rw-r--r--  include/ui/input.h | 3
-rw-r--r--  io/Makefile.objs | 1
-rw-r--r--  io/net-listener.c | 307
-rw-r--r--  iothread.c | 27
-rw-r--r--  linux-headers/asm-arm/kvm.h | 8
-rw-r--r--  linux-headers/asm-arm/kvm_para.h | 1
-rw-r--r--  linux-headers/asm-arm/unistd.h | 2
-rw-r--r--  linux-headers/asm-arm64/kvm.h | 8
-rw-r--r--  linux-headers/asm-arm64/unistd.h | 1
-rw-r--r--  linux-headers/asm-powerpc/epapr_hcalls.h | 1
-rw-r--r--  linux-headers/asm-powerpc/kvm.h | 1
-rw-r--r--  linux-headers/asm-powerpc/kvm_para.h | 1
-rw-r--r--  linux-headers/asm-powerpc/unistd.h | 1
-rw-r--r--  linux-headers/asm-s390/kvm.h | 1
-rw-r--r--  linux-headers/asm-s390/kvm_para.h | 1
-rw-r--r--  linux-headers/asm-s390/unistd.h | 4
-rw-r--r--  linux-headers/asm-x86/kvm.h | 1
-rw-r--r--  linux-headers/asm-x86/kvm_para.h | 2
-rw-r--r--  linux-headers/asm-x86/unistd.h | 1
-rw-r--r--  linux-headers/linux/kvm.h | 2
-rw-r--r--  linux-headers/linux/kvm_para.h | 1
-rw-r--r--  linux-headers/linux/psci.h | 1
-rw-r--r--  linux-headers/linux/userfaultfd.h | 1
-rw-r--r--  linux-headers/linux/vfio.h | 1
-rw-r--r--  linux-headers/linux/vfio_ccw.h | 1
-rw-r--r--  linux-headers/linux/vhost.h | 1
-rw-r--r--  linux-user/main.c | 27
-rw-r--r--  linux-user/signal.c | 9
-rw-r--r--  memory.c | 5
-rw-r--r--  migration/block.c | 2
-rw-r--r--  monitor.c | 1
-rw-r--r--  nbd/server.c | 79
-rw-r--r--  nbd/trace-events | 1
-rw-r--r--  net/colo-compare.c | 1
-rw-r--r--  net/net.c | 40
-rw-r--r--  net/slirp.c | 34
-rw-r--r--  numa.c | 95
-rw-r--r--  pc-bios/s390-ccw.img | bin 26416 -> 26416 bytes
-rw-r--r--  pc-bios/s390-ccw/start.S | 30
-rw-r--r--  qapi-schema.json | 13
-rw-r--r--  qapi/block-core.json | 44
-rw-r--r--  qemu-doc.texi | 312
-rw-r--r--  qemu-io-cmds.c | 3
-rw-r--r--  qemu-nbd.c | 61
-rw-r--r--  qemu-options-wrapper.h | 2
-rw-r--r--  qemu-options.hx | 367
-rw-r--r--  qga/channel-posix.c | 2
-rw-r--r--  qmp.c | 5
-rwxr-xr-x  scripts/checkpatch.pl | 7
-rw-r--r--  scripts/coccinelle/cpu_restore_state.cocci | 19
-rw-r--r--  scripts/dump-guest-memory.py | 3
-rwxr-xr-x  scripts/git-submodule.sh | 2
-rw-r--r--  scripts/hxtool | 3
-rw-r--r--  scripts/qapi.py | 107
-rwxr-xr-x  scripts/qapi2texi.py | 65
-rw-r--r--  scsi/qemu-pr-helper.c | 30
-rw-r--r--  scsi/utils.c | 163
-rw-r--r--  target/alpha/mem_helper.c | 13
-rw-r--r--  target/alpha/translate.c | 22
-rw-r--r--  target/arm/cpu.c | 13
-rw-r--r--  target/arm/cpu.h | 73
-rw-r--r--  target/arm/helper.c | 489
-rw-r--r--  target/arm/helper.h | 2
-rw-r--r--  target/arm/internals.h | 193
-rw-r--r--  target/arm/op_helper.c | 100
-rw-r--r--  target/arm/translate-a64.c | 37
-rw-r--r--  target/arm/translate.c | 68
-rw-r--r--  target/arm/translate.h | 10
-rw-r--r--  target/cris/translate.c | 4
-rw-r--r--  target/hppa/translate.c | 63
-rw-r--r--  target/i386/Makefile.objs | 1
-rw-r--r--  target/i386/cpu-qom.h | 4
-rw-r--r--  target/i386/cpu.c | 98
-rw-r--r--  target/i386/cpu.h | 113
-rw-r--r--  target/i386/hax-darwin.c | 6
-rw-r--r--  target/i386/hax-darwin.h | 3
-rw-r--r--  target/i386/hax-windows.h | 3
-rw-r--r--  target/i386/hvf/Makefile.objs | 2
-rw-r--r--  target/i386/hvf/README.md | 7
-rw-r--r--  target/i386/hvf/hvf-i386.h | 48
-rw-r--r--  target/i386/hvf/hvf.c | 959
-rw-r--r--  target/i386/hvf/panic.h | 45
-rw-r--r--  target/i386/hvf/vmcs.h | 374
-rw-r--r--  target/i386/hvf/vmx.h | 222
-rw-r--r--  target/i386/hvf/x86.c | 186
-rw-r--r--  target/i386/hvf/x86.h | 400
-rw-r--r--  target/i386/hvf/x86_cpuid.c | 166
-rw-r--r--  target/i386/hvf/x86_decode.c | 2186
-rw-r--r--  target/i386/hvf/x86_decode.h | 323
-rw-r--r--  target/i386/hvf/x86_descr.c | 125
-rw-r--r--  target/i386/hvf/x86_descr.h | 58
-rw-r--r--  target/i386/hvf/x86_emu.c | 1483
-rw-r--r--  target/i386/hvf/x86_emu.h | 49
-rw-r--r--  target/i386/hvf/x86_flags.c | 315
-rw-r--r--  target/i386/hvf/x86_flags.h | 80
-rw-r--r--  target/i386/hvf/x86_mmu.c | 272
-rw-r--r--  target/i386/hvf/x86_mmu.h | 43
-rw-r--r--  target/i386/hvf/x86_task.c | 191
-rw-r--r--  target/i386/hvf/x86_task.h | 18
-rw-r--r--  target/i386/hvf/x86hvf.c | 465
-rw-r--r--  target/i386/hvf/x86hvf.h | 39
-rw-r--r--  target/i386/kvm.c | 48
-rw-r--r--  target/i386/svm_helper.c | 4
-rw-r--r--  target/i386/translate.c | 22
-rw-r--r--  target/lm32/op_helper.c | 7
-rw-r--r--  target/lm32/translate.c | 2
-rw-r--r--  target/m68k/Makefile.objs | 1
-rw-r--r--  target/m68k/cpu.c | 20
-rw-r--r--  target/m68k/cpu.h | 86
-rw-r--r--  target/m68k/gdbstub.c | 2
-rw-r--r--  target/m68k/helper.c | 99
-rw-r--r--  target/m68k/helper.h | 11
-rw-r--r--  target/m68k/monitor.c | 40
-rw-r--r--  target/m68k/op_helper.c | 345
-rw-r--r--  target/m68k/translate.c | 522
-rw-r--r--  target/microblaze/op_helper.c | 7
-rw-r--r--  target/microblaze/translate.c | 4
-rw-r--r--  target/mips/translate.c | 2
-rw-r--r--  target/moxie/helper.c | 5
-rw-r--r--  target/nios2/cpu.h | 1
-rw-r--r--  target/nios2/helper.c | 5
-rw-r--r--  target/nios2/mmu.c | 7
-rw-r--r--  target/nios2/op_helper.c | 1
-rw-r--r--  target/nios2/translate.c | 7
-rw-r--r--  target/openrisc/exception_helper.c | 1
-rw-r--r--  target/openrisc/mmu_helper.c | 6
-rw-r--r--  target/ppc/cpu-qom.h | 1
-rw-r--r--  target/ppc/cpu.h | 105
-rw-r--r--  target/ppc/kvm.c | 3
-rw-r--r--  target/ppc/translate.c | 25
-rw-r--r--  target/ppc/translate_init.c | 26
-rw-r--r--  target/s390x/cc_helper.c | 2
-rw-r--r--  target/s390x/cpu.h | 31
-rw-r--r--  target/s390x/cpu_models.c | 103
-rw-r--r--  target/s390x/cpu_models.h | 1
-rw-r--r--  target/s390x/crypto_helper.c | 7
-rw-r--r--  target/s390x/diag.c | 14
-rw-r--r--  target/s390x/excp_helper.c | 17
-rw-r--r--  target/s390x/fpu_helper.c | 2
-rw-r--r--  target/s390x/gen-features.c | 88
-rw-r--r--  target/s390x/helper.c | 18
-rw-r--r--  target/s390x/helper.h | 6
-rw-r--r--  target/s390x/insn-data.def | 29
-rw-r--r--  target/s390x/int_helper.c | 14
-rw-r--r--  target/s390x/internal.h | 41
-rw-r--r--  target/s390x/interrupt.c | 9
-rw-r--r--  target/s390x/ioinst.c | 113
-rw-r--r--  target/s390x/kvm.c | 84
-rw-r--r--  target/s390x/mem_helper.c | 35
-rw-r--r--  target/s390x/misc_helper.c | 111
-rw-r--r--  target/s390x/mmu_helper.c | 23
-rw-r--r--  target/s390x/translate.c | 233
-rw-r--r--  target/sh4/cpu.h | 4
-rw-r--r--  target/sh4/helper.c | 1
-rw-r--r--  target/sh4/translate.c | 277
-rw-r--r--  target/sparc/translate.c | 2
-rw-r--r--  target/tilegx/translate.c | 10
-rw-r--r--  target/tricore/op_helper.c | 13
-rw-r--r--  target/unicore32/op_helper.c | 7
-rw-r--r--  target/unicore32/translate.c | 4
-rw-r--r--  tcg/optimize.c | 20
-rw-r--r--  tcg/tcg-op.c | 24
-rw-r--r--  tcg/tcg-op.h | 4
-rw-r--r--  tcg/tcg.c | 149
-rw-r--r--  tcg/tcg.h | 60
-rw-r--r--  tcg/tci.c | 12
-rw-r--r--  tcg/tci/tcg-target.inc.c | 6
-rw-r--r--  tests/Makefile.include | 27
-rw-r--r--  tests/boot-serial-test.c | 69
-rwxr-xr-x  tests/docker/test-full | 79
-rw-r--r--  tests/qapi-schema/doc-bad-section.err | 0
-rw-r--r--  tests/qapi-schema/doc-bad-section.exit | 1
-rw-r--r--  tests/qapi-schema/doc-bad-section.json | 11
-rw-r--r--  tests/qapi-schema/doc-bad-section.out | 13
-rw-r--r--  tests/qapi-schema/doc-good.json | 1
-rw-r--r--  tests/qapi-schema/doc-good.out | 4
-rw-r--r--  tests/qapi-schema/doc-good.texi | 11
-rw-r--r--  tests/qapi-schema/test-qapi.py | 6
-rwxr-xr-x  tests/qemu-iotests/197 | 4
-rwxr-xr-x  tests/qemu-iotests/202 | 95
-rw-r--r--  tests/qemu-iotests/202.out | 11
-rwxr-xr-x  tests/qemu-iotests/203 | 59
-rw-r--r--  tests/qemu-iotests/203.out | 6
-rw-r--r--  tests/qemu-iotests/common.filter | 3
-rw-r--r--  tests/qemu-iotests/group | 2
-rw-r--r--  tests/qemu-iotests/iotests.py | 5
-rw-r--r--  tests/test-aio-multithread.c | 1
-rw-r--r--  tests/test-bdrv-drain.c | 651
-rw-r--r--  tests/test-char.c | 17
-rw-r--r--  tests/test-clone-visitor.c | 1
-rw-r--r--  tests/test-hbitmap.c | 61
-rw-r--r--  tests/test-hmp.c | 7
-rw-r--r--  tests/test-uuid.c | 8
-rw-r--r--  tests/vhost-user-test.c | 1
-rw-r--r--  tests/virtio-9p-test.c | 33
-rw-r--r--  tests/vmgenid-test.c | 3
-rw-r--r--  tpm.c | 34
-rw-r--r--  trace-events | 28
-rw-r--r--  trace/ftrace.c | 33
-rw-r--r--  ui/input-keymap.c | 1
-rw-r--r--  util/hbitmap.c | 39
-rw-r--r--  util/memfd.c | 4
-rw-r--r--  util/mmap-alloc.c | 8
-rw-r--r--  util/qemu-coroutine-sleep.c | 4
-rw-r--r--  util/qemu-option.c | 36
-rw-r--r--  util/qemu-sockets.c | 32
-rw-r--r--  util/qemu-thread-posix.c | 59
-rw-r--r--  util/rcu.c | 6
-rw-r--r--  util/uuid.c | 7
-rw-r--r--  vl.c | 137
501 files changed, 19387 insertions, 6118 deletions
diff --git a/.gitignore b/.gitignore
index 588769b250..433f64f429 100644
--- a/.gitignore
+++ b/.gitignore
@@ -53,7 +53,6 @@
/qemu-version.h.tmp
/module_block.h
/scsi/qemu-pr-helper
-/vscclient
/vhost-user-scsi
/fsdev/virtfs-proxy-helper
*.tmp
diff --git a/MAINTAINERS b/MAINTAINERS
index 4e2795bc02..5628322fe6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -544,7 +544,7 @@ F: include/hw/*/xlnx*.h
ARM ACPI Subsystem
M: Shannon Zhao <zhaoshenglong@huawei.com>
-M: Shannon Zhao <shannon.zhao@linaro.org>
+M: Shannon Zhao <shannon.zhaosl@gmail.com>
L: qemu-arm@nongnu.org
S: Maintained
F: hw/arm/virt-acpi-build.c
@@ -733,7 +733,11 @@ F: hw/ppc/prep.c
F: hw/ppc/prep_systemio.c
F: hw/ppc/rs6000_mc.c
F: hw/pci-host/prep.[hc]
+F: hw/isa/i82378.c
F: hw/isa/pc87312.[hc]
+F: hw/dma/i82374.c
+F: hw/timer/m48t59-isa.c
+F: include/hw/timer/m48t59.h
F: pc-bios/ppc_rom.bin
sPAPR
@@ -862,12 +866,13 @@ F: hw/misc/sga.c
PC Chipset
M: Michael S. Tsirkin <mst@redhat.com>
M: Paolo Bonzini <pbonzini@redhat.com>
-S: Support
+S: Supported
F: hw/char/debugcon.c
F: hw/char/parallel.c
F: hw/char/serial*
F: hw/dma/i8257*
F: hw/i2c/pm_smbus.c
+F: hw/input/pckbd.c
F: hw/intc/apic*
F: hw/intc/ioapic*
F: hw/intc/i8259*
@@ -876,7 +881,10 @@ F: hw/misc/pc-testdev.c
F: hw/timer/hpet*
F: hw/timer/i8254*
F: hw/timer/mc146818rtc*
+F: hw/watchdog/wdt_ib700.c
+F: include/hw/display/vga.h
F: include/hw/i2c/pm_smbus.h
+F: include/hw/isa/i8257.h
F: include/hw/timer/hpet.h
F: include/hw/timer/i8254*
F: include/hw/timer/mc146818rtc*
@@ -977,7 +985,9 @@ M: Alexander Graf <agraf@suse.de>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/ppc4*.c
+F: hw/i2c/ppc4xx_i2c.c
F: include/hw/ppc/ppc4xx.h
+F: include/hw/i2c/ppc4xx_i2c.h
ppce500
M: Alexander Graf <agraf@suse.de>
@@ -996,11 +1006,13 @@ Network devices
M: Jason Wang <jasowang@redhat.com>
S: Odd Fixes
F: hw/net/
+F: include/hw/net/
F: tests/virtio-net-test.c
T: git git://github.com/jasowang/qemu.git net
SCSI
M: Paolo Bonzini <pbonzini@redhat.com>
+R: Fam Zheng <famz@redhat.com>
S: Supported
F: include/hw/scsi/*
F: hw/scsi/*
@@ -1072,13 +1084,11 @@ F: include/hw/virtio/
F: tests/virtio-balloon-test.c
virtio-9p
-M: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
M: Greg Kurz <groug@kaod.org>
S: Supported
F: hw/9pfs/
F: fsdev/
F: tests/virtio-9p-test.c
-T: git git://github.com/kvaneesh/QEMU.git
T: git git://github.com/gkurz/qemu.git 9p-next
virtio-blk
@@ -1261,6 +1271,7 @@ T: git git://github.com/stefanha/qemu.git block
Block SCSI subsystem
M: Paolo Bonzini <pbonzini@redhat.com>
+R: Fam Zheng <famz@redhat.com>
L: qemu-block@nongnu.org
S: Supported
F: include/scsi/*
@@ -1539,6 +1550,7 @@ M: Alistair Francis <alistair.francis@xilinx.com>
S: Maintained
F: hw/core/register.c
F: include/hw/register.h
+F: include/hw/registerfields.h
SLIRP
M: Samuel Thibault <samuel.thibault@ens-lyon.org>
diff --git a/Makefile b/Makefile
index ab0354c153..d86ecd2dd4 100644
--- a/Makefile
+++ b/Makefile
@@ -6,7 +6,10 @@ BUILD_DIR=$(CURDIR)
# Before including a proper config-host.mak, assume we are in the source tree
SRC_PATH=.
-UNCHECKED_GOALS := %clean TAGS cscope ctags docker docker-% help
+UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
+ html info pdf txt \
+ help check-help \
+ docker docker-% vm-test vm-build-%
# All following code might depend on configuration variables
ifneq ($(wildcard config-host.mak),)
@@ -50,7 +53,7 @@ ifneq ($(realpath $(SRC_PATH)),$(realpath .))
ifneq ($(wildcard $(SRC_PATH)/config-host.mak),)
$(error This is an out of tree build but your source tree ($(SRC_PATH)) \
seems to have been used for an in-tree build. You can fix this by running \
-"make distclean && rm -rf *-linux-user *-softmmu" in your source tree)
+"$(MAKE) distclean && rm -rf *-linux-user *-softmmu" in your source tree)
endif
endif
@@ -229,6 +232,7 @@ KEYCODEMAP_FILES = \
ui/input-keymap-linux-to-qcode.c \
ui/input-keymap-qcode-to-qnum.c \
ui/input-keymap-qnum-to-qcode.c \
+ ui/input-keymap-qcode-to-linux.c \
$(NULL)
GENERATED_FILES += $(KEYCODEMAP_FILES)
@@ -303,7 +307,7 @@ endif
else \
echo "WARNING: $@ out of date.";\
fi; \
- echo "Run \"make defconfig\" to regenerate."; \
+ echo "Run \"$(MAKE) defconfig\" to regenerate."; \
rm $@.tmp; \
fi; \
else \
@@ -933,4 +937,4 @@ ifdef QEMU_GA_MSI_ENABLED
endif
@echo ''
endif
- @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
+ @echo ' $(MAKE) V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
diff --git a/VERSION b/VERSION
index 46b81d815a..789af8370b 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.11.0
+2.11.50
diff --git a/accel/stubs/Makefile.objs b/accel/stubs/Makefile.objs
index c071abaf4e..779343b0c0 100644
--- a/accel/stubs/Makefile.objs
+++ b/accel/stubs/Makefile.objs
@@ -1,3 +1,4 @@
obj-$(call lnot,$(CONFIG_HAX)) += hax-stub.o
+obj-$(call lnot,$(CONFIG_HVF)) += hvf-stub.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-$(call lnot,$(CONFIG_TCG)) += tcg-stub.o
diff --git a/accel/stubs/hvf-stub.c b/accel/stubs/hvf-stub.c
new file mode 100644
index 0000000000..a79f9fc36f
--- /dev/null
+++ b/accel/stubs/hvf-stub.c
@@ -0,0 +1,31 @@
+/*
+ * QEMU HVF support
+ *
+ * Copyright 2017 Red Hat, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2 or later, as published by the Free Software Foundation,
+ * and may be copied, distributed, and modified under those terms.
+ *
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "sysemu/hvf.h"
+
+int hvf_init_vcpu(CPUState *cpu)
+{
+ return -ENOSYS;
+}
+
+int hvf_vcpu_exec(CPUState *cpu)
+{
+ return -ENOSYS;
+}
+
+void hvf_vcpu_destroy(CPUState *cpu)
+{
+}
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index 5b4ae54a4d..dac5aac477 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -21,7 +21,6 @@
#include "cpu.h"
#include "sysemu/cpus.h"
#include "exec/exec-all.h"
-#include "exec/memory-internal.h"
bool tcg_allowed;
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 9b544d88c8..280200f737 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -146,8 +146,10 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
uint8_t *tb_ptr = itb->tc.ptr;
qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
- "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
- itb->tc.ptr, cpu->cpu_index, itb->pc,
+ "Trace %d: %p ["
+ TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
+ cpu->cpu_index, itb->tc.ptr,
+ itb->cs_base, itb->pc, itb->flags,
lookup_symbol(itb->pc));
#if defined(DEBUG_DISAS)
@@ -525,19 +527,13 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
- int32_t insns_left;
/* Clear the interrupt flag now since we're processing
* cpu->interrupt_request and cpu->exit_request.
+ * Ensure zeroing happens before reading cpu->exit_request or
+ * cpu->interrupt_request (see also smp_wmb in cpu_exit())
*/
- insns_left = atomic_read(&cpu->icount_decr.u32);
- atomic_set(&cpu->icount_decr.u16.high, 0);
- if (unlikely(insns_left < 0)) {
- /* Ensure the zeroing of icount_decr comes before the next read
- * of cpu->exit_request or cpu->interrupt_request.
- */
- smp_mb();
- }
+ atomic_mb_set(&cpu->icount_decr.u16.high, 0);
if (unlikely(atomic_read(&cpu->interrupt_request))) {
int interrupt_request;
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
index 4172ffda82..d0d4484406 100644
--- a/accel/tcg/tcg-runtime.c
+++ b/accel/tcg/tcg-runtime.c
@@ -156,8 +156,9 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env)
return tcg_ctx->code_gen_epilogue;
}
qemu_log_mask_and_addr(CPU_LOG_EXEC, pc,
- "Chain %p [%d: " TARGET_FMT_lx "] %s\n",
- tb->tc.ptr, cpu->cpu_index, pc,
+ "Chain %d: %p ["
+ TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
+ cpu->cpu_index, tb->tc.ptr, cs_base, pc, flags,
lookup_symbol(pc));
return tb->tc.ptr;
}
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index e7f0329a52..7736257085 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -31,7 +31,6 @@
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
-#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
@@ -257,7 +256,7 @@ static target_long decode_sleb128(uint8_t **pp)
/* Encode the data collected about the instructions while compiling TB.
Place the data at BLOCK, and return the number of bytes consumed.
- The logical table consisits of TARGET_INSN_START_WORDS target_ulong's,
+ The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
which come from the target's insn_start data, followed by a uintptr_t
which comes from the host pc of the end of the code implementing the insn.
diff --git a/backends/tpm.c b/backends/tpm.c
index 5763f6f369..91222c5164 100644
--- a/backends/tpm.c
+++ b/backends/tpm.c
@@ -17,16 +17,25 @@
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/tpm.h"
-#include "hw/tpm/tpm_int.h"
#include "qemu/thread.h"
+#include "qemu/main-loop.h"
+
+static void tpm_backend_request_completed_bh(void *opaque)
+{
+ TPMBackend *s = TPM_BACKEND(opaque);
+ TPMIfClass *tic = TPM_IF_GET_CLASS(s->tpmif);
+
+ tic->request_completed(s->tpmif);
+}
static void tpm_backend_worker_thread(gpointer data, gpointer user_data)
{
TPMBackend *s = TPM_BACKEND(user_data);
- TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
+ TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
- assert(k->handle_request != NULL);
k->handle_request(s, (TPMBackendCmd *)data);
+
+ qemu_bh_schedule(s->bh);
}
static void tpm_backend_thread_end(TPMBackend *s)
@@ -44,15 +53,22 @@ enum TpmType tpm_backend_get_type(TPMBackend *s)
return k->type;
}
-int tpm_backend_init(TPMBackend *s, TPMState *state)
+int tpm_backend_init(TPMBackend *s, TPMIf *tpmif, Error **errp)
{
- s->tpm_state = state;
+ if (s->tpmif) {
+ error_setg(errp, "TPM backend '%s' is already initialized", s->id);
+ return -1;
+ }
+
+ s->tpmif = tpmif;
+ object_ref(OBJECT(tpmif));
+
s->had_startup_error = false;
return 0;
}
-int tpm_backend_startup_tpm(TPMBackend *s)
+int tpm_backend_startup_tpm(TPMBackend *s, size_t buffersize)
{
int res = 0;
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
@@ -63,7 +79,7 @@ int tpm_backend_startup_tpm(TPMBackend *s)
s->thread_pool = g_thread_pool_new(tpm_backend_worker_thread, s, 1, TRUE,
NULL);
- res = k->startup_tpm ? k->startup_tpm(s) : 0;
+ res = k->startup_tpm ? k->startup_tpm(s, buffersize) : 0;
s->had_startup_error = (res != 0);
@@ -97,8 +113,6 @@ void tpm_backend_cancel_cmd(TPMBackend *s)
{
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
- assert(k->cancel_cmd);
-
k->cancel_cmd(s);
}
@@ -122,80 +136,44 @@ TPMVersion tpm_backend_get_tpm_version(TPMBackend *s)
{
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
- assert(k->get_tpm_version);
-
return k->get_tpm_version(s);
}
-TPMInfo *tpm_backend_query_tpm(TPMBackend *s)
+size_t tpm_backend_get_buffer_size(TPMBackend *s)
{
- TPMInfo *info = g_new0(TPMInfo, 1);
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
- info->id = g_strdup(s->id);
- info->model = s->fe_model;
- if (k->get_tpm_options) {
- info->options = k->get_tpm_options(s);
- }
-
- return info;
+ return k->get_buffer_size(s);
}
-static bool tpm_backend_prop_get_opened(Object *obj, Error **errp)
-{
- TPMBackend *s = TPM_BACKEND(obj);
-
- return s->opened;
-}
-
-void tpm_backend_open(TPMBackend *s, Error **errp)
-{
- object_property_set_bool(OBJECT(s), true, "opened", errp);
-}
-
-static void tpm_backend_prop_set_opened(Object *obj, bool value, Error **errp)
+TPMInfo *tpm_backend_query_tpm(TPMBackend *s)
{
- TPMBackend *s = TPM_BACKEND(obj);
+ TPMInfo *info = g_new0(TPMInfo, 1);
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
- Error *local_err = NULL;
-
- if (value == s->opened) {
- return;
- }
-
- if (!value && s->opened) {
- error_setg(errp, QERR_PERMISSION_DENIED);
- return;
- }
+ TPMIfClass *tic = TPM_IF_GET_CLASS(s->tpmif);
- if (k->opened) {
- k->opened(s, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
- }
+ info->id = g_strdup(s->id);
+ info->model = tic->model;
+ info->options = k->get_tpm_options(s);
- s->opened = true;
+ return info;
}
static void tpm_backend_instance_init(Object *obj)
{
TPMBackend *s = TPM_BACKEND(obj);
- object_property_add_bool(obj, "opened",
- tpm_backend_prop_get_opened,
- tpm_backend_prop_set_opened,
- NULL);
- s->fe_model = -1;
+ s->bh = qemu_bh_new(tpm_backend_request_completed_bh, s);
}
static void tpm_backend_instance_finalize(Object *obj)
{
TPMBackend *s = TPM_BACKEND(obj);
+ object_unref(OBJECT(s->tpmif));
g_free(s->id);
tpm_backend_thread_end(s);
+ qemu_bh_delete(s->bh);
}
static const TypeInfo tpm_backend_info = {
diff --git a/block.c b/block.c
index 9a1a0d1e73..a8da4f2b25 100644
--- a/block.c
+++ b/block.c
@@ -822,6 +822,18 @@ static void bdrv_child_cb_drained_end(BdrvChild *child)
bdrv_drained_end(bs);
}
+static void bdrv_child_cb_attach(BdrvChild *child)
+{
+ BlockDriverState *bs = child->opaque;
+ bdrv_apply_subtree_drain(child, bs);
+}
+
+static void bdrv_child_cb_detach(BdrvChild *child)
+{
+ BlockDriverState *bs = child->opaque;
+ bdrv_unapply_subtree_drain(child, bs);
+}
+
static int bdrv_child_cb_inactivate(BdrvChild *child)
{
BlockDriverState *bs = child->opaque;
@@ -889,6 +901,8 @@ const BdrvChildRole child_file = {
.inherit_options = bdrv_inherited_options,
.drained_begin = bdrv_child_cb_drained_begin,
.drained_end = bdrv_child_cb_drained_end,
+ .attach = bdrv_child_cb_attach,
+ .detach = bdrv_child_cb_detach,
.inactivate = bdrv_child_cb_inactivate,
};
@@ -911,6 +925,8 @@ const BdrvChildRole child_format = {
.inherit_options = bdrv_inherited_fmt_options,
.drained_begin = bdrv_child_cb_drained_begin,
.drained_end = bdrv_child_cb_drained_end,
+ .attach = bdrv_child_cb_attach,
+ .detach = bdrv_child_cb_detach,
.inactivate = bdrv_child_cb_inactivate,
};
@@ -953,6 +969,8 @@ static void bdrv_backing_attach(BdrvChild *c)
parent->backing_blocker);
bdrv_op_unblock(backing_hd, BLOCK_OP_TYPE_BACKUP_TARGET,
parent->backing_blocker);
+
+ bdrv_child_cb_attach(c);
}
static void bdrv_backing_detach(BdrvChild *c)
@@ -963,6 +981,8 @@ static void bdrv_backing_detach(BdrvChild *c)
bdrv_op_unblock_all(c->bs, parent->backing_blocker);
error_free(parent->backing_blocker);
parent->backing_blocker = NULL;
+
+ bdrv_child_cb_detach(c);
}
/*
@@ -1924,6 +1944,8 @@ void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,
assert(role == &child_backing || role == &child_file);
if (!backing) {
+ int flags = bdrv_reopen_get_flags(reopen_queue, bs);
+
/* Apart from the modifications below, the same permissions are
* forwarded and left alone as for filters */
bdrv_filter_default_perms(bs, c, role, reopen_queue, perm, shared,
@@ -1936,7 +1958,9 @@ void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,
/* bs->file always needs to be consistent because of the metadata. We
* can never allow other users to resize or write to it. */
- perm |= BLK_PERM_CONSISTENT_READ;
+ if (!(flags & BDRV_O_NO_IO)) {
+ perm |= BLK_PERM_CONSISTENT_READ;
+ }
shared &= ~(BLK_PERM_WRITE | BLK_PERM_RESIZE);
} else {
/* We want consistent read from backing files if the parent needs it.
@@ -1968,17 +1992,23 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
BlockDriverState *new_bs)
{
BlockDriverState *old_bs = child->bs;
+ int i;
if (old_bs && new_bs) {
assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs));
}
if (old_bs) {
- if (old_bs->quiesce_counter && child->role->drained_end) {
- child->role->drained_end(child);
- }
+ /* Detach first so that the recursive drain sections coming from @child
+ * are already gone and we only end the drain sections that came from
+ * elsewhere. */
if (child->role->detach) {
child->role->detach(child);
}
+ if (old_bs->quiesce_counter && child->role->drained_end) {
+ for (i = 0; i < old_bs->quiesce_counter; i++) {
+ child->role->drained_end(child);
+ }
+ }
QLIST_REMOVE(child, next_parent);
}
@@ -1987,9 +2017,14 @@ static void bdrv_replace_child_noperm(BdrvChild *child,
if (new_bs) {
QLIST_INSERT_HEAD(&new_bs->parents, child, next_parent);
if (new_bs->quiesce_counter && child->role->drained_begin) {
- child->role->drained_begin(child);
+ for (i = 0; i < new_bs->quiesce_counter; i++) {
+ child->role->drained_begin(child);
+ }
}
+ /* Attach only after starting new drained sections, so that recursive
+ * drain sections coming from @child don't get an extra .drained_begin
+ * callback. */
if (child->role->attach) {
child->role->attach(child);
}
@@ -2731,6 +2766,7 @@ BlockDriverState *bdrv_open(const char *filename, const char *reference,
* returns a pointer to bs_queue, which is either the newly allocated
* bs_queue, or the existing bs_queue being used.
*
+ * bs must be drained between bdrv_reopen_queue() and bdrv_reopen_multiple().
*/
static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
BlockDriverState *bs,
@@ -2746,6 +2782,11 @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
BdrvChild *child;
QDict *old_options, *explicit_options;
+ /* Make sure that the caller remembered to use a drained section. This is
+ * important to avoid graph changes between the recursive queuing here and
+ * bdrv_reopen_multiple(). */
+ assert(bs->quiesce_counter > 0);
+
if (bs_queue == NULL) {
bs_queue = g_new0(BlockReopenQueue, 1);
QSIMPLEQ_INIT(bs_queue);
@@ -2870,6 +2911,8 @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
* If all devices prepare successfully, then the changes are committed
* to all devices.
*
+ * All affected nodes must be drained between bdrv_reopen_queue() and
+ * bdrv_reopen_multiple().
*/
int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **errp)
{
@@ -2879,11 +2922,8 @@ int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **er
assert(bs_queue != NULL);
- aio_context_release(ctx);
- bdrv_drain_all_begin();
- aio_context_acquire(ctx);
-
QSIMPLEQ_FOREACH(bs_entry, bs_queue, entry) {
+ assert(bs_entry->state.bs->quiesce_counter > 0);
if (bdrv_reopen_prepare(&bs_entry->state, bs_queue, &local_err)) {
error_propagate(errp, local_err);
goto cleanup;
@@ -2912,8 +2952,6 @@ cleanup:
}
g_free(bs_queue);
- bdrv_drain_all_end();
-
return ret;
}
@@ -2923,12 +2961,18 @@ int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp)
{
int ret = -1;
Error *local_err = NULL;
- BlockReopenQueue *queue = bdrv_reopen_queue(NULL, bs, NULL, bdrv_flags);
+ BlockReopenQueue *queue;
+
+ bdrv_subtree_drained_begin(bs);
+ queue = bdrv_reopen_queue(NULL, bs, NULL, bdrv_flags);
ret = bdrv_reopen_multiple(bdrv_get_aio_context(bs), queue, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
}
+
+ bdrv_subtree_drained_end(bs);
+
return ret;
}
@@ -4320,9 +4364,15 @@ int bdrv_inactivate_all(void)
BdrvNextIterator it;
int ret = 0;
int pass;
+ GSList *aio_ctxs = NULL, *ctx;
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
- aio_context_acquire(bdrv_get_aio_context(bs));
+ AioContext *aio_context = bdrv_get_aio_context(bs);
+
+ if (!g_slist_find(aio_ctxs, aio_context)) {
+ aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
+ aio_context_acquire(aio_context);
+ }
}
/* We do two passes of inactivation. The first pass calls to drivers'
@@ -4340,9 +4390,11 @@ int bdrv_inactivate_all(void)
}
out:
- for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
- aio_context_release(bdrv_get_aio_context(bs));
+ for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
+ AioContext *aio_context = ctx->data;
+ aio_context_release(aio_context);
}
+ g_slist_free(aio_ctxs);
return ret;
}
@@ -4593,10 +4645,11 @@ void bdrv_img_create(const char *filename, const char *fmt,
back_flags = flags;
back_flags &= ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
+ backing_options = qdict_new();
if (backing_fmt) {
- backing_options = qdict_new();
qdict_put_str(backing_options, "driver", backing_fmt);
}
+ qdict_put_bool(backing_options, BDRV_OPT_FORCE_SHARE, true);
bs = bdrv_open(full_backing, NULL, backing_options, back_flags,
&local_err);
@@ -4746,7 +4799,7 @@ void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
AioContext *ctx = bdrv_get_aio_context(bs);
aio_disable_external(ctx);
- bdrv_parent_drained_begin(bs);
+ bdrv_parent_drained_begin(bs, NULL);
bdrv_drain(bs); /* ensure there are no in-flight requests */
while (aio_poll(ctx, false)) {
@@ -4760,7 +4813,7 @@ void bdrv_set_aio_context(BlockDriverState *bs, AioContext *new_context)
*/
aio_context_acquire(new_context);
bdrv_attach_aio_context(bs, new_context);
- bdrv_parent_drained_end(bs);
+ bdrv_parent_drained_end(bs, NULL);
aio_enable_external(ctx);
aio_context_release(new_context);
}
diff --git a/block/backup.c b/block/backup.c
index 99e6bcc748..4a16a37229 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -40,11 +40,12 @@ typedef struct BackupBlockJob {
BlockdevOnError on_target_error;
CoRwlock flush_rwlock;
uint64_t bytes_read;
- unsigned long *done_bitmap;
int64_t cluster_size;
bool compress;
NotifierWithReturn before_write;
QLIST_HEAD(, CowRequest) inflight_reqs;
+
+ HBitmap *copy_bitmap;
} BackupBlockJob;
/* See if in-flight requests overlap and wait for them to complete */
@@ -109,10 +110,11 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
cow_request_begin(&cow_request, job, start, end);
for (; start < end; start += job->cluster_size) {
- if (test_bit(start / job->cluster_size, job->done_bitmap)) {
+ if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
trace_backup_do_cow_skip(job, start);
continue; /* already copied */
}
+ hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
trace_backup_do_cow_process(job, start);
@@ -132,6 +134,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
if (error_is_read) {
*error_is_read = true;
}
+ hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
goto out;
}
@@ -148,11 +151,10 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
if (error_is_read) {
*error_is_read = false;
}
+ hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
goto out;
}
- set_bit(start / job->cluster_size, job->done_bitmap);
-
/* Publish progress, guest I/O counts as progress too. Note that the
* offset field is an opaque progress value, it is not a disk offset.
*/
@@ -260,7 +262,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
}
len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
- bitmap_zero(backup_job->done_bitmap, len);
+ hbitmap_set(backup_job->copy_bitmap, 0, len);
}
void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
@@ -360,64 +362,68 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
+ int ret;
bool error_is_read;
- int ret = 0;
- int clusters_per_iter;
- uint32_t granularity;
- int64_t offset;
int64_t cluster;
- int64_t end;
- int64_t last_cluster = -1;
- BdrvDirtyBitmapIter *dbi;
+ HBitmapIter hbi;
- granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
- clusters_per_iter = MAX((granularity / job->cluster_size), 1);
- dbi = bdrv_dirty_iter_new(job->sync_bitmap);
+ hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
+ while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
+ do {
+ if (yield_and_check(job)) {
+ return 0;
+ }
+ ret = backup_do_cow(job, cluster * job->cluster_size,
+ job->cluster_size, &error_is_read, false);
+ if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
+ BLOCK_ERROR_ACTION_REPORT)
+ {
+ return ret;
+ }
+ } while (ret < 0);
+ }
+
+ return 0;
+}
- /* Find the next dirty sector(s) */
- while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) {
- cluster = offset / job->cluster_size;
+/* init copy_bitmap from sync_bitmap */
+static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
+{
+ BdrvDirtyBitmapIter *dbi;
+ int64_t offset;
+ int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
+ job->cluster_size);
- /* Fake progress updates for any clusters we skipped */
- if (cluster != last_cluster + 1) {
- job->common.offset += ((cluster - last_cluster - 1) *
- job->cluster_size);
+ dbi = bdrv_dirty_iter_new(job->sync_bitmap);
+ while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
+ int64_t cluster = offset / job->cluster_size;
+ int64_t next_cluster;
+
+ offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
+ if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
+ hbitmap_set(job->copy_bitmap, cluster, end - cluster);
+ break;
}
- for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
- do {
- if (yield_and_check(job)) {
- goto out;
- }
- ret = backup_do_cow(job, cluster * job->cluster_size,
- job->cluster_size, &error_is_read,
- false);
- if ((ret < 0) &&
- backup_error_action(job, error_is_read, -ret) ==
- BLOCK_ERROR_ACTION_REPORT) {
- goto out;
- }
- } while (ret < 0);
+ offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset);
+ if (offset == -1) {
+ hbitmap_set(job->copy_bitmap, cluster, end - cluster);
+ break;
}
- /* If the bitmap granularity is smaller than the backup granularity,
- * we need to advance the iterator pointer to the next cluster. */
- if (granularity < job->cluster_size) {
- bdrv_set_dirty_iter(dbi, cluster * job->cluster_size);
+ next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
+ hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
+ if (next_cluster >= end) {
+ break;
}
- last_cluster = cluster - 1;
+ bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
}
- /* Play some final catchup with the progress meter */
- end = DIV_ROUND_UP(job->common.len, job->cluster_size);
- if (last_cluster + 1 < end) {
- job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
- }
+ job->common.offset = job->common.len -
+ hbitmap_count(job->copy_bitmap) * job->cluster_size;
-out:
bdrv_dirty_iter_free(dbi);
- return ret;
}
static void coroutine_fn backup_run(void *opaque)
@@ -425,19 +431,27 @@ static void coroutine_fn backup_run(void *opaque)
BackupBlockJob *job = opaque;
BackupCompleteData *data;
BlockDriverState *bs = blk_bs(job->common.blk);
- int64_t offset;
+ int64_t offset, nb_clusters;
int ret = 0;
QLIST_INIT(&job->inflight_reqs);
qemu_co_rwlock_init(&job->flush_rwlock);
- job->done_bitmap = bitmap_new(DIV_ROUND_UP(job->common.len,
- job->cluster_size));
+ nb_clusters = DIV_ROUND_UP(job->common.len, job->cluster_size);
+ job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
+ if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
+ backup_incremental_init_copy_bitmap(job);
+ } else {
+ hbitmap_set(job->copy_bitmap, 0, nb_clusters);
+ }
+
job->before_write.notify = backup_before_write_notify;
bdrv_add_before_write_notifier(bs, &job->before_write);
if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
+ /* All bits are set in copy_bitmap to allow any cluster to be copied.
+ * This does not actually require them to be copied. */
while (!block_job_is_cancelled(&job->common)) {
/* Yield until the job is cancelled. We just let our before_write
* notify callback service CoW requests. */
@@ -512,7 +526,7 @@ static void coroutine_fn backup_run(void *opaque)
/* wait until pending backup_do_cow() calls have completed */
qemu_co_rwlock_wrlock(&job->flush_rwlock);
qemu_co_rwlock_unlock(&job->flush_rwlock);
- g_free(job->done_bitmap);
+ hbitmap_free(job->copy_bitmap);
data = g_malloc(sizeof(*data));
data->ret = ret;
diff --git a/block/commit.c b/block/commit.c
index c5327551ce..bb6c904704 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -277,7 +277,6 @@ void commit_start(const char *job_id, BlockDriverState *bs,
const char *filter_node_name, Error **errp)
{
CommitBlockJob *s;
- BlockReopenQueue *reopen_queue = NULL;
int orig_base_flags;
BlockDriverState *iter;
BlockDriverState *commit_top_bs = NULL;
@@ -299,12 +298,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
/* convert base to r/w, if necessary */
orig_base_flags = bdrv_get_flags(base);
if (!(orig_base_flags & BDRV_O_RDWR)) {
- reopen_queue = bdrv_reopen_queue(reopen_queue, base, NULL,
- orig_base_flags | BDRV_O_RDWR);
- }
-
- if (reopen_queue) {
- bdrv_reopen_multiple(bdrv_get_aio_context(bs), reopen_queue, &local_err);
+ bdrv_reopen(base, orig_base_flags | BDRV_O_RDWR, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
goto fail;
diff --git a/block/curl.c b/block/curl.c
index 2a244e2439..35cf417f59 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -89,6 +89,8 @@ static CURLMcode __curl_multi_socket_action(CURLM *multi_handle,
struct BDRVCURLState;
+static bool libcurl_initialized;
+
typedef struct CURLAIOCB {
Coroutine *co;
QEMUIOVector *qiov;
@@ -686,14 +688,23 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
double d;
const char *secretid;
const char *protocol_delimiter;
+ int ret;
- static int inited = 0;
if (flags & BDRV_O_RDWR) {
error_setg(errp, "curl block device does not support writes");
return -EROFS;
}
+ if (!libcurl_initialized) {
+ ret = curl_global_init(CURL_GLOBAL_ALL);
+ if (ret) {
+ error_setg(errp, "libcurl initialization failed with %d", ret);
+ return -EIO;
+ }
+ libcurl_initialized = true;
+ }
+
qemu_mutex_init(&s->mutex);
opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
qemu_opts_absorb_qdict(opts, options, &local_err);
@@ -772,11 +783,6 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
}
}
- if (!inited) {
- curl_global_init(CURL_GLOBAL_ALL);
- inited = 1;
- }
-
DPRINTF("CURL: Opening %s\n", file);
QSIMPLEQ_INIT(&s->free_state_waitq);
s->aio_context = bdrv_get_aio_context(bs);
@@ -851,6 +857,9 @@ out_noclean:
qemu_mutex_destroy(&s->mutex);
g_free(s->cookie);
g_free(s->url);
+ g_free(s->username);
+ g_free(s->proxyusername);
+ g_free(s->proxypassword);
qemu_opts_del(opts);
return -EINVAL;
}
@@ -949,6 +958,9 @@ static void curl_close(BlockDriverState *bs)
g_free(s->cookie);
g_free(s->url);
+ g_free(s->username);
+ g_free(s->proxyusername);
+ g_free(s->proxypassword);
}
static int64_t curl_getlength(BlockDriverState *bs)
diff --git a/block/dirty-bitmap.c b/block/dirty-bitmap.c
index bd04e991b1..7879d13ddb 100644
--- a/block/dirty-bitmap.c
+++ b/block/dirty-bitmap.c
@@ -715,3 +715,8 @@ char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp)
{
return hbitmap_sha256(bitmap->bitmap, errp);
}
+
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset)
+{
+ return hbitmap_next_zero(bitmap->bitmap, offset);
+}
diff --git a/block/dmg.h b/block/dmg.h
index b592d6fa8b..2ecf239ba5 100644
--- a/block/dmg.h
+++ b/block/dmg.h
@@ -26,7 +26,6 @@
#ifndef BLOCK_DMG_H
#define BLOCK_DMG_H
-#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include <zlib.h>
diff --git a/block/io.c b/block/io.c
index 6773926fc1..7ea402352e 100644
--- a/block/io.c
+++ b/block/io.c
@@ -40,22 +40,28 @@
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int bytes, BdrvRequestFlags flags);
-void bdrv_parent_drained_begin(BlockDriverState *bs)
+void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
BdrvChild *c, *next;
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
+ if (c == ignore) {
+ continue;
+ }
if (c->role->drained_begin) {
c->role->drained_begin(c);
}
}
}
-void bdrv_parent_drained_end(BlockDriverState *bs)
+void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
BdrvChild *c, *next;
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
+ if (c == ignore) {
+ continue;
+ }
if (c->role->drained_end) {
c->role->drained_end(c);
}
@@ -134,29 +140,13 @@ void bdrv_disable_copy_on_read(BlockDriverState *bs)
assert(old >= 1);
}
-/* Check if any requests are in-flight (including throttled requests) */
-bool bdrv_requests_pending(BlockDriverState *bs)
-{
- BdrvChild *child;
-
- if (atomic_read(&bs->in_flight)) {
- return true;
- }
-
- QLIST_FOREACH(child, &bs->children, next) {
- if (bdrv_requests_pending(child->bs)) {
- return true;
- }
- }
-
- return false;
-}
-
typedef struct {
Coroutine *co;
BlockDriverState *bs;
bool done;
bool begin;
+ bool recursive;
+ BdrvChild *parent;
} BdrvCoDrainData;
static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
@@ -175,8 +165,10 @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
bdrv_wakeup(bs);
}
-static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
+/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
+static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, bool recursive)
{
+ BdrvChild *child, *tmp;
BdrvCoDrainData data = { .bs = bs, .done = false, .begin = begin};
if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
@@ -187,16 +179,19 @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin)
data.co = qemu_coroutine_create(bdrv_drain_invoke_entry, &data);
bdrv_coroutine_enter(bs, data.co);
BDRV_POLL_WHILE(bs, !data.done);
+
+ if (recursive) {
+ QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
+ bdrv_drain_invoke(child->bs, begin, true);
+ }
+ }
}
-static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
+static bool bdrv_drain_recurse(BlockDriverState *bs)
{
BdrvChild *child, *tmp;
bool waited;
- /* Ensure any pending metadata writes are submitted to bs->file. */
- bdrv_drain_invoke(bs, begin);
-
/* Wait for drained requests to finish */
waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
@@ -215,7 +210,7 @@ static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
*/
bdrv_ref(bs);
}
- waited |= bdrv_drain_recurse(bs, begin);
+ waited |= bdrv_drain_recurse(bs);
if (in_main_loop) {
bdrv_unref(bs);
}
@@ -224,6 +219,11 @@ static bool bdrv_drain_recurse(BlockDriverState *bs, bool begin)
return waited;
}
+static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
+ BdrvChild *parent);
+static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
+ BdrvChild *parent);
+
static void bdrv_co_drain_bh_cb(void *opaque)
{
BdrvCoDrainData *data = opaque;
@@ -232,9 +232,9 @@ static void bdrv_co_drain_bh_cb(void *opaque)
bdrv_dec_in_flight(bs);
if (data->begin) {
- bdrv_drained_begin(bs);
+ bdrv_do_drained_begin(bs, data->recursive, data->parent);
} else {
- bdrv_drained_end(bs);
+ bdrv_do_drained_end(bs, data->recursive, data->parent);
}
data->done = true;
@@ -242,7 +242,8 @@ static void bdrv_co_drain_bh_cb(void *opaque)
}
static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
- bool begin)
+ bool begin, bool recursive,
+ BdrvChild *parent)
{
BdrvCoDrainData data;
@@ -256,6 +257,8 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
.bs = bs,
.done = false,
.begin = begin,
+ .recursive = recursive,
+ .parent = parent,
};
bdrv_inc_in_flight(bs);
aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
@@ -267,35 +270,97 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
assert(data.done);
}
-void bdrv_drained_begin(BlockDriverState *bs)
+void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
+ BdrvChild *parent)
{
+ BdrvChild *child, *next;
+
if (qemu_in_coroutine()) {
- bdrv_co_yield_to_drain(bs, true);
+ bdrv_co_yield_to_drain(bs, true, recursive, parent);
return;
}
+ /* Stop things in parent-to-child order */
if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
aio_disable_external(bdrv_get_aio_context(bs));
- bdrv_parent_drained_begin(bs);
}
- bdrv_drain_recurse(bs, true);
+ bdrv_parent_drained_begin(bs, parent);
+ bdrv_drain_invoke(bs, true, false);
+ bdrv_drain_recurse(bs);
+
+ if (recursive) {
+ bs->recursive_quiesce_counter++;
+ QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
+ bdrv_do_drained_begin(child->bs, true, child);
+ }
+ }
}
-void bdrv_drained_end(BlockDriverState *bs)
+void bdrv_drained_begin(BlockDriverState *bs)
{
+ bdrv_do_drained_begin(bs, false, NULL);
+}
+
+void bdrv_subtree_drained_begin(BlockDriverState *bs)
+{
+ bdrv_do_drained_begin(bs, true, NULL);
+}
+
+void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
+ BdrvChild *parent)
+{
+ BdrvChild *child, *next;
+ int old_quiesce_counter;
+
if (qemu_in_coroutine()) {
- bdrv_co_yield_to_drain(bs, false);
+ bdrv_co_yield_to_drain(bs, false, recursive, parent);
return;
}
assert(bs->quiesce_counter > 0);
- if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
- return;
+ old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter);
+
+ /* Re-enable things in child-to-parent order */
+ bdrv_drain_invoke(bs, false, false);
+ bdrv_parent_drained_end(bs, parent);
+ if (old_quiesce_counter == 1) {
+ aio_enable_external(bdrv_get_aio_context(bs));
}
- bdrv_parent_drained_end(bs);
- bdrv_drain_recurse(bs, false);
- aio_enable_external(bdrv_get_aio_context(bs));
+ if (recursive) {
+ bs->recursive_quiesce_counter--;
+ QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
+ bdrv_do_drained_end(child->bs, true, child);
+ }
+ }
+}
+
+void bdrv_drained_end(BlockDriverState *bs)
+{
+ bdrv_do_drained_end(bs, false, NULL);
+}
+
+void bdrv_subtree_drained_end(BlockDriverState *bs)
+{
+ bdrv_do_drained_end(bs, true, NULL);
+}
+
+void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
+{
+ int i;
+
+ for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
+ bdrv_do_drained_begin(child->bs, true, child);
+ }
+}
+
+void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
+{
+ int i;
+
+ for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
+ bdrv_do_drained_end(child->bs, true, child);
+ }
}
/*
@@ -342,14 +407,20 @@ void bdrv_drain_all_begin(void)
BdrvNextIterator it;
GSList *aio_ctxs = NULL, *ctx;
- block_job_pause_all();
+ /* BDRV_POLL_WHILE() for a node can only be called from its own I/O thread
+ * or the main loop AioContext. We potentially use BDRV_POLL_WHILE() on
+ * nodes in several different AioContexts, so make sure we're in the main
+ * context. */
+ assert(qemu_get_current_aio_context() == qemu_get_aio_context());
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
+ /* Stop things in parent-to-child order */
aio_context_acquire(aio_context);
- bdrv_parent_drained_begin(bs);
aio_disable_external(aio_context);
+ bdrv_parent_drained_begin(bs, NULL);
+ bdrv_drain_invoke(bs, true, true);
aio_context_release(aio_context);
if (!g_slist_find(aio_ctxs, aio_context)) {
@@ -372,7 +443,7 @@ void bdrv_drain_all_begin(void)
aio_context_acquire(aio_context);
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
if (aio_context == bdrv_get_aio_context(bs)) {
- waited |= bdrv_drain_recurse(bs, true);
+ waited |= bdrv_drain_recurse(bs);
}
}
aio_context_release(aio_context);
@@ -390,14 +461,13 @@ void bdrv_drain_all_end(void)
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
+ /* Re-enable things in child-to-parent order */
aio_context_acquire(aio_context);
+ bdrv_drain_invoke(bs, false, true);
+ bdrv_parent_drained_end(bs, NULL);
aio_enable_external(aio_context);
- bdrv_parent_drained_end(bs);
- bdrv_drain_recurse(bs, false);
aio_context_release(aio_context);
}
-
- block_job_resume_all();
}
void bdrv_drain_all(void)
diff --git a/block/iscsi.c b/block/iscsi.c
index 4683f3b244..5c0a9e55b6 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -2,7 +2,7 @@
* QEMU Block driver for iSCSI images
*
* Copyright (c) 2010-2011 Ronnie Sahlberg <ronniesahlberg@gmail.com>
- * Copyright (c) 2012-2016 Peter Lieven <pl@kamp.de>
+ * Copyright (c) 2012-2017 Peter Lieven <pl@kamp.de>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -104,6 +104,7 @@ typedef struct IscsiTask {
IscsiLun *iscsilun;
QEMUTimer retry_timer;
int err_code;
+ char *err_str;
} IscsiTask;
typedef struct IscsiAIOCB {
@@ -265,7 +266,7 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status,
}
}
iTask->err_code = iscsi_translate_sense(&task->sense);
- error_report("iSCSI Failure: %s", iscsi_get_error(iscsi));
+ iTask->err_str = g_strdup(iscsi_get_error(iscsi));
}
out:
@@ -629,6 +630,8 @@ retry:
if (iTask.status != SCSI_STATUS_GOOD) {
iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
+ error_report("iSCSI WRITE10/16 failed at lba %" PRIu64 ": %s", lba,
+ iTask.err_str);
r = iTask.err_code;
goto out_unlock;
}
@@ -637,6 +640,7 @@ retry:
out_unlock:
qemu_mutex_unlock(&iscsilun->mutex);
+ g_free(iTask.err_str);
return r;
}
@@ -651,10 +655,9 @@ static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
struct scsi_get_lba_status *lbas = NULL;
struct scsi_lba_status_descriptor *lbasd = NULL;
struct IscsiTask iTask;
+ uint64_t lba;
int64_t ret;
- iscsi_co_init_iscsitask(iscsilun, &iTask);
-
if (!is_sector_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
ret = -EINVAL;
goto out;
@@ -670,11 +673,13 @@ static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
goto out;
}
+ lba = sector_qemu2lun(sector_num, iscsilun);
+
+ iscsi_co_init_iscsitask(iscsilun, &iTask);
qemu_mutex_lock(&iscsilun->mutex);
retry:
if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun,
- sector_qemu2lun(sector_num, iscsilun),
- 8 + 16, iscsi_co_generic_cb,
+ lba, 8 + 16, iscsi_co_generic_cb,
&iTask) == NULL) {
ret = -ENOMEM;
goto out_unlock;
@@ -701,6 +706,8 @@ retry:
* because the device is busy or the cmd is not
* supported) we pretend all blocks are allocated
* for backwards compatibility */
+ error_report("iSCSI GET_LBA_STATUS failed at lba %" PRIu64 ": %s",
+ lba, iTask.err_str);
goto out_unlock;
}
@@ -738,6 +745,7 @@ retry:
}
out_unlock:
qemu_mutex_unlock(&iscsilun->mutex);
+ g_free(iTask.err_str);
out:
if (iTask.task != NULL) {
scsi_free_scsi_task(iTask.task);
@@ -756,6 +764,7 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
struct IscsiTask iTask;
uint64_t lba;
uint32_t num_sectors;
+ int r = 0;
if (!is_sector_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
return -EINVAL;
@@ -853,19 +862,23 @@ retry:
iTask.complete = 0;
goto retry;
}
- qemu_mutex_unlock(&iscsilun->mutex);
if (iTask.status != SCSI_STATUS_GOOD) {
- return iTask.err_code;
+ error_report("iSCSI READ10/16 failed at lba %" PRIu64 ": %s",
+ lba, iTask.err_str);
+ r = iTask.err_code;
}
- return 0;
+ qemu_mutex_unlock(&iscsilun->mutex);
+ g_free(iTask.err_str);
+ return r;
}
static int coroutine_fn iscsi_co_flush(BlockDriverState *bs)
{
IscsiLun *iscsilun = bs->opaque;
struct IscsiTask iTask;
+ int r = 0;
iscsi_co_init_iscsitask(iscsilun, &iTask);
qemu_mutex_lock(&iscsilun->mutex);
@@ -892,13 +905,15 @@ retry:
iTask.complete = 0;
goto retry;
}
- qemu_mutex_unlock(&iscsilun->mutex);
if (iTask.status != SCSI_STATUS_GOOD) {
- return iTask.err_code;
+ error_report("iSCSI SYNCHRONIZECACHE10 failed: %s", iTask.err_str);
+ r = iTask.err_code;
}
- return 0;
+ qemu_mutex_unlock(&iscsilun->mutex);
+ g_free(iTask.err_str);
+ return r;
}
#ifdef __linux__
@@ -1128,6 +1143,9 @@ retry:
goto retry;
}
+ iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
+ bytes >> BDRV_SECTOR_BITS);
+
if (iTask.status == SCSI_STATUS_CHECK_CONDITION) {
/* the target might fail with a check condition if it
is not happy with the alignment of the UNMAP request
@@ -1136,15 +1154,15 @@ retry:
}
if (iTask.status != SCSI_STATUS_GOOD) {
+ error_report("iSCSI UNMAP failed at lba %" PRIu64 ": %s",
+ list.lba, iTask.err_str);
r = iTask.err_code;
goto out_unlock;
}
- iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
- bytes >> BDRV_SECTOR_BITS);
-
out_unlock:
qemu_mutex_unlock(&iscsilun->mutex);
+ g_free(iTask.err_str);
return r;
}
@@ -1241,6 +1259,8 @@ retry:
if (iTask.status != SCSI_STATUS_GOOD) {
iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
bytes >> BDRV_SECTOR_BITS);
+ error_report("iSCSI WRITESAME10/16 failed at lba %" PRIu64 ": %s",
+ lba, iTask.err_str);
r = iTask.err_code;
goto out_unlock;
}
@@ -1255,6 +1275,7 @@ retry:
out_unlock:
qemu_mutex_unlock(&iscsilun->mutex);
+ g_free(iTask.err_str);
return r;
}
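
Throughout iscsi.c the completion callback no longer calls error_report() itself; it stores the libiscsi sense text via g_strdup(iscsi_get_error(iscsi)) in iTask.err_str, and each request path reports it together with the LBA it was working on before freeing it. A condensed sketch of that per-request pattern, using only names visible in the hunks above:

    if (iTask.status != SCSI_STATUS_GOOD) {
        error_report("iSCSI WRITE10/16 failed at lba %" PRIu64 ": %s",
                     lba, iTask.err_str);
        r = iTask.err_code;
    }
    qemu_mutex_unlock(&iscsilun->mutex);
    g_free(iTask.err_str);   /* duplicated in iscsi_co_generic_cb() */
    return r;
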
diff --git a/block/nbd.c b/block/nbd.c
index a50d24b50a..8b8ba56cdd 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -388,6 +388,7 @@ static QemuOptsList nbd_runtime_opts = {
.type = QEMU_OPT_STRING,
.help = "ID of the TLS credentials to use",
},
+ { /* end of list */ }
},
};
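
The one-line nbd.c change terminates the runtime options array: QemuOpts walks the .desc table until it reaches an entry with a NULL name, so a missing terminator lets it read past the end of the array. A sketch of the corrected shape (the earlier entries and the .name/.head initializers are elided here):

    static QemuOptsList nbd_runtime_opts = {
        /* .name and .head as in the existing definition */
        .desc = {
            /* ... option descriptions ... */
            { /* end of list */ }   /* NULL-named terminator the parser stops on */
        },
    };
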
diff --git a/block/null.c b/block/null.c
index dd9c13f9ba..0cdabaa440 100644
--- a/block/null.c
+++ b/block/null.c
@@ -110,8 +110,7 @@ static coroutine_fn int null_co_common(BlockDriverState *bs)
BDRVNullState *s = bs->opaque;
if (s->latency_ns) {
- co_aio_sleep_ns(bdrv_get_aio_context(bs), QEMU_CLOCK_REALTIME,
- s->latency_ns);
+ qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, s->latency_ns);
}
return 0;
}
diff --git a/block/qcow2.c b/block/qcow2.c
index 1914a940e5..4348b2c0c5 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -1672,34 +1672,12 @@ static int64_t coroutine_fn qcow2_co_get_block_status(BlockDriverState *bs,
return status;
}
-/* handle reading after the end of the backing file */
-int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
- int64_t offset, int bytes)
-{
- uint64_t bs_size = bs->total_sectors * BDRV_SECTOR_SIZE;
- int n1;
-
- if ((offset + bytes) <= bs_size) {
- return bytes;
- }
-
- if (offset >= bs_size) {
- n1 = 0;
- } else {
- n1 = bs_size - offset;
- }
-
- qemu_iovec_memset(qiov, n1, 0, bytes - n1);
-
- return n1;
-}
-
static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
{
BDRVQcow2State *s = bs->opaque;
- int offset_in_cluster, n1;
+ int offset_in_cluster;
int ret;
unsigned int cur_bytes; /* number of bytes in current iteration */
uint64_t cluster_offset = 0;
@@ -1734,26 +1712,13 @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
case QCOW2_CLUSTER_UNALLOCATED:
if (bs->backing) {
- /* read from the base image */
- n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov,
- offset, cur_bytes);
- if (n1 > 0) {
- QEMUIOVector local_qiov;
-
- qemu_iovec_init(&local_qiov, hd_qiov.niov);
- qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1);
-
- BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
- qemu_co_mutex_unlock(&s->lock);
- ret = bdrv_co_preadv(bs->backing, offset, n1,
- &local_qiov, 0);
- qemu_co_mutex_lock(&s->lock);
-
- qemu_iovec_destroy(&local_qiov);
-
- if (ret < 0) {
- goto fail;
- }
+ BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
+ qemu_co_mutex_unlock(&s->lock);
+ ret = bdrv_co_preadv(bs->backing, offset, cur_bytes,
+ &hd_qiov, 0);
+ qemu_co_mutex_lock(&s->lock);
+ if (ret < 0) {
+ goto fail;
}
} else {
/* Note: in this case, no need to wait */
diff --git a/block/qcow2.h b/block/qcow2.h
index 6f0ff15dd0..46c8cf44ec 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -528,9 +528,6 @@ uint32_t offset_to_reftable_index(BDRVQcow2State *s, uint64_t offset)
}
/* qcow2.c functions */
-int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
- int64_t sector_num, int nb_sectors);
-
int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
int refcount_order, bool generous_increase,
uint64_t *refblock_count);
diff --git a/block/replication.c b/block/replication.c
index e41e293d2b..b1ea3caa4b 100644
--- a/block/replication.c
+++ b/block/replication.c
@@ -394,6 +394,9 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
new_secondary_flags = s->orig_secondary_flags;
}
+ bdrv_subtree_drained_begin(s->hidden_disk->bs);
+ bdrv_subtree_drained_begin(s->secondary_disk->bs);
+
if (orig_hidden_flags != new_hidden_flags) {
reopen_queue = bdrv_reopen_queue(reopen_queue, s->hidden_disk->bs, NULL,
new_hidden_flags);
@@ -409,6 +412,9 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
reopen_queue, &local_err);
error_propagate(errp, local_err);
}
+
+ bdrv_subtree_drained_end(s->hidden_disk->bs);
+ bdrv_subtree_drained_end(s->secondary_disk->bs);
}
static void backup_job_cleanup(BlockDriverState *bs)
diff --git a/block/sheepdog.c b/block/sheepdog.c
index 696a71442a..f684477328 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -400,7 +400,7 @@ typedef struct BDRVSheepdogReopenState {
int cache_flags;
} BDRVSheepdogReopenState;
-static const char * sd_strerror(int err)
+static const char *sd_strerror(int err)
{
int i;
@@ -776,8 +776,7 @@ static coroutine_fn void reconnect_to_sdog(void *opaque)
if (s->fd < 0) {
DPRINTF("Wait for connection to be established\n");
error_report_err(local_err);
- co_aio_sleep_ns(bdrv_get_aio_context(s->bs), QEMU_CLOCK_REALTIME,
- 1000000000ULL);
+ qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000000ULL);
}
};
@@ -1632,7 +1631,7 @@ static int sd_open(BlockDriverState *bs, QDict *options, int flags,
if (!tag) {
tag = "";
}
- if (tag && strlen(tag) >= SD_MAX_VDI_TAG_LEN) {
+ if (strlen(tag) >= SD_MAX_VDI_TAG_LEN) {
error_setg(errp, "value of parameter 'tag' is too long");
ret = -EINVAL;
goto err_no_fd;
@@ -3078,111 +3077,111 @@ static QemuOptsList sd_create_opts = {
};
static BlockDriver bdrv_sheepdog = {
- .format_name = "sheepdog",
- .protocol_name = "sheepdog",
- .instance_size = sizeof(BDRVSheepdogState),
- .bdrv_parse_filename = sd_parse_filename,
- .bdrv_file_open = sd_open,
- .bdrv_reopen_prepare = sd_reopen_prepare,
- .bdrv_reopen_commit = sd_reopen_commit,
- .bdrv_reopen_abort = sd_reopen_abort,
- .bdrv_close = sd_close,
- .bdrv_create = sd_create,
- .bdrv_has_zero_init = bdrv_has_zero_init_1,
- .bdrv_getlength = sd_getlength,
+ .format_name = "sheepdog",
+ .protocol_name = "sheepdog",
+ .instance_size = sizeof(BDRVSheepdogState),
+ .bdrv_parse_filename = sd_parse_filename,
+ .bdrv_file_open = sd_open,
+ .bdrv_reopen_prepare = sd_reopen_prepare,
+ .bdrv_reopen_commit = sd_reopen_commit,
+ .bdrv_reopen_abort = sd_reopen_abort,
+ .bdrv_close = sd_close,
+ .bdrv_create = sd_create,
+ .bdrv_has_zero_init = bdrv_has_zero_init_1,
+ .bdrv_getlength = sd_getlength,
.bdrv_get_allocated_file_size = sd_get_allocated_file_size,
- .bdrv_truncate = sd_truncate,
+ .bdrv_truncate = sd_truncate,
- .bdrv_co_readv = sd_co_readv,
- .bdrv_co_writev = sd_co_writev,
- .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
- .bdrv_co_pdiscard = sd_co_pdiscard,
- .bdrv_co_get_block_status = sd_co_get_block_status,
+ .bdrv_co_readv = sd_co_readv,
+ .bdrv_co_writev = sd_co_writev,
+ .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
+ .bdrv_co_pdiscard = sd_co_pdiscard,
+ .bdrv_co_get_block_status = sd_co_get_block_status,
- .bdrv_snapshot_create = sd_snapshot_create,
- .bdrv_snapshot_goto = sd_snapshot_goto,
- .bdrv_snapshot_delete = sd_snapshot_delete,
- .bdrv_snapshot_list = sd_snapshot_list,
+ .bdrv_snapshot_create = sd_snapshot_create,
+ .bdrv_snapshot_goto = sd_snapshot_goto,
+ .bdrv_snapshot_delete = sd_snapshot_delete,
+ .bdrv_snapshot_list = sd_snapshot_list,
- .bdrv_save_vmstate = sd_save_vmstate,
- .bdrv_load_vmstate = sd_load_vmstate,
+ .bdrv_save_vmstate = sd_save_vmstate,
+ .bdrv_load_vmstate = sd_load_vmstate,
- .bdrv_detach_aio_context = sd_detach_aio_context,
- .bdrv_attach_aio_context = sd_attach_aio_context,
+ .bdrv_detach_aio_context = sd_detach_aio_context,
+ .bdrv_attach_aio_context = sd_attach_aio_context,
- .create_opts = &sd_create_opts,
+ .create_opts = &sd_create_opts,
};
static BlockDriver bdrv_sheepdog_tcp = {
- .format_name = "sheepdog",
- .protocol_name = "sheepdog+tcp",
- .instance_size = sizeof(BDRVSheepdogState),
- .bdrv_parse_filename = sd_parse_filename,
- .bdrv_file_open = sd_open,
- .bdrv_reopen_prepare = sd_reopen_prepare,
- .bdrv_reopen_commit = sd_reopen_commit,
- .bdrv_reopen_abort = sd_reopen_abort,
- .bdrv_close = sd_close,
- .bdrv_create = sd_create,
- .bdrv_has_zero_init = bdrv_has_zero_init_1,
- .bdrv_getlength = sd_getlength,
+ .format_name = "sheepdog",
+ .protocol_name = "sheepdog+tcp",
+ .instance_size = sizeof(BDRVSheepdogState),
+ .bdrv_parse_filename = sd_parse_filename,
+ .bdrv_file_open = sd_open,
+ .bdrv_reopen_prepare = sd_reopen_prepare,
+ .bdrv_reopen_commit = sd_reopen_commit,
+ .bdrv_reopen_abort = sd_reopen_abort,
+ .bdrv_close = sd_close,
+ .bdrv_create = sd_create,
+ .bdrv_has_zero_init = bdrv_has_zero_init_1,
+ .bdrv_getlength = sd_getlength,
.bdrv_get_allocated_file_size = sd_get_allocated_file_size,
- .bdrv_truncate = sd_truncate,
+ .bdrv_truncate = sd_truncate,
- .bdrv_co_readv = sd_co_readv,
- .bdrv_co_writev = sd_co_writev,
- .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
- .bdrv_co_pdiscard = sd_co_pdiscard,
- .bdrv_co_get_block_status = sd_co_get_block_status,
+ .bdrv_co_readv = sd_co_readv,
+ .bdrv_co_writev = sd_co_writev,
+ .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
+ .bdrv_co_pdiscard = sd_co_pdiscard,
+ .bdrv_co_get_block_status = sd_co_get_block_status,
- .bdrv_snapshot_create = sd_snapshot_create,
- .bdrv_snapshot_goto = sd_snapshot_goto,
- .bdrv_snapshot_delete = sd_snapshot_delete,
- .bdrv_snapshot_list = sd_snapshot_list,
+ .bdrv_snapshot_create = sd_snapshot_create,
+ .bdrv_snapshot_goto = sd_snapshot_goto,
+ .bdrv_snapshot_delete = sd_snapshot_delete,
+ .bdrv_snapshot_list = sd_snapshot_list,
- .bdrv_save_vmstate = sd_save_vmstate,
- .bdrv_load_vmstate = sd_load_vmstate,
+ .bdrv_save_vmstate = sd_save_vmstate,
+ .bdrv_load_vmstate = sd_load_vmstate,
- .bdrv_detach_aio_context = sd_detach_aio_context,
- .bdrv_attach_aio_context = sd_attach_aio_context,
+ .bdrv_detach_aio_context = sd_detach_aio_context,
+ .bdrv_attach_aio_context = sd_attach_aio_context,
- .create_opts = &sd_create_opts,
+ .create_opts = &sd_create_opts,
};
static BlockDriver bdrv_sheepdog_unix = {
- .format_name = "sheepdog",
- .protocol_name = "sheepdog+unix",
- .instance_size = sizeof(BDRVSheepdogState),
- .bdrv_parse_filename = sd_parse_filename,
- .bdrv_file_open = sd_open,
- .bdrv_reopen_prepare = sd_reopen_prepare,
- .bdrv_reopen_commit = sd_reopen_commit,
- .bdrv_reopen_abort = sd_reopen_abort,
- .bdrv_close = sd_close,
- .bdrv_create = sd_create,
- .bdrv_has_zero_init = bdrv_has_zero_init_1,
- .bdrv_getlength = sd_getlength,
+ .format_name = "sheepdog",
+ .protocol_name = "sheepdog+unix",
+ .instance_size = sizeof(BDRVSheepdogState),
+ .bdrv_parse_filename = sd_parse_filename,
+ .bdrv_file_open = sd_open,
+ .bdrv_reopen_prepare = sd_reopen_prepare,
+ .bdrv_reopen_commit = sd_reopen_commit,
+ .bdrv_reopen_abort = sd_reopen_abort,
+ .bdrv_close = sd_close,
+ .bdrv_create = sd_create,
+ .bdrv_has_zero_init = bdrv_has_zero_init_1,
+ .bdrv_getlength = sd_getlength,
.bdrv_get_allocated_file_size = sd_get_allocated_file_size,
- .bdrv_truncate = sd_truncate,
+ .bdrv_truncate = sd_truncate,
- .bdrv_co_readv = sd_co_readv,
- .bdrv_co_writev = sd_co_writev,
- .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
- .bdrv_co_pdiscard = sd_co_pdiscard,
- .bdrv_co_get_block_status = sd_co_get_block_status,
+ .bdrv_co_readv = sd_co_readv,
+ .bdrv_co_writev = sd_co_writev,
+ .bdrv_co_flush_to_disk = sd_co_flush_to_disk,
+ .bdrv_co_pdiscard = sd_co_pdiscard,
+ .bdrv_co_get_block_status = sd_co_get_block_status,
- .bdrv_snapshot_create = sd_snapshot_create,
- .bdrv_snapshot_goto = sd_snapshot_goto,
- .bdrv_snapshot_delete = sd_snapshot_delete,
- .bdrv_snapshot_list = sd_snapshot_list,
+ .bdrv_snapshot_create = sd_snapshot_create,
+ .bdrv_snapshot_goto = sd_snapshot_goto,
+ .bdrv_snapshot_delete = sd_snapshot_delete,
+ .bdrv_snapshot_list = sd_snapshot_list,
- .bdrv_save_vmstate = sd_save_vmstate,
- .bdrv_load_vmstate = sd_load_vmstate,
+ .bdrv_save_vmstate = sd_save_vmstate,
+ .bdrv_load_vmstate = sd_load_vmstate,
- .bdrv_detach_aio_context = sd_detach_aio_context,
- .bdrv_attach_aio_context = sd_attach_aio_context,
+ .bdrv_detach_aio_context = sd_detach_aio_context,
+ .bdrv_attach_aio_context = sd_attach_aio_context,
- .create_opts = &sd_create_opts,
+ .create_opts = &sd_create_opts,
};
static void bdrv_sheepdog_init(void)
diff --git a/blockdev-nbd.c b/blockdev-nbd.c
index 28f551a7b0..9e3c22109c 100644
--- a/blockdev-nbd.c
+++ b/blockdev-nbd.c
@@ -18,10 +18,10 @@
#include "qmp-commands.h"
#include "block/nbd.h"
#include "io/channel-socket.h"
+#include "io/net-listener.h"
typedef struct NBDServerData {
- QIOChannelSocket *listen_ioc;
- int watch;
+ QIONetListener *listener;
QCryptoTLSCreds *tlscreds;
} NBDServerData;
@@ -32,27 +32,13 @@ static void nbd_blockdev_client_closed(NBDClient *client, bool ignored)
nbd_client_put(client);
}
-static gboolean nbd_accept(QIOChannel *ioc, GIOCondition condition,
- gpointer opaque)
+static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
+ gpointer opaque)
{
- QIOChannelSocket *cioc;
-
- if (!nbd_server) {
- return FALSE;
- }
-
- cioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(ioc),
- NULL);
- if (!cioc) {
- return TRUE;
- }
-
qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server");
nbd_client_new(NULL, cioc,
nbd_server->tlscreds, NULL,
nbd_blockdev_client_closed);
- object_unref(OBJECT(cioc));
- return TRUE;
}
@@ -62,10 +48,8 @@ static void nbd_server_free(NBDServerData *server)
return;
}
- if (server->watch != -1) {
- g_source_remove(server->watch);
- }
- object_unref(OBJECT(server->listen_ioc));
+ qio_net_listener_disconnect(server->listener);
+ object_unref(OBJECT(server->listener));
if (server->tlscreds) {
object_unref(OBJECT(server->tlscreds));
}
@@ -112,12 +96,12 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds,
}
nbd_server = g_new0(NBDServerData, 1);
- nbd_server->watch = -1;
- nbd_server->listen_ioc = qio_channel_socket_new();
- qio_channel_set_name(QIO_CHANNEL(nbd_server->listen_ioc),
- "nbd-listener");
- if (qio_channel_socket_listen_sync(
- nbd_server->listen_ioc, addr, errp) < 0) {
+ nbd_server->listener = qio_net_listener_new();
+
+ qio_net_listener_set_name(nbd_server->listener,
+ "nbd-listener");
+
+ if (qio_net_listener_open_sync(nbd_server->listener, addr, errp) < 0) {
goto error;
}
@@ -134,12 +118,10 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds,
}
}
- nbd_server->watch = qio_channel_add_watch(
- QIO_CHANNEL(nbd_server->listen_ioc),
- G_IO_IN,
- nbd_accept,
- NULL,
- NULL);
+ qio_net_listener_set_client_func(nbd_server->listener,
+ nbd_accept,
+ NULL,
+ NULL);
return;
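
blockdev-nbd.c is converted from a hand-rolled qio_channel_add_watch()/qio_channel_socket_accept() pair to the new QIONetListener helper, which owns the listening socket(s) and invokes a callback with each already-accepted QIOChannelSocket. A condensed sketch of the server setup path, using only the calls that appear in the hunks above:

    #include "io/net-listener.h"

    nbd_server->listener = qio_net_listener_new();
    qio_net_listener_set_name(nbd_server->listener, "nbd-listener");

    if (qio_net_listener_open_sync(nbd_server->listener, addr, errp) < 0) {
        goto error;
    }

    /* nbd_accept() now receives the accepted QIOChannelSocket directly. */
    qio_net_listener_set_client_func(nbd_server->listener, nbd_accept,
                                     NULL, NULL);
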
diff --git a/blockdev.c b/blockdev.c
index 56a6b24a0b..29d569a24e 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -45,6 +45,7 @@
#include "qapi/qmp/qerror.h"
#include "qapi/qobject-output-visitor.h"
#include "sysemu/sysemu.h"
+#include "sysemu/iothread.h"
#include "block/block_int.h"
#include "qmp-commands.h"
#include "block/trace.h"
@@ -734,10 +735,6 @@ QemuOptsList qemu_legacy_drive_opts = {
.type = QEMU_OPT_STRING,
.help = "chs translation (auto, lba, none)",
},{
- .name = "boot",
- .type = QEMU_OPT_BOOL,
- .help = "(deprecated, ignored)",
- },{
.name = "addr",
.type = QEMU_OPT_STRING,
.help = "pci address (virtio only)",
@@ -872,13 +869,6 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type)
goto fail;
}
- /* Deprecated option boot=[on|off] */
- if (qemu_opt_get(legacy_opts, "boot") != NULL) {
- fprintf(stderr, "qemu-kvm: boot=on|off is deprecated and will be "
- "ignored. Future versions will reject this parameter. Please "
- "update your scripts.\n");
- }
-
/* Other deprecated options */
if (!qtest_enabled()) {
for (i = 0; i < ARRAY_SIZE(deprecated); i++) {
@@ -1454,7 +1444,6 @@ struct BlkActionState {
typedef struct InternalSnapshotState {
BlkActionState common;
BlockDriverState *bs;
- AioContext *aio_context;
QEMUSnapshotInfo sn;
bool created;
} InternalSnapshotState;
@@ -1485,6 +1474,7 @@ static void internal_snapshot_prepare(BlkActionState *common,
qemu_timeval tv;
BlockdevSnapshotInternal *internal;
InternalSnapshotState *state;
+ AioContext *aio_context;
int ret1;
g_assert(common->action->type ==
@@ -1506,32 +1496,33 @@ static void internal_snapshot_prepare(BlkActionState *common,
return;
}
- /* AioContext is released in .clean() */
- state->aio_context = bdrv_get_aio_context(bs);
- aio_context_acquire(state->aio_context);
+ aio_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(aio_context);
state->bs = bs;
+
+ /* Paired with .clean() */
bdrv_drained_begin(bs);
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) {
- return;
+ goto out;
}
if (bdrv_is_read_only(bs)) {
error_setg(errp, "Device '%s' is read only", device);
- return;
+ goto out;
}
if (!bdrv_can_snapshot(bs)) {
error_setg(errp, "Block format '%s' used by device '%s' "
"does not support internal snapshots",
bs->drv->format_name, device);
- return;
+ goto out;
}
if (!strlen(name)) {
error_setg(errp, "Name is empty");
- return;
+ goto out;
}
/* check whether a snapshot with name exist */
@@ -1539,12 +1530,12 @@ static void internal_snapshot_prepare(BlkActionState *common,
&local_err);
if (local_err) {
error_propagate(errp, local_err);
- return;
+ goto out;
} else if (ret) {
error_setg(errp,
"Snapshot with name '%s' already exists on device '%s'",
name, device);
- return;
+ goto out;
}
/* 3. take the snapshot */
@@ -1560,11 +1551,14 @@ static void internal_snapshot_prepare(BlkActionState *common,
error_setg_errno(errp, -ret1,
"Failed to create snapshot '%s' on device '%s'",
name, device);
- return;
+ goto out;
}
/* 4. succeed, mark a snapshot is created */
state->created = true;
+
+out:
+ aio_context_release(aio_context);
}
static void internal_snapshot_abort(BlkActionState *common)
@@ -1573,12 +1567,16 @@ static void internal_snapshot_abort(BlkActionState *common)
DO_UPCAST(InternalSnapshotState, common, common);
BlockDriverState *bs = state->bs;
QEMUSnapshotInfo *sn = &state->sn;
+ AioContext *aio_context;
Error *local_error = NULL;
if (!state->created) {
return;
}
+ aio_context = bdrv_get_aio_context(state->bs);
+ aio_context_acquire(aio_context);
+
if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) {
error_reportf_err(local_error,
"Failed to delete snapshot with id '%s' and "
@@ -1586,19 +1584,26 @@ static void internal_snapshot_abort(BlkActionState *common)
sn->id_str, sn->name,
bdrv_get_device_name(bs));
}
+
+ aio_context_release(aio_context);
}
static void internal_snapshot_clean(BlkActionState *common)
{
InternalSnapshotState *state = DO_UPCAST(InternalSnapshotState,
common, common);
+ AioContext *aio_context;
- if (state->aio_context) {
- if (state->bs) {
- bdrv_drained_end(state->bs);
- }
- aio_context_release(state->aio_context);
+ if (!state->bs) {
+ return;
}
+
+ aio_context = bdrv_get_aio_context(state->bs);
+ aio_context_acquire(aio_context);
+
+ bdrv_drained_end(state->bs);
+
+ aio_context_release(aio_context);
}
/* external snapshot private data */
@@ -1606,7 +1611,6 @@ typedef struct ExternalSnapshotState {
BlkActionState common;
BlockDriverState *old_bs;
BlockDriverState *new_bs;
- AioContext *aio_context;
bool overlay_appended;
} ExternalSnapshotState;
@@ -1626,6 +1630,7 @@ static void external_snapshot_prepare(BlkActionState *common,
ExternalSnapshotState *state =
DO_UPCAST(ExternalSnapshotState, common, common);
TransactionAction *action = common->action;
+ AioContext *aio_context;
/* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar
* purpose but a different set of parameters */
@@ -1662,31 +1667,32 @@ static void external_snapshot_prepare(BlkActionState *common,
return;
}
- /* Acquire AioContext now so any threads operating on old_bs stop */
- state->aio_context = bdrv_get_aio_context(state->old_bs);
- aio_context_acquire(state->aio_context);
+ aio_context = bdrv_get_aio_context(state->old_bs);
+ aio_context_acquire(aio_context);
+
+ /* Paired with .clean() */
bdrv_drained_begin(state->old_bs);
if (!bdrv_is_inserted(state->old_bs)) {
error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device);
- return;
+ goto out;
}
if (bdrv_op_is_blocked(state->old_bs,
BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) {
- return;
+ goto out;
}
if (!bdrv_is_read_only(state->old_bs)) {
if (bdrv_flush(state->old_bs)) {
error_setg(errp, QERR_IO_ERROR);
- return;
+ goto out;
}
}
if (!bdrv_is_first_non_filter(state->old_bs)) {
error_setg(errp, QERR_FEATURE_DISABLED, "snapshot");
- return;
+ goto out;
}
if (action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_SYNC) {
@@ -1698,13 +1704,13 @@ static void external_snapshot_prepare(BlkActionState *common,
if (node_name && !snapshot_node_name) {
error_setg(errp, "New snapshot node name missing");
- return;
+ goto out;
}
if (snapshot_node_name &&
bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) {
error_setg(errp, "New snapshot node name already in use");
- return;
+ goto out;
}
flags = state->old_bs->open_flags;
@@ -1717,7 +1723,7 @@ static void external_snapshot_prepare(BlkActionState *common,
int64_t size = bdrv_getlength(state->old_bs);
if (size < 0) {
error_setg_errno(errp, -size, "bdrv_getlength failed");
- return;
+ goto out;
}
bdrv_img_create(new_image_file, format,
state->old_bs->filename,
@@ -1725,7 +1731,7 @@ static void external_snapshot_prepare(BlkActionState *common,
NULL, size, flags, false, &local_err);
if (local_err) {
error_propagate(errp, local_err);
- return;
+ goto out;
}
}
@@ -1740,30 +1746,30 @@ static void external_snapshot_prepare(BlkActionState *common,
errp);
/* We will manually add the backing_hd field to the bs later */
if (!state->new_bs) {
- return;
+ goto out;
}
if (bdrv_has_blk(state->new_bs)) {
error_setg(errp, "The snapshot is already in use");
- return;
+ goto out;
}
if (bdrv_op_is_blocked(state->new_bs, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
errp)) {
- return;
+ goto out;
}
if (state->new_bs->backing != NULL) {
error_setg(errp, "The snapshot already has a backing image");
- return;
+ goto out;
}
if (!state->new_bs->drv->supports_backing) {
error_setg(errp, "The snapshot does not support backing images");
- return;
+ goto out;
}
- bdrv_set_aio_context(state->new_bs, state->aio_context);
+ bdrv_set_aio_context(state->new_bs, aio_context);
/* This removes our old bs and adds the new bs. This is an operation that
* can fail, so we need to do it in .prepare; undoing it for abort is
@@ -1772,15 +1778,22 @@ static void external_snapshot_prepare(BlkActionState *common,
bdrv_append(state->new_bs, state->old_bs, &local_err);
if (local_err) {
error_propagate(errp, local_err);
- return;
+ goto out;
}
state->overlay_appended = true;
+
+out:
+ aio_context_release(aio_context);
}
static void external_snapshot_commit(BlkActionState *common)
{
ExternalSnapshotState *state =
DO_UPCAST(ExternalSnapshotState, common, common);
+ AioContext *aio_context;
+
+ aio_context = bdrv_get_aio_context(state->old_bs);
+ aio_context_acquire(aio_context);
/* We don't need (or want) to use the transactional
* bdrv_reopen_multiple() across all the entries at once, because we
@@ -1789,6 +1802,8 @@ static void external_snapshot_commit(BlkActionState *common)
bdrv_reopen(state->old_bs, state->old_bs->open_flags & ~BDRV_O_RDWR,
NULL);
}
+
+ aio_context_release(aio_context);
}
static void external_snapshot_abort(BlkActionState *common)
@@ -1797,11 +1812,18 @@ static void external_snapshot_abort(BlkActionState *common)
DO_UPCAST(ExternalSnapshotState, common, common);
if (state->new_bs) {
if (state->overlay_appended) {
+ AioContext *aio_context;
+
+ aio_context = bdrv_get_aio_context(state->old_bs);
+ aio_context_acquire(aio_context);
+
bdrv_ref(state->old_bs); /* we can't let bdrv_set_backing_hd()
close state->old_bs; we need it */
bdrv_set_backing_hd(state->new_bs, NULL, &error_abort);
bdrv_replace_node(state->new_bs, state->old_bs, &error_abort);
bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */
+
+ aio_context_release(aio_context);
}
}
}
@@ -1810,17 +1832,24 @@ static void external_snapshot_clean(BlkActionState *common)
{
ExternalSnapshotState *state =
DO_UPCAST(ExternalSnapshotState, common, common);
- if (state->aio_context) {
- bdrv_drained_end(state->old_bs);
- aio_context_release(state->aio_context);
- bdrv_unref(state->new_bs);
+ AioContext *aio_context;
+
+ if (!state->old_bs) {
+ return;
}
+
+ aio_context = bdrv_get_aio_context(state->old_bs);
+ aio_context_acquire(aio_context);
+
+ bdrv_drained_end(state->old_bs);
+ bdrv_unref(state->new_bs);
+
+ aio_context_release(aio_context);
}
typedef struct DriveBackupState {
BlkActionState common;
BlockDriverState *bs;
- AioContext *aio_context;
BlockJob *job;
} DriveBackupState;
@@ -1832,6 +1861,7 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp)
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
BlockDriverState *bs;
DriveBackup *backup;
+ AioContext *aio_context;
Error *local_err = NULL;
assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP);
@@ -1842,24 +1872,36 @@ static void drive_backup_prepare(BlkActionState *common, Error **errp)
return;
}
- /* AioContext is released in .clean() */
- state->aio_context = bdrv_get_aio_context(bs);
- aio_context_acquire(state->aio_context);
+ aio_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(aio_context);
+
+ /* Paired with .clean() */
bdrv_drained_begin(bs);
+
state->bs = bs;
state->job = do_drive_backup(backup, common->block_job_txn, &local_err);
if (local_err) {
error_propagate(errp, local_err);
- return;
+ goto out;
}
+
+out:
+ aio_context_release(aio_context);
}
static void drive_backup_commit(BlkActionState *common)
{
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
+ AioContext *aio_context;
+
+ aio_context = bdrv_get_aio_context(state->bs);
+ aio_context_acquire(aio_context);
+
assert(state->job);
block_job_start(state->job);
+
+ aio_context_release(aio_context);
}
static void drive_backup_abort(BlkActionState *common)
@@ -1867,25 +1909,38 @@ static void drive_backup_abort(BlkActionState *common)
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
if (state->job) {
+ AioContext *aio_context;
+
+ aio_context = bdrv_get_aio_context(state->bs);
+ aio_context_acquire(aio_context);
+
block_job_cancel_sync(state->job);
+
+ aio_context_release(aio_context);
}
}
static void drive_backup_clean(BlkActionState *common)
{
DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common);
+ AioContext *aio_context;
- if (state->aio_context) {
- bdrv_drained_end(state->bs);
- aio_context_release(state->aio_context);
+ if (!state->bs) {
+ return;
}
+
+ aio_context = bdrv_get_aio_context(state->bs);
+ aio_context_acquire(aio_context);
+
+ bdrv_drained_end(state->bs);
+
+ aio_context_release(aio_context);
}
typedef struct BlockdevBackupState {
BlkActionState common;
BlockDriverState *bs;
BlockJob *job;
- AioContext *aio_context;
} BlockdevBackupState;
static BlockJob *do_blockdev_backup(BlockdevBackup *backup, BlockJobTxn *txn,
@@ -1896,6 +1951,7 @@ static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
BlockdevBackup *backup;
BlockDriverState *bs, *target;
+ AioContext *aio_context;
Error *local_err = NULL;
assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
@@ -1911,29 +1967,39 @@ static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
return;
}
- /* AioContext is released in .clean() */
- state->aio_context = bdrv_get_aio_context(bs);
- if (state->aio_context != bdrv_get_aio_context(target)) {
- state->aio_context = NULL;
+ aio_context = bdrv_get_aio_context(bs);
+ if (aio_context != bdrv_get_aio_context(target)) {
error_setg(errp, "Backup between two IO threads is not implemented");
return;
}
- aio_context_acquire(state->aio_context);
+ aio_context_acquire(aio_context);
state->bs = bs;
+
+ /* Paired with .clean() */
bdrv_drained_begin(state->bs);
state->job = do_blockdev_backup(backup, common->block_job_txn, &local_err);
if (local_err) {
error_propagate(errp, local_err);
- return;
+ goto out;
}
+
+out:
+ aio_context_release(aio_context);
}
static void blockdev_backup_commit(BlkActionState *common)
{
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
+ AioContext *aio_context;
+
+ aio_context = bdrv_get_aio_context(state->bs);
+ aio_context_acquire(aio_context);
+
assert(state->job);
block_job_start(state->job);
+
+ aio_context_release(aio_context);
}
static void blockdev_backup_abort(BlkActionState *common)
@@ -1941,25 +2007,38 @@ static void blockdev_backup_abort(BlkActionState *common)
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
if (state->job) {
+ AioContext *aio_context;
+
+ aio_context = bdrv_get_aio_context(state->bs);
+ aio_context_acquire(aio_context);
+
block_job_cancel_sync(state->job);
+
+ aio_context_release(aio_context);
}
}
static void blockdev_backup_clean(BlkActionState *common)
{
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
+ AioContext *aio_context;
- if (state->aio_context) {
- bdrv_drained_end(state->bs);
- aio_context_release(state->aio_context);
+ if (!state->bs) {
+ return;
}
+
+ aio_context = bdrv_get_aio_context(state->bs);
+ aio_context_acquire(aio_context);
+
+ bdrv_drained_end(state->bs);
+
+ aio_context_release(aio_context);
}
typedef struct BlockDirtyBitmapState {
BlkActionState common;
BdrvDirtyBitmap *bitmap;
BlockDriverState *bs;
- AioContext *aio_context;
HBitmap *backup;
bool prepared;
} BlockDirtyBitmapState;
@@ -2038,7 +2117,6 @@ static void block_dirty_bitmap_clear_prepare(BlkActionState *common,
}
bdrv_clear_dirty_bitmap(state->bitmap, &state->backup);
- /* AioContext is released in .clean() */
}
static void block_dirty_bitmap_clear_abort(BlkActionState *common)
@@ -2059,16 +2137,6 @@ static void block_dirty_bitmap_clear_commit(BlkActionState *common)
hbitmap_free(state->backup);
}
-static void block_dirty_bitmap_clear_clean(BlkActionState *common)
-{
- BlockDirtyBitmapState *state = DO_UPCAST(BlockDirtyBitmapState,
- common, common);
-
- if (state->aio_context) {
- aio_context_release(state->aio_context);
- }
-}
-
static void abort_prepare(BlkActionState *common, Error **errp)
{
error_setg(errp, "Transaction aborted using Abort action");
@@ -2129,7 +2197,6 @@ static const BlkActionOps actions[] = {
.prepare = block_dirty_bitmap_clear_prepare,
.commit = block_dirty_bitmap_clear_commit,
.abort = block_dirty_bitmap_clear_abort,
- .clean = block_dirty_bitmap_clear_clean,
}
};
@@ -4052,6 +4119,47 @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp)
return head;
}
+void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
+ bool has_force, bool force, Error **errp)
+{
+ AioContext *old_context;
+ AioContext *new_context;
+ BlockDriverState *bs;
+
+ bs = bdrv_find_node(node_name);
+ if (!bs) {
+ error_setg(errp, "Cannot find node %s", node_name);
+ return;
+ }
+
+ /* Protects against accidents. */
+ if (!(has_force && force) && bdrv_has_blk(bs)) {
+ error_setg(errp, "Node %s is associated with a BlockBackend and could "
+ "be in use (use force=true to override this check)",
+ node_name);
+ return;
+ }
+
+ if (iothread->type == QTYPE_QSTRING) {
+ IOThread *obj = iothread_by_id(iothread->u.s);
+ if (!obj) {
+ error_setg(errp, "Cannot find iothread %s", iothread->u.s);
+ return;
+ }
+
+ new_context = iothread_get_aio_context(obj);
+ } else {
+ new_context = qemu_get_aio_context();
+ }
+
+ old_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(old_context);
+
+ bdrv_set_aio_context(bs, new_context);
+
+ aio_context_release(old_context);
+}
+
QemuOptsList qemu_common_drive_opts = {
.name = "drive",
.head = QTAILQ_HEAD_INITIALIZER(qemu_common_drive_opts.head),
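
The blockdev.c transaction rework drops the aio_context field from the per-action state: rather than acquiring the AioContext in .prepare() and releasing it in .clean(), every callback now looks the context up, acquires it, does its work and releases it before returning, while bdrv_drained_begin()/bdrv_drained_end() carry the quiesce from .prepare() to .clean(). A schematic of the .clean() side as it now appears for the snapshot and backup actions (a composite of the hunks above, not a single function from the patch; state->bs stands for whichever node the action operates on):

    AioContext *aio_context;

    if (!state->bs) {
        return;                          /* .prepare() failed before draining */
    }

    aio_context = bdrv_get_aio_context(state->bs);
    aio_context_acquire(aio_context);

    bdrv_drained_end(state->bs);         /* paired with bdrv_drained_begin() in .prepare() */

    aio_context_release(aio_context);
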
diff --git a/blockjob.c b/blockjob.c
index 715c2c2680..f5cea84e73 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -59,6 +59,7 @@ static void __attribute__((__constructor__)) block_job_init(void)
static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
+static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));
/* Transactional group of block jobs */
struct BlockJobTxn {
@@ -233,26 +234,23 @@ static char *child_job_get_parent_desc(BdrvChild *c)
job->id);
}
-static const BdrvChildRole child_job = {
- .get_parent_desc = child_job_get_parent_desc,
- .stay_at_node = true,
-};
-
-static void block_job_drained_begin(void *opaque)
+static void child_job_drained_begin(BdrvChild *c)
{
- BlockJob *job = opaque;
+ BlockJob *job = c->opaque;
block_job_pause(job);
}
-static void block_job_drained_end(void *opaque)
+static void child_job_drained_end(BdrvChild *c)
{
- BlockJob *job = opaque;
+ BlockJob *job = c->opaque;
block_job_resume(job);
}
-static const BlockDevOps block_job_dev_ops = {
- .drained_begin = block_job_drained_begin,
- .drained_end = block_job_drained_end,
+static const BdrvChildRole child_job = {
+ .get_parent_desc = child_job_get_parent_desc,
+ .drained_begin = child_job_drained_begin,
+ .drained_end = child_job_drained_end,
+ .stay_at_node = true,
};
void block_job_remove_all_bdrv(BlockJob *job)
@@ -480,9 +478,16 @@ static void block_job_completed_txn_success(BlockJob *job)
}
}
+/* Assumes the block_job_mutex is held */
+static bool block_job_timer_pending(BlockJob *job)
+{
+ return timer_pending(&job->sleep_timer);
+}
+
void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
Error *local_err = NULL;
+ int64_t old_speed = job->speed;
if (!job->driver->set_speed) {
error_setg(errp, QERR_UNSUPPORTED);
@@ -495,6 +500,12 @@ void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
}
job->speed = speed;
+ if (speed <= old_speed) {
+ return;
+ }
+
+ /* kick only if a timer is pending */
+ block_job_enter_cond(job, block_job_timer_pending);
}
void block_job_complete(BlockJob *job, Error **errp)
@@ -701,7 +712,6 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
bs->job = job;
- blk_set_dev_ops(blk, &block_job_dev_ops, job);
bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
QLIST_INSERT_HEAD(&block_jobs, job, job_list);
@@ -821,7 +831,11 @@ void block_job_resume_all(void)
}
}
-void block_job_enter(BlockJob *job)
+/*
+ * Conditionally enter the block job: the job is entered only if fn()
+ * returns true, with fn() evaluated under the block_job lock.
+ */
+static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
if (!block_job_started(job)) {
return;
@@ -836,6 +850,11 @@ void block_job_enter(BlockJob *job)
return;
}
+ if (fn && !fn(job)) {
+ block_job_unlock();
+ return;
+ }
+
assert(!job->deferred_to_main_loop);
timer_del(&job->sleep_timer);
job->busy = true;
@@ -843,6 +862,11 @@ void block_job_enter(BlockJob *job)
aio_co_wake(job->co);
}
+void block_job_enter(BlockJob *job)
+{
+ block_job_enter_cond(job, NULL);
+}
+
bool block_job_is_cancelled(BlockJob *job)
{
return job->cancelled;
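
blockjob.c moves the drain notifications from per-BlockBackend dev_ops to the BdrvChildRole of the job's children and adds block_job_enter_cond(), a variant of block_job_enter() that evaluates a predicate under the job lock. block_job_set_speed() uses it so that a sleeping job is woken only when the speed limit was actually raised and its sleep timer is still pending; the relevant lines from the hunk above, condensed:

    job->speed = speed;
    if (speed <= old_speed) {
        return;                               /* no reason to wake the job */
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
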
diff --git a/bsd-user/main.c b/bsd-user/main.c
index f1b244b59b..efef5ff8c5 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -32,7 +32,6 @@
#include "qemu/envlist.h"
#include "exec/log.h"
#include "trace/control.h"
-#include "glib-compat.h"
int singlestep;
unsigned long mmap_min_addr;
diff --git a/chardev/baum.c b/chardev/baum.c
index 67fd783a59..78b0c87625 100644
--- a/chardev/baum.c
+++ b/chardev/baum.c
@@ -1,7 +1,7 @@
/*
* QEMU Baum Braille Device
*
- * Copyright (c) 2008, 2010-2011, 2016 Samuel Thibault
+ * Copyright (c) 2008, 2010-2011, 2016-2017 Samuel Thibault
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -239,6 +239,12 @@ static int baum_deferred_init(BaumChardev *baum)
brlapi_perror("baum: brlapi__getDisplaySize");
return 0;
}
+ if (baum->y > 1) {
+ baum->y = 1;
+ }
+ if (baum->x > 84) {
+ baum->x = 84;
+ }
con = qemu_console_lookup_by_index(0);
if (con && qemu_console_is_graphic(con)) {
diff --git a/chardev/char-mux.c b/chardev/char-mux.c
index 4cda5e7458..567bf965cd 100644
--- a/chardev/char-mux.c
+++ b/chardev/char-mux.c
@@ -123,6 +123,15 @@ static void mux_chr_send_event(MuxChardev *d, int mux_nr, int event)
}
}
+static void mux_chr_be_event(Chardev *chr, int event)
+{
+ MuxChardev *d = MUX_CHARDEV(chr);
+
+ if (d->focus != -1) {
+ mux_chr_send_event(d, d->focus, event);
+ }
+}
+
static int mux_proc_byte(Chardev *chr, MuxChardev *d, int ch)
{
if (d->term_got_escape) {
@@ -346,6 +355,7 @@ static void char_mux_class_init(ObjectClass *oc, void *data)
cc->chr_write = mux_chr_write;
cc->chr_accept_input = mux_chr_accept_input;
cc->chr_add_watch = mux_chr_add_watch;
+ cc->chr_be_event = mux_chr_be_event;
}
static const TypeInfo char_mux_type_info = {
diff --git a/chardev/char-socket.c b/chardev/char-socket.c
index 53eda8ef00..630a7f2995 100644
--- a/chardev/char-socket.c
+++ b/chardev/char-socket.c
@@ -25,6 +25,7 @@
#include "chardev/char.h"
#include "io/channel-socket.h"
#include "io/channel-tls.h"
+#include "io/net-listener.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/clone-visitor.h"
@@ -40,8 +41,7 @@ typedef struct {
Chardev parent;
QIOChannel *ioc; /* Client I/O channel */
QIOChannelSocket *sioc; /* Client master channel */
- QIOChannelSocket *listen_ioc;
- guint listen_tag;
+ QIONetListener *listener;
QCryptoTLSCreds *tls_creds;
int connected;
int max_size;
@@ -93,9 +93,9 @@ static void check_report_connect_error(Chardev *chr,
qemu_chr_socket_restart_timer(chr);
}
-static gboolean tcp_chr_accept(QIOChannel *chan,
- GIOCondition cond,
- void *opaque);
+static void tcp_chr_accept(QIONetListener *listener,
+ QIOChannelSocket *cioc,
+ void *opaque);
static int tcp_chr_read_poll(void *opaque);
static void tcp_chr_disconnect(Chardev *chr);
@@ -401,9 +401,9 @@ static void tcp_chr_disconnect(Chardev *chr)
tcp_chr_free_connection(chr);
- if (s->listen_ioc && s->listen_tag == 0) {
- s->listen_tag = qio_channel_add_watch(
- QIO_CHANNEL(s->listen_ioc), G_IO_IN, tcp_chr_accept, chr, NULL);
+ if (s->listener) {
+ qio_net_listener_set_client_func(s->listener, tcp_chr_accept,
+ chr, NULL);
}
update_disconnected_filename(s);
if (emit_close) {
@@ -702,9 +702,8 @@ static int tcp_chr_new_client(Chardev *chr, QIOChannelSocket *sioc)
if (s->do_nodelay) {
qio_channel_set_delay(s->ioc, false);
}
- if (s->listen_tag) {
- g_source_remove(s->listen_tag);
- s->listen_tag = 0;
+ if (s->listener) {
+ qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL);
}
if (s->tls_creds) {
@@ -736,24 +735,14 @@ static int tcp_chr_add_client(Chardev *chr, int fd)
return ret;
}
-static gboolean tcp_chr_accept(QIOChannel *channel,
- GIOCondition cond,
- void *opaque)
+static void tcp_chr_accept(QIONetListener *listener,
+ QIOChannelSocket *cioc,
+ void *opaque)
{
Chardev *chr = CHARDEV(opaque);
- QIOChannelSocket *sioc;
-
- sioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(channel),
- NULL);
- if (!sioc) {
- return TRUE;
- }
-
- tcp_chr_new_client(chr, sioc);
- object_unref(OBJECT(sioc));
-
- return TRUE;
+ tcp_chr_set_client_ioc_name(chr, cioc);
+ tcp_chr_new_client(chr, cioc);
}
static int tcp_chr_wait_connected(Chardev *chr, Error **errp)
@@ -767,9 +756,10 @@ static int tcp_chr_wait_connected(Chardev *chr, Error **errp)
if (s->is_listen) {
info_report("QEMU waiting for connection on: %s",
chr->filename);
- qio_channel_set_blocking(QIO_CHANNEL(s->listen_ioc), true, NULL);
- tcp_chr_accept(QIO_CHANNEL(s->listen_ioc), G_IO_IN, chr);
- qio_channel_set_blocking(QIO_CHANNEL(s->listen_ioc), false, NULL);
+ sioc = qio_net_listener_wait_client(s->listener);
+ tcp_chr_set_client_ioc_name(chr, sioc);
+ tcp_chr_new_client(chr, sioc);
+ object_unref(OBJECT(sioc));
} else {
sioc = qio_channel_socket_new();
tcp_chr_set_client_ioc_name(chr, sioc);
@@ -797,12 +787,9 @@ static void char_socket_finalize(Object *obj)
s->reconnect_timer = 0;
}
qapi_free_SocketAddress(s->addr);
- if (s->listen_tag) {
- g_source_remove(s->listen_tag);
- s->listen_tag = 0;
- }
- if (s->listen_ioc) {
- object_unref(OBJECT(s->listen_ioc));
+ if (s->listener) {
+ qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL);
+ object_unref(OBJECT(s->listener));
}
if (s->tls_creds) {
object_unref(OBJECT(s->tls_creds));
@@ -935,29 +922,29 @@ static void qmp_chardev_open_socket(Chardev *chr,
} else {
if (s->is_listen) {
char *name;
- sioc = qio_channel_socket_new();
+ s->listener = qio_net_listener_new();
name = g_strdup_printf("chardev-tcp-listener-%s", chr->label);
- qio_channel_set_name(QIO_CHANNEL(sioc), name);
+ qio_net_listener_set_name(s->listener, name);
g_free(name);
- if (qio_channel_socket_listen_sync(sioc, s->addr, errp) < 0) {
+ if (qio_net_listener_open_sync(s->listener, s->addr, errp) < 0) {
+ object_unref(OBJECT(s->listener));
+ s->listener = NULL;
goto error;
}
qapi_free_SocketAddress(s->addr);
- s->addr = socket_local_address(sioc->fd, errp);
+ s->addr = socket_local_address(s->listener->sioc[0]->fd, errp);
update_disconnected_filename(s);
- s->listen_ioc = sioc;
if (is_waitconnect &&
qemu_chr_wait_connected(chr, errp) < 0) {
return;
}
if (!s->ioc) {
- s->listen_tag = qio_channel_add_watch(
- QIO_CHANNEL(s->listen_ioc), G_IO_IN,
- tcp_chr_accept, chr, NULL);
+ qio_net_listener_set_client_func(s->listener, tcp_chr_accept,
+ chr, NULL);
}
} else if (qemu_chr_wait_connected(chr, errp) < 0) {
goto error;
diff --git a/chardev/char.c b/chardev/char.c
index 2ae4f465ec..8c3765ee99 100644
--- a/chardev/char.c
+++ b/chardev/char.c
@@ -43,10 +43,19 @@ static Object *get_chardevs_root(void)
return container_get(object_get_root(), "/chardevs");
}
-void qemu_chr_be_event(Chardev *s, int event)
+static void chr_be_event(Chardev *s, int event)
{
CharBackend *be = s->be;
+ if (!be || !be->chr_event) {
+ return;
+ }
+
+ be->chr_event(be->opaque, event);
+}
+
+void qemu_chr_be_event(Chardev *s, int event)
+{
/* Keep track if the char device is open */
switch (event) {
case CHR_EVENT_OPENED:
@@ -57,11 +66,7 @@ void qemu_chr_be_event(Chardev *s, int event)
break;
}
- if (!be || !be->chr_event) {
- return;
- }
-
- be->chr_event(be->opaque, event);
+ CHARDEV_GET_CLASS(s)->chr_be_event(s, event);
}
/* Not reporting errors from writing to logfile, as logs are
@@ -244,6 +249,7 @@ static void char_class_init(ObjectClass *oc, void *data)
ChardevClass *cc = CHARDEV_CLASS(oc);
cc->chr_write = null_chr_write;
+ cc->chr_be_event = chr_be_event;
}
static void char_finalize(Object *obj)
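
chardev/char.c turns backend-event delivery into a ChardevClass hook: qemu_chr_be_event() still tracks open/closed state, but then dispatches through CHARDEV_GET_CLASS(s)->chr_be_event(), whose default implementation forwards to the single frontend, while char-mux.c overrides it so that only the currently focused frontend sees the event. Both sides, condensed from the hunks above:

    /* chardev/char.c: default hook, forwards to the frontend if any */
    static void chr_be_event(Chardev *s, int event)
    {
        CharBackend *be = s->be;

        if (!be || !be->chr_event) {
            return;
        }
        be->chr_event(be->opaque, event);
    }

    /* chardev/char-mux.c: override, only the focused frontend is notified */
    static void mux_chr_be_event(Chardev *chr, int event)
    {
        MuxChardev *d = MUX_CHARDEV(chr);

        if (d->focus != -1) {
            mux_chr_send_event(d, d->focus, event);
        }
    }
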
diff --git a/chardev/wctablet.c b/chardev/wctablet.c
index 6c13c2c58a..969d014574 100644
--- a/chardev/wctablet.c
+++ b/chardev/wctablet.c
@@ -25,10 +25,6 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
-#include <stdlib.h>
-#include <string.h>
-#include <sys/time.h>
-#include <time.h>
#include "qemu/osdep.h"
#include "qemu-common.h"
diff --git a/configure b/configure
index 0c6e7572db..6a040821c6 100755
--- a/configure
+++ b/configure
@@ -211,6 +211,17 @@ supported_xen_target() {
return 1
}
+supported_hvf_target() {
+ test "$hvf" = "yes" || return 1
+ glob "$1" "*-softmmu" || return 1
+ case "${1%-softmmu}" in
+ x86_64)
+ return 0
+ ;;
+ esac
+ return 1
+}
+
supported_target() {
case "$1" in
*-softmmu)
@@ -236,6 +247,7 @@ supported_target() {
supported_kvm_target "$1" && return 0
supported_xen_target "$1" && return 0
supported_hax_target "$1" && return 0
+ supported_hvf_target "$1" && return 0
print_error "TCG disabled, but hardware accelerator not available for '$target'"
return 1
}
@@ -325,6 +337,7 @@ vhost_vsock="no"
vhost_user=""
kvm="no"
hax="no"
+hvf="no"
rdma=""
gprof="no"
debug_tcg="no"
@@ -426,6 +439,7 @@ vxhs=""
supported_cpu="no"
supported_os="no"
bogus_os="no"
+malloc_trim=""
# parse CC options first
for opt do
@@ -740,6 +754,7 @@ Darwin)
bsd="yes"
darwin="yes"
hax="yes"
+ hvf="yes"
LDFLAGS_SHARED="-bundle -undefined dynamic_lookup"
if [ "$cpu" = "x86_64" ] ; then
QEMU_CFLAGS="-arch x86_64 $QEMU_CFLAGS"
@@ -1035,6 +1050,10 @@ for opt do
;;
--enable-hax) hax="yes"
;;
+ --disable-hvf) hvf="no"
+ ;;
+ --enable-hvf) hvf="yes"
+ ;;
--disable-tcg-interpreter) tcg_interpreter="no"
;;
--enable-tcg-interpreter) tcg_interpreter="yes"
@@ -1047,6 +1066,10 @@ for opt do
;;
--enable-tcg) tcg="yes"
;;
+ --disable-malloc-trim) malloc_trim="no"
+ ;;
+ --enable-malloc-trim) malloc_trim="yes"
+ ;;
--disable-spice) spice="no"
;;
--enable-spice) spice="yes"
@@ -1466,6 +1489,7 @@ Advanced options (experts only):
Default:trace-<pid>
--disable-slirp disable SLIRP userspace network connectivity
--enable-tcg-interpreter enable TCG with bytecode interpreter (TCI)
+ --enable-malloc-trim enable libc malloc_trim() for memory optimization
--oss-lib path to OSS library
--cpu=CPU Build for host CPU [$cpu]
--with-coroutine=BACKEND coroutine backend. Supported options:
@@ -1523,6 +1547,7 @@ disabled with --disable-FEATURE, default is enabled if available:
bluez bluez stack connectivity
kvm KVM acceleration support
hax HAX acceleration support
+ hvf Hypervisor.framework acceleration support
rdma RDMA-based migration support
vde support for vde network
netmap support for netmap network
@@ -1582,6 +1607,20 @@ fi
# Suppress writing compiled files
python="$python -B"
+# Check that the C compiler works. Doing this here before testing
+# the host CPU ensures that we had a valid CC to autodetect the
+# $cpu var (and we should bail right here if that's not the case).
+# It also allows the help message to be printed without a CC.
+write_c_skeleton;
+if compile_object ; then
+ : C compiler works ok
+else
+ error_exit "\"$cc\" either does not exist or does not work"
+fi
+if ! compile_prog ; then
+ error_exit "\"$cc\" cannot build an executable (is your linker broken?)"
+fi
+
# Now we have handled --enable-tcg-interpreter and know we're not just
# printing the help message, bail out if the host CPU isn't supported.
if test "$ARCH" = "unknown"; then
@@ -1603,17 +1642,6 @@ if test -z "$werror" ; then
fi
fi
-# check that the C compiler works.
-write_c_skeleton;
-if compile_object ; then
- : C compiler works ok
-else
- error_exit "\"$cc\" either does not exist or does not work"
-fi
-if ! compile_prog ; then
- error_exit "\"$cc\" cannot build an executable (is your linker broken?)"
-fi
-
if test "$bogus_os" = "yes"; then
# Now that we know that we're not printing the help and that
# the compiler works (so the results of the check_defines we used
@@ -3857,6 +3885,30 @@ if test "$tcmalloc" = "yes" && test "$jemalloc" = "yes" ; then
exit 1
fi
+# Even if malloc_trim() is available, these non-libc memory allocators
+# do not support it.
+if test "$tcmalloc" = "yes" || test "$jemalloc" = "yes" ; then
+ if test "$malloc_trim" = "yes" ; then
+ echo "Disabling malloc_trim with non-libc memory allocator"
+ fi
+ malloc_trim="no"
+fi
+
+#######################################
+# malloc_trim
+
+if test "$malloc_trim" != "no" ; then
+ cat > $TMPC << EOF
+#include <malloc.h>
+int main(void) { malloc_trim(0); return 0; }
+EOF
+ if compile_prog "" "" ; then
+ malloc_trim="yes"
+ else
+ malloc_trim="no"
+ fi
+fi
+
##########################################
# tcmalloc probe
@@ -3920,7 +3972,7 @@ fi
# check if memfd is supported
memfd=no
cat > $TMPC << EOF
-#include <sys/memfd.h>
+#include <sys/mman.h>
int main(void)
{
@@ -5023,6 +5075,21 @@ fi
#################################################
+# Check to see if we have the Hypervisor framework
+if [ "$darwin" = "yes" ] ; then
+ cat > $TMPC << EOF
+#include <Hypervisor/hv.h>
+int main() { return 0;}
+EOF
+ if ! compile_object ""; then
+ hvf='no'
+ else
+ hvf='yes'
+ LDFLAGS="-framework Hypervisor $LDFLAGS"
+ fi
+fi
+
+#################################################
# Sparc implicitly links with --relax, which is
# incompatible with -r, so --no-relax should be
# given. It does no harm to give it on other
@@ -5497,11 +5564,13 @@ echo "ATTR/XATTR support $attr"
echo "Install blobs $blobs"
echo "KVM support $kvm"
echo "HAX support $hax"
+echo "HVF support $hvf"
echo "TCG support $tcg"
if test "$tcg" = "yes" ; then
echo "TCG debug enabled $debug_tcg"
echo "TCG interpreter $tcg_interpreter"
fi
+echo "malloc trim support $malloc_trim"
echo "RDMA support $rdma"
echo "fdt support $fdt"
echo "preadv support $preadv"
@@ -6012,6 +6081,10 @@ if test "$opengl" = "yes" ; then
fi
fi
+if test "$malloc_trim" = "yes" ; then
+ echo "CONFIG_MALLOC_TRIM=y" >> $config_host_mak
+fi
+
if test "$avx2_opt" = "yes" ; then
echo "CONFIG_AVX2_OPT=y" >> $config_host_mak
fi
@@ -6564,6 +6637,9 @@ fi
if supported_hax_target $target; then
echo "CONFIG_HAX=y" >> $config_target_mak
fi
+if supported_hvf_target $target; then
+ echo "CONFIG_HVF=y" >> $config_target_mak
+fi
if test "$target_bigendian" = "yes" ; then
echo "TARGET_WORDS_BIGENDIAN=y" >> $config_target_mak
fi
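
The configure hunks add two host-feature knobs: a probe for libc's malloc_trim() (emitting CONFIG_MALLOC_TRIM, and skipped when tcmalloc or jemalloc is selected, since those allocators do not implement it) and detection of macOS's Hypervisor.framework (CONFIG_HVF plus -framework Hypervisor). A minimal sketch of how guarded code could call the probed function; the call site is illustrative only, the patch itself adds nothing beyond the probe and the config define:

    #include <malloc.h>

    #ifdef CONFIG_MALLOC_TRIM
        malloc_trim(0);   /* hand unused heap pages back to the kernel */
    #endif
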
diff --git a/contrib/systemd/qemu-guest-agent.service b/contrib/systemd/qemu-guest-agent.service
new file mode 100644
index 0000000000..51cd7b37ff
--- /dev/null
+++ b/contrib/systemd/qemu-guest-agent.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=QEMU Guest Agent
+BindTo=dev-virtio\x2dports-org.qemu.guest_agent.0.device
+After=dev-virtio\x2dports-org.qemu.guest_agent.0.device
+
+[Service]
+ExecStart=-/usr/bin/qemu-ga
+Restart=always
+RestartSec=0
+
+[Install]
diff --git a/contrib/systemd/qemu-pr-helper.service b/contrib/systemd/qemu-pr-helper.service
new file mode 100644
index 0000000000..a1d27b0221
--- /dev/null
+++ b/contrib/systemd/qemu-pr-helper.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Persistent Reservation Daemon for QEMU
+
+[Service]
+WorkingDirectory=/tmp
+Type=simple
+ExecStart=/usr/bin/qemu-pr-helper
+PrivateTmp=yes
+ProtectSystem=strict
+ReadWritePaths=/var/run
+RestrictAddressFamilies=AF_UNIX
+Restart=always
+RestartSec=0
+
+[Install]
diff --git a/contrib/systemd/qemu-pr-helper.socket b/contrib/systemd/qemu-pr-helper.socket
new file mode 100644
index 0000000000..9d7c3e5e2c
--- /dev/null
+++ b/contrib/systemd/qemu-pr-helper.socket
@@ -0,0 +1,9 @@
+[Unit]
+Description=Persistent Reservation Daemon for QEMU
+
+[Socket]
+ListenStream=/run/qemu-pr-helper.sock
+SocketMode=0600
+
+[Install]
+WantedBy=multi-user.target
diff --git a/cpus.c b/cpus.c
index 114c29b6a0..e8139de534 100644
--- a/cpus.c
+++ b/cpus.c
@@ -37,6 +37,7 @@
#include "sysemu/hw_accel.h"
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
+#include "sysemu/hvf.h"
#include "qmp-commands.h"
#include "exec/exec-all.h"
@@ -900,6 +901,10 @@ void cpu_synchronize_all_states(void)
CPU_FOREACH(cpu) {
cpu_synchronize_state(cpu);
+ /* TODO: move to cpu_synchronize_state() */
+ if (hvf_enabled()) {
+ hvf_cpu_synchronize_state(cpu);
+ }
}
}
@@ -909,6 +914,10 @@ void cpu_synchronize_all_post_reset(void)
CPU_FOREACH(cpu) {
cpu_synchronize_post_reset(cpu);
+ /* TODO: move to cpu_synchronize_post_reset() */
+ if (hvf_enabled()) {
+ hvf_cpu_synchronize_post_reset(cpu);
+ }
}
}
@@ -918,6 +927,10 @@ void cpu_synchronize_all_post_init(void)
CPU_FOREACH(cpu) {
cpu_synchronize_post_init(cpu);
+ /* TODO: move to cpu_synchronize_post_init() */
+ if (hvf_enabled()) {
+ hvf_cpu_synchronize_post_init(cpu);
+ }
}
}
@@ -1057,13 +1070,22 @@ static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}
+static void qemu_cpu_stop(CPUState *cpu, bool exit)
+{
+ g_assert(qemu_cpu_is_self(cpu));
+ cpu->stop = false;
+ cpu->stopped = true;
+ if (exit) {
+ cpu_exit(cpu);
+ }
+ qemu_cond_broadcast(&qemu_pause_cond);
+}
+
static void qemu_wait_io_event_common(CPUState *cpu)
{
atomic_mb_set(&cpu->thread_kicked, false);
if (cpu->stop) {
- cpu->stop = false;
- cpu->stopped = true;
- qemu_cond_broadcast(&qemu_pause_cond);
+ qemu_cpu_stop(cpu, false);
}
process_queued_cpu_work(cpu);
}
@@ -1098,6 +1120,14 @@ static void qemu_kvm_wait_io_event(CPUState *cpu)
qemu_wait_io_event_common(cpu);
}
+static void qemu_hvf_wait_io_event(CPUState *cpu)
+{
+ while (cpu_thread_is_idle(cpu)) {
+ qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+ }
+ qemu_wait_io_event_common(cpu);
+}
+
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
@@ -1435,6 +1465,48 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
return NULL;
}
+/* The HVF-specific vCPU thread function. This one should only run when the host
+ * CPU supports the VMX "unrestricted guest" feature. */
+static void *qemu_hvf_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+
+ int r;
+
+ assert(hvf_enabled());
+
+ rcu_register_thread();
+
+ qemu_mutex_lock_iothread();
+ qemu_thread_get_self(cpu->thread);
+
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->can_do_io = 1;
+ current_cpu = cpu;
+
+ hvf_init_vcpu(cpu);
+
+ /* signal CPU creation */
+ cpu->created = true;
+ qemu_cond_signal(&qemu_cpu_cond);
+
+ do {
+ if (cpu_can_run(cpu)) {
+ r = hvf_vcpu_exec(cpu);
+ if (r == EXCP_DEBUG) {
+ cpu_handle_guest_debug(cpu);
+ }
+ }
+ qemu_hvf_wait_io_event(cpu);
+ } while (!cpu->unplug || cpu_can_run(cpu));
+
+ hvf_vcpu_destroy(cpu);
+ cpu->created = false;
+ qemu_cond_signal(&qemu_cpu_cond);
+ qemu_mutex_unlock_iothread();
+ return NULL;
+}
+
#ifdef _WIN32
static void CALLBACK dummy_apc_func(ULONG_PTR unused)
{
@@ -1610,12 +1682,12 @@ void pause_all_vcpus(void)
qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
CPU_FOREACH(cpu) {
- cpu->stop = true;
- qemu_cpu_kick(cpu);
- }
-
- if (qemu_in_vcpu_thread()) {
- cpu_stop_current();
+ if (qemu_cpu_is_self(cpu)) {
+ qemu_cpu_stop(cpu, true);
+ } else {
+ cpu->stop = true;
+ qemu_cpu_kick(cpu);
+ }
}
while (!all_vcpus_paused()) {
@@ -1752,6 +1824,27 @@ static void qemu_kvm_start_vcpu(CPUState *cpu)
}
}
+static void qemu_hvf_start_vcpu(CPUState *cpu)
+{
+ char thread_name[VCPU_THREAD_NAME_SIZE];
+
+ /* HVF currently does not support TCG, and only runs in
+ * unrestricted-guest mode. */
+ assert(hvf_enabled());
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
+ cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+ qemu_cond_init(cpu->halt_cond);
+
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
+ cpu->cpu_index);
+ qemu_thread_create(cpu->thread, thread_name, qemu_hvf_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+ while (!cpu->created) {
+ qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+ }
+}
+
static void qemu_dummy_start_vcpu(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
@@ -1778,17 +1871,16 @@ void qemu_init_vcpu(CPUState *cpu)
/* If the target cpu hasn't set up any address spaces itself,
* give it the default one.
*/
- AddressSpace *as = g_new0(AddressSpace, 1);
-
- address_space_init(as, cpu->memory, "cpu-memory");
cpu->num_ases = 1;
- cpu_address_space_init(cpu, as, 0);
+ cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
}
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
} else if (hax_enabled()) {
qemu_hax_start_vcpu(cpu);
+ } else if (hvf_enabled()) {
+ qemu_hvf_start_vcpu(cpu);
} else if (tcg_enabled()) {
qemu_tcg_init_vcpu(cpu);
} else {
@@ -1799,10 +1891,7 @@ void qemu_init_vcpu(CPUState *cpu)
void cpu_stop_current(void)
{
if (current_cpu) {
- current_cpu->stop = false;
- current_cpu->stopped = true;
- cpu_exit(current_cpu);
- qemu_cond_broadcast(&qemu_pause_cond);
+ qemu_cpu_stop(current_cpu, true);
}
}
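All of the hvf_*() calls added above are made unconditionally, so builds without Hypervisor.framework need stand-ins that keep hvf_enabled() false. A minimal sketch of such stubs (illustrative only, not the real stub file; signatures are inferred from the call sites above and would live next to the other accel stubs, with sysemu/hvf.h included):

    bool hvf_enabled(void)
    {
        return false;
    }

    /* None of these can be reached while hvf_enabled() is false. */
    int hvf_init_vcpu(CPUState *cpu)                   { abort(); }
    int hvf_vcpu_exec(CPUState *cpu)                   { abort(); }
    void hvf_vcpu_destroy(CPUState *cpu)               { abort(); }
    void hvf_cpu_synchronize_state(CPUState *cpu)      { }
    void hvf_cpu_synchronize_post_reset(CPUState *cpu) { }
    void hvf_cpu_synchronize_post_init(CPUState *cpu)  { }
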
diff --git a/default-configs/arm-softmmu.mak b/default-configs/arm-softmmu.mak
index d37edc4312..b0d6e65038 100644
--- a/default-configs/arm-softmmu.mak
+++ b/default-configs/arm-softmmu.mak
@@ -130,5 +130,5 @@ CONFIG_SMBIOS=y
CONFIG_ASPEED_SOC=y
CONFIG_GPIO_KEY=y
CONFIG_MSF2=y
-
CONFIG_FW_CFG_DMA=y
+CONFIG_XILINX_AXI=y
diff --git a/disas/arm.c b/disas/arm.c
index 9967c45990..dda7b2a943 100644
--- a/disas/arm.c
+++ b/disas/arm.c
@@ -1662,7 +1662,7 @@ print_insn_coprocessor (bfd_vma pc, struct disassemble_info *info, long given,
}
else
{
- /* Only match unconditional instuctions against unconditional
+ /* Only match unconditional instructions against unconditional
patterns. */
if ((given & 0xf0000000) == 0xf0000000)
{
diff --git a/disas/nios2.c b/disas/nios2.c
index b342936d21..de11f04cc4 100644
--- a/disas/nios2.c
+++ b/disas/nios2.c
@@ -1756,7 +1756,6 @@ extern const int nios2_num_r2_reg_range_mappings;
#endif /* _NIOS2_H */
/*#include "sysdep.h"
-#include <stdio.h>
#include "opcode/nios2.h"
*/
/* Register string table */
@@ -2521,8 +2520,6 @@ const int nios2_num_r2_reg_range_mappings = 8;
#include "dis-asm.h"
#include "opcode/nios2.h"
#include "libiberty.h"
-#include <string.h>
-#include <assert.h>
*/
/* No symbol table is available when this code runs out in an embedded
system as when it is used for disassembler support in a monitor. */
diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt
index e4d340bbb7..4f9012d154 100644
--- a/docs/devel/multiple-iothreads.txt
+++ b/docs/devel/multiple-iothreads.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2014 Red Hat Inc.
+Copyright (c) 2014-2017 Red Hat Inc.
This work is licensed under the terms of the GNU GPL, version 2 or later. See
the COPYING file in the top-level directory.
@@ -92,8 +92,9 @@ aio_context_acquire()/aio_context_release() for mutual exclusion. Once the
context is acquired no other thread can access it or run event loop iterations
in this AioContext.
-aio_context_acquire()/aio_context_release() calls may be nested. This
-means you can call them if you're not sure whether #2 applies.
+Legacy code sometimes nests aio_context_acquire()/aio_context_release() calls.
+Do not use nesting anymore, it is incompatible with the BDRV_POLL_WHILE() macro
+used in the block layer and can lead to hangs.
There is currently no lock ordering rule if a thread needs to acquire multiple
AioContexts simultaneously. Therefore, it is only safe for code holding the
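The rule the new text states is: take the AioContext lock once, do the work, release it, and never re-acquire a context the thread already holds. A minimal sketch of the intended flat pattern (blk is a placeholder BlockBackend):

    AioContext *ctx = blk_get_aio_context(blk);

    aio_context_acquire(ctx);
    blk_flush(blk);                 /* any work that needs the context held */
    aio_context_release(ctx);
    /* Re-acquiring ctx while it was still held would be the nesting that
     * BDRV_POLL_WHILE() cannot cope with. */
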
diff --git a/docs/devel/qapi-code-gen.txt b/docs/devel/qapi-code-gen.txt
index f04c63fe82..06ab699066 100644
--- a/docs/devel/qapi-code-gen.txt
+++ b/docs/devel/qapi-code-gen.txt
@@ -63,7 +63,7 @@ Comment text starting with '=' is a section title:
Double the '=' for a subsection title:
- # == Subection title
+ # == Subsection title
'|' denotes examples:
diff --git a/dump.c b/dump.c
index d4a8c942eb..e9dfed060a 100644
--- a/dump.c
+++ b/dump.c
@@ -788,12 +788,7 @@ static bool note_name_equal(DumpState *s,
get_note_sizes(s, note, &head_size, &name_size, NULL);
head_size = ROUND_UP(head_size, 4);
- if (name_size != len ||
- memcmp(note + head_size, "VMCOREINFO", len)) {
- return false;
- }
-
- return true;
+ return name_size == len && memcmp(note + head_size, name, len) == 0;
}
/* write common header, sub header and elf note to vmcore */
diff --git a/exec.c b/exec.c
index 03238a3449..4722e521d4 100644
--- a/exec.c
+++ b/exec.c
@@ -18,8 +18,6 @@
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
-#ifndef _WIN32
-#endif
#include "qemu/cutils.h"
#include "cpu.h"
@@ -51,7 +49,6 @@
#include "trace-root.h"
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
-#include <fcntl.h>
#include <linux/falloc.h>
#endif
@@ -708,9 +705,17 @@ CPUState *qemu_get_cpu(int index)
}
#if !defined(CONFIG_USER_ONLY)
-void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
+void cpu_address_space_init(CPUState *cpu, int asidx,
+ const char *prefix, MemoryRegion *mr)
{
CPUAddressSpace *newas;
+ AddressSpace *as = g_new0(AddressSpace, 1);
+ char *as_name;
+
+ assert(mr);
+ as_name = g_strdup_printf("%s-%d", prefix, cpu->cpu_index);
+ address_space_init(as, mr, as_name);
+ g_free(as_name);
/* Target code should have set num_ases before calling us */
assert(asidx < cpu->num_ases);
@@ -2720,6 +2725,37 @@ static uint16_t dummy_section(PhysPageMap *map, FlatView *fv, MemoryRegion *mr)
return phys_section_add(map, &section);
}
+static void readonly_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ /* Ignore any write to ROM. */
+}
+
+static bool readonly_mem_accepts(void *opaque, hwaddr addr,
+ unsigned size, bool is_write)
+{
+ return is_write;
+}
+
+/* This will only be used for writes, because reads are special cased
+ * to directly access the underlying host ram.
+ */
+static const MemoryRegionOps readonly_mem_ops = {
+ .write = readonly_mem_write,
+ .valid.accepts = readonly_mem_accepts,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 8,
+ .unaligned = false,
+ },
+};
+
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
int asidx = cpu_asidx_from_attrs(cpu, attrs);
@@ -2732,7 +2768,8 @@ MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
static void io_mem_init(void)
{
- memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
+ memory_region_init_io(&io_mem_rom, NULL, &readonly_mem_ops,
+ NULL, NULL, UINT64_MAX);
memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
NULL, UINT64_MAX);
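With the new signature a target no longer allocates or names the AddressSpace itself: it passes an index, a name prefix and the root MemoryRegion, and cpu_address_space_init() builds the per-CPU "prefix-N" name internally. A sketch of a caller registering two address spaces (the index meaning and the secure_mr variable are placeholders):

    cs->num_ases = 2;
    cpu_address_space_init(cs, 0, "cpu-memory", get_system_memory());
    cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mr);
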
diff --git a/fsdev/file-op-9p.h b/fsdev/file-op-9p.h
index 474c79d003..3fa062b39f 100644
--- a/fsdev/file-op-9p.h
+++ b/fsdev/file-op-9p.h
@@ -22,22 +22,19 @@
#define SM_LOCAL_MODE_BITS 0600
#define SM_LOCAL_DIR_MODE_BITS 0700
-typedef struct FsCred
-{
+typedef struct FsCred {
uid_t fc_uid;
gid_t fc_gid;
mode_t fc_mode;
dev_t fc_rdev;
} FsCred;
-struct xattr_operations;
-struct FsContext;
-struct V9fsPath;
+typedef struct FsContext FsContext;
+typedef struct V9fsPath V9fsPath;
-typedef struct extended_ops {
- int (*get_st_gen)(struct FsContext *, struct V9fsPath *,
- mode_t, uint64_t *);
-} extended_ops;
+typedef struct ExtendedOps {
+ int (*get_st_gen)(FsContext *, V9fsPath *, mode_t, uint64_t *);
+} ExtendedOps;
/* export flags */
#define V9FS_IMMEDIATE_WRITEOUT 0x00000001
@@ -67,6 +64,8 @@ typedef struct extended_ops {
typedef struct FileOperations FileOperations;
+typedef struct XattrOperations XattrOperations;
+
/*
* Structure to store the various fsdev's passed through command line.
*/
@@ -80,24 +79,23 @@ typedef struct FsDriverEntry {
mode_t dmode;
} FsDriverEntry;
-typedef struct FsContext
-{
+struct FsContext {
uid_t uid;
char *fs_root;
int export_flags;
- struct xattr_operations **xops;
- struct extended_ops exops;
+ XattrOperations **xops;
+ ExtendedOps exops;
FsThrottle *fst;
/* fs driver specific data */
void *private;
mode_t fmode;
mode_t dmode;
-} FsContext;
+};
-typedef struct V9fsPath {
+struct V9fsPath {
uint16_t size;
char *data;
-} V9fsPath;
+};
typedef union V9fsFidOpenState V9fsFidOpenState;
@@ -105,9 +103,9 @@ void cred_init(FsCred *);
struct FileOperations
{
- int (*parse_opts)(QemuOpts *, struct FsDriverEntry *);
- int (*init)(struct FsContext *);
- void (*cleanup)(struct FsContext *);
+ int (*parse_opts)(QemuOpts *, FsDriverEntry *, Error **errp);
+ int (*init)(FsContext *, Error **errp);
+ void (*cleanup)(FsContext *);
int (*lstat)(FsContext *, V9fsPath *, struct stat *);
ssize_t (*readlink)(FsContext *, V9fsPath *, char *, size_t);
int (*chmod)(FsContext *, V9fsPath *, FsCred *);
diff --git a/fsdev/qemu-fsdev.c b/fsdev/qemu-fsdev.c
index 266e442b87..941e309657 100644
--- a/fsdev/qemu-fsdev.c
+++ b/fsdev/qemu-fsdev.c
@@ -37,6 +37,7 @@ int qemu_fsdev_add(QemuOpts *opts)
const char *fsdriver = qemu_opt_get(opts, "fsdriver");
const char *writeout = qemu_opt_get(opts, "writeout");
bool ro = qemu_opt_get_bool(opts, "readonly", 0);
+ Error *local_err = NULL;
if (!fsdev_id) {
error_report("fsdev: No id specified");
@@ -74,7 +75,8 @@ int qemu_fsdev_add(QemuOpts *opts)
}
if (fsle->fse.ops->parse_opts) {
- if (fsle->fse.ops->parse_opts(opts, &fsle->fse)) {
+ if (fsle->fse.ops->parse_opts(opts, &fsle->fse, &local_err)) {
+ error_report_err(local_err);
g_free(fsle->fse.fsdev_id);
g_free(fsle);
return -1;
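Backends now hand parse failures back through the Error API instead of printing them, so the caller above decides how to report them. A sketch of a backend hook written against the new prototype ("mydrv" and its handling of the path option are made up for illustration):

    static int mydrv_parse_opts(QemuOpts *opts, FsDriverEntry *fse, Error **errp)
    {
        const char *path = qemu_opt_get(opts, "path");

        if (!path) {
            error_setg(errp, "path property not set");
            return -1;
        }
        fse->path = g_strdup(path);  /* assumes FsDriverEntry keeps a path copy */
        return 0;
    }
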
diff --git a/gdbstub.c b/gdbstub.c
index 2a94030d3b..f1d51480f7 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -21,6 +21,7 @@
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "cpu.h"
+#include "trace-root.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
@@ -287,21 +288,6 @@ static int gdb_signal_to_target (int sig)
return -1;
}
-/* #define DEBUG_GDB */
-
-#ifdef DEBUG_GDB
-# define DEBUG_GDB_GATE 1
-#else
-# define DEBUG_GDB_GATE 0
-#endif
-
-#define gdb_debug(fmt, ...) do { \
- if (DEBUG_GDB_GATE) { \
- fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
- } \
-} while (0)
-
-
typedef struct GDBRegisterState {
int base_reg;
int num_regs;
@@ -410,10 +396,13 @@ int use_gdb_syscalls(void)
/* Resume execution. */
static inline void gdb_continue(GDBState *s)
{
+
#ifdef CONFIG_USER_ONLY
s->running_state = 1;
+ trace_gdbstub_op_continue();
#else
if (!runstate_needs_reset()) {
+ trace_gdbstub_op_continue();
vm_start();
}
#endif
@@ -434,6 +423,7 @@ static int gdb_continue_partial(GDBState *s, char *newstates)
*/
CPU_FOREACH(cpu) {
if (newstates[cpu->cpu_index] == 's') {
+ trace_gdbstub_op_stepping(cpu->cpu_index);
cpu_single_step(cpu, sstep_flags);
}
}
@@ -452,11 +442,13 @@ static int gdb_continue_partial(GDBState *s, char *newstates)
case 1:
break; /* nothing to do here */
case 's':
+ trace_gdbstub_op_stepping(cpu->cpu_index);
cpu_single_step(cpu, sstep_flags);
cpu_resume(cpu);
flag = 1;
break;
case 'c':
+ trace_gdbstub_op_continue_cpu(cpu->cpu_index);
cpu_resume(cpu);
flag = 1;
break;
@@ -538,12 +530,49 @@ static void hextomem(uint8_t *mem, const char *buf, int len)
}
}
+static void hexdump(const char *buf, int len,
+ void (*trace_fn)(size_t ofs, char const *text))
+{
+ char line_buffer[3 * 16 + 4 + 16 + 1];
+
+ size_t i;
+ for (i = 0; i < len || (i & 0xF); ++i) {
+ size_t byte_ofs = i & 15;
+
+ if (byte_ofs == 0) {
+ memset(line_buffer, ' ', 3 * 16 + 4 + 16);
+ line_buffer[3 * 16 + 4 + 16] = 0;
+ }
+
+ size_t col_group = (i >> 2) & 3;
+ size_t hex_col = byte_ofs * 3 + col_group;
+ size_t txt_col = 3 * 16 + 4 + byte_ofs;
+
+ if (i < len) {
+ char value = buf[i];
+
+ line_buffer[hex_col + 0] = tohex((value >> 4) & 0xF);
+ line_buffer[hex_col + 1] = tohex((value >> 0) & 0xF);
+ line_buffer[txt_col + 0] = (value >= ' ' && value < 127)
+ ? value
+ : '.';
+ }
+
+ if (byte_ofs == 0xF)
+ trace_fn(i & -16, line_buffer);
+ }
+}
+
/* return -1 if error, 0 if OK */
-static int put_packet_binary(GDBState *s, const char *buf, int len)
+static int put_packet_binary(GDBState *s, const char *buf, int len, bool dump)
{
int csum, i;
uint8_t *p;
+ if (dump && trace_event_get_state_backends(TRACE_GDBSTUB_IO_BINARYREPLY)) {
+ hexdump(buf, len, trace_gdbstub_io_binaryreply);
+ }
+
for(;;) {
p = s->last_packet;
*(p++) = '$';
@@ -576,9 +605,9 @@ static int put_packet_binary(GDBState *s, const char *buf, int len)
/* return -1 if error, 0 if OK */
static int put_packet(GDBState *s, const char *buf)
{
- gdb_debug("reply='%s'\n", buf);
+ trace_gdbstub_io_reply(buf);
- return put_packet_binary(s, buf, strlen(buf));
+ return put_packet_binary(s, buf, strlen(buf), false);
}
/* Encode data using the encoding for 'x' packets. */
@@ -975,8 +1004,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
uint8_t *registers;
target_ulong addr, len;
-
- gdb_debug("command='%s'\n", line_buf);
+ trace_gdbstub_io_command(line_buf);
p = line_buf;
ch = *p++;
@@ -999,7 +1027,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
}
s->signal = 0;
gdb_continue(s);
- return RS_IDLE;
+ return RS_IDLE;
case 'C':
s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
if (s->signal == -1)
@@ -1045,7 +1073,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
}
cpu_single_step(s->c_cpu, sstep_flags);
gdb_continue(s);
- return RS_IDLE;
+ return RS_IDLE;
case 'F':
{
target_ulong ret;
@@ -1267,6 +1295,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
len = snprintf((char *)mem_buf, sizeof(buf) / 2,
"CPU#%d [%s]", cpu->cpu_index,
cpu->halted ? "halted " : "running");
+ trace_gdbstub_op_extra_info((char *)mem_buf);
memtohex(buf, mem_buf, len);
put_packet(s, buf);
}
@@ -1350,7 +1379,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
buf[0] = 'l';
len = memtox(buf + 1, xml + addr, total_len - addr);
}
- put_packet_binary(s, buf, len + 1);
+ put_packet_binary(s, buf, len + 1, true);
break;
}
if (is_query_packet(p, "Attached", ':')) {
@@ -1407,29 +1436,38 @@ static void gdb_vm_state_change(void *opaque, int running, RunState state)
type = "";
break;
}
+ trace_gdbstub_hit_watchpoint(type, cpu_gdb_index(cpu),
+ (target_ulong)cpu->watchpoint_hit->vaddr);
snprintf(buf, sizeof(buf),
"T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
GDB_SIGNAL_TRAP, cpu_gdb_index(cpu), type,
(target_ulong)cpu->watchpoint_hit->vaddr);
cpu->watchpoint_hit = NULL;
goto send_packet;
+ } else {
+ trace_gdbstub_hit_break();
}
tb_flush(cpu);
ret = GDB_SIGNAL_TRAP;
break;
case RUN_STATE_PAUSED:
+ trace_gdbstub_hit_paused();
ret = GDB_SIGNAL_INT;
break;
case RUN_STATE_SHUTDOWN:
+ trace_gdbstub_hit_shutdown();
ret = GDB_SIGNAL_QUIT;
break;
case RUN_STATE_IO_ERROR:
+ trace_gdbstub_hit_io_error();
ret = GDB_SIGNAL_IO;
break;
case RUN_STATE_WATCHDOG:
+ trace_gdbstub_hit_watchdog();
ret = GDB_SIGNAL_ALRM;
break;
case RUN_STATE_INTERNAL_ERROR:
+ trace_gdbstub_hit_internal_error();
ret = GDB_SIGNAL_ABRT;
break;
case RUN_STATE_SAVE_VM:
@@ -1439,6 +1477,7 @@ static void gdb_vm_state_change(void *opaque, int running, RunState state)
ret = GDB_SIGNAL_XCPU;
break;
default:
+ trace_gdbstub_hit_unknown(state);
ret = GDB_SIGNAL_UNKNOWN;
break;
}
@@ -1538,12 +1577,12 @@ static void gdb_read_byte(GDBState *s, int ch)
/* Waiting for a response to the last packet. If we see the start
of a new command then abandon the previous response. */
if (ch == '-') {
- gdb_debug("Got NACK, retransmitting\n");
+ trace_gdbstub_err_got_nack();
put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
} else if (ch == '+') {
- gdb_debug("Got ACK\n");
+ trace_gdbstub_io_got_ack();
} else {
- gdb_debug("Got '%c' when expecting ACK/NACK\n", ch);
+ trace_gdbstub_io_got_unexpected((uint8_t)ch);
}
if (ch == '+' || ch == '$')
@@ -1566,7 +1605,7 @@ static void gdb_read_byte(GDBState *s, int ch)
s->line_sum = 0;
s->state = RS_GETLINE;
} else {
- gdb_debug("received garbage between packets: 0x%x\n", ch);
+ trace_gdbstub_err_garbage((uint8_t)ch);
}
break;
case RS_GETLINE:
@@ -1582,7 +1621,7 @@ static void gdb_read_byte(GDBState *s, int ch)
/* end of command, start of checksum*/
s->state = RS_CHKSUM1;
} else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
- gdb_debug("command buffer overrun, dropping command\n");
+ trace_gdbstub_err_overrun();
s->state = RS_IDLE;
} else {
/* unescaped command character */
@@ -1596,7 +1635,7 @@ static void gdb_read_byte(GDBState *s, int ch)
s->state = RS_CHKSUM1;
} else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
/* command buffer overrun */
- gdb_debug("command buffer overrun, dropping command\n");
+ trace_gdbstub_err_overrun();
s->state = RS_IDLE;
} else {
/* parse escaped character and leave escape state */
@@ -1608,18 +1647,18 @@ static void gdb_read_byte(GDBState *s, int ch)
case RS_GETLINE_RLE:
if (ch < ' ') {
/* invalid RLE count encoding */
- gdb_debug("got invalid RLE count: 0x%x\n", ch);
+ trace_gdbstub_err_invalid_repeat((uint8_t)ch);
s->state = RS_GETLINE;
} else {
/* decode repeat length */
int repeat = (unsigned char)ch - ' ' + 3;
if (s->line_buf_index + repeat >= sizeof(s->line_buf) - 1) {
/* that many repeats would overrun the command buffer */
- gdb_debug("command buffer overrun, dropping command\n");
+ trace_gdbstub_err_overrun();
s->state = RS_IDLE;
} else if (s->line_buf_index < 1) {
/* got a repeat but we have nothing to repeat */
- gdb_debug("got invalid RLE sequence\n");
+ trace_gdbstub_err_invalid_rle();
s->state = RS_GETLINE;
} else {
/* repeat the last character */
@@ -1634,7 +1673,7 @@ static void gdb_read_byte(GDBState *s, int ch)
case RS_CHKSUM1:
/* get high hex digit of checksum */
if (!isxdigit(ch)) {
- gdb_debug("got invalid command checksum digit\n");
+ trace_gdbstub_err_checksum_invalid((uint8_t)ch);
s->state = RS_GETLINE;
break;
}
@@ -1645,14 +1684,14 @@ static void gdb_read_byte(GDBState *s, int ch)
case RS_CHKSUM2:
/* get low hex digit of checksum */
if (!isxdigit(ch)) {
- gdb_debug("got invalid command checksum digit\n");
+ trace_gdbstub_err_checksum_invalid((uint8_t)ch);
s->state = RS_GETLINE;
break;
}
s->line_csum |= fromhex(ch);
if (s->line_csum != (s->line_sum & 0xff)) {
- gdb_debug("got command packet with incorrect checksum\n");
+ trace_gdbstub_err_checksum_incorrect(s->line_sum, s->line_csum);
/* send NAK reply */
reply = '-';
put_buffer(s, &reply, 1);
@@ -1686,6 +1725,8 @@ void gdb_exit(CPUArchState *env, int code)
}
#endif
+ trace_gdbstub_op_exiting((uint8_t)code);
+
snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
put_packet(s, buf);
@@ -1944,6 +1985,8 @@ static const TypeInfo char_gdb_type_info = {
int gdbserver_start(const char *device)
{
+ trace_gdbstub_op_start(device);
+
GDBState *s;
char gdbstub_device_name[128];
Chardev *chr = NULL;
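hexdump() above emits one line per 16 bytes, hex columns followed by printable ASCII, and hands each finished line to the callback together with the 16-byte-aligned offset of its first byte. Any consumer with a matching callback works; for illustration, a plain stderr dumper:

    /* Matches hexdump()'s void (*)(size_t ofs, char const *text) callback. */
    static void dump_line_stderr(size_t ofs, const char *text)
    {
        fprintf(stderr, "%06zx: %s\n", ofs, text);
    }

    /* ... */
    hexdump(buf, len, dump_line_stderr);
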
diff --git a/hmp-commands.hx b/hmp-commands.hx
index 4afd57cf5f..6d5ebdf6ab 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -666,39 +666,6 @@ Compute the checksum of a memory region.
ETEXI
{
- .name = "usb_add",
- .args_type = "devname:s",
- .params = "device",
- .help = "add USB device (e.g. 'host:bus.addr' or 'host:vendor_id:product_id')",
- .cmd = hmp_usb_add,
- },
-
-STEXI
-@item usb_add @var{devname}
-@findex usb_add
-Add the USB device @var{devname}. This command is deprecated, please
-use @code{device_add} instead. For details of available devices see
-@ref{usb_devices}
-ETEXI
-
- {
- .name = "usb_del",
- .args_type = "devname:s",
- .params = "device",
- .help = "remove USB device 'bus.addr'",
- .cmd = hmp_usb_del,
- },
-
-STEXI
-@item usb_del @var{devname}
-@findex usb_del
-Remove the USB device @var{devname} from the QEMU virtual USB
-hub. @var{devname} has the syntax @code{bus.addr}. Use the monitor
-command @code{info usb} to see the devices you can remove. This
-command is deprecated, please use @code{device_del} instead.
-ETEXI
-
- {
.name = "device_add",
.args_type = "device:O",
.params = "driver[,prop=value][,...]",
diff --git a/hmp.c b/hmp.c
index 35a7041824..2d72f94193 100644
--- a/hmp.c
+++ b/hmp.c
@@ -2318,7 +2318,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
{
BlockBackend *blk;
BlockBackend *local_blk = NULL;
- AioContext *aio_context;
const char* device = qdict_get_str(qdict, "device");
const char* command = qdict_get_str(qdict, "command");
Error *err = NULL;
@@ -2338,9 +2337,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
}
}
- aio_context = blk_get_aio_context(blk);
- aio_context_acquire(aio_context);
-
/*
* Notably absent: Proper permission management. This is sad, but it seems
* almost impossible to achieve without changing the semantics and thereby
@@ -2368,8 +2364,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict)
*/
qemuio_command(blk, command);
- aio_context_release(aio_context);
-
fail:
blk_unref(local_blk);
hmp_handle_error(mon, &err);
diff --git a/hw/9pfs/9p-handle.c b/hw/9pfs/9p-handle.c
index 9875f1894c..c1681d3c8a 100644
--- a/hw/9pfs/9p-handle.c
+++ b/hw/9pfs/9p-handle.c
@@ -41,10 +41,10 @@
#define BTRFS_SUPER_MAGIC 0x9123683E
#endif
-struct handle_data {
+typedef struct HandleData {
int mountfd;
int handle_bytes;
-};
+} HandleData;
static inline int name_to_handle(int dirfd, const char *name,
struct file_handle *fh, int *mnt_id, int flags)
@@ -79,7 +79,7 @@ static int handle_lstat(FsContext *fs_ctx, V9fsPath *fs_path,
struct stat *stbuf)
{
int fd, ret;
- struct handle_data *data = (struct handle_data *)fs_ctx->private;
+ HandleData *data = (HandleData *) fs_ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_PATH);
if (fd < 0) {
@@ -94,7 +94,7 @@ static ssize_t handle_readlink(FsContext *fs_ctx, V9fsPath *fs_path,
char *buf, size_t bufsz)
{
int fd, ret;
- struct handle_data *data = (struct handle_data *)fs_ctx->private;
+ HandleData *data = (HandleData *) fs_ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_PATH);
if (fd < 0) {
@@ -118,7 +118,7 @@ static int handle_closedir(FsContext *ctx, V9fsFidOpenState *fs)
static int handle_open(FsContext *ctx, V9fsPath *fs_path,
int flags, V9fsFidOpenState *fs)
{
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
fs->fd = open_by_handle(data->mountfd, fs_path->data, flags);
return fs->fd;
@@ -207,7 +207,7 @@ static ssize_t handle_pwritev(FsContext *ctx, V9fsFidOpenState *fs,
static int handle_chmod(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
{
int fd, ret;
- struct handle_data *data = (struct handle_data *)fs_ctx->private;
+ HandleData *data = (HandleData *) fs_ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK);
if (fd < 0) {
@@ -222,7 +222,7 @@ static int handle_mknod(FsContext *fs_ctx, V9fsPath *dir_path,
const char *name, FsCred *credp)
{
int dirfd, ret;
- struct handle_data *data = (struct handle_data *)fs_ctx->private;
+ HandleData *data = (HandleData *) fs_ctx->private;
dirfd = open_by_handle(data->mountfd, dir_path->data, O_PATH);
if (dirfd < 0) {
@@ -240,7 +240,7 @@ static int handle_mkdir(FsContext *fs_ctx, V9fsPath *dir_path,
const char *name, FsCred *credp)
{
int dirfd, ret;
- struct handle_data *data = (struct handle_data *)fs_ctx->private;
+ HandleData *data = (HandleData *) fs_ctx->private;
dirfd = open_by_handle(data->mountfd, dir_path->data, O_PATH);
if (dirfd < 0) {
@@ -272,7 +272,7 @@ static int handle_open2(FsContext *fs_ctx, V9fsPath *dir_path, const char *name,
{
int ret;
int dirfd, fd;
- struct handle_data *data = (struct handle_data *)fs_ctx->private;
+ HandleData *data = (HandleData *) fs_ctx->private;
dirfd = open_by_handle(data->mountfd, dir_path->data, O_PATH);
if (dirfd < 0) {
@@ -297,7 +297,7 @@ static int handle_symlink(FsContext *fs_ctx, const char *oldpath,
V9fsPath *dir_path, const char *name, FsCred *credp)
{
int fd, dirfd, ret;
- struct handle_data *data = (struct handle_data *)fs_ctx->private;
+ HandleData *data = (HandleData *) fs_ctx->private;
dirfd = open_by_handle(data->mountfd, dir_path->data, O_PATH);
if (dirfd < 0) {
@@ -322,7 +322,7 @@ static int handle_link(FsContext *ctx, V9fsPath *oldpath,
V9fsPath *dirpath, const char *name)
{
int oldfd, newdirfd, ret;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
oldfd = open_by_handle(data->mountfd, oldpath->data, O_PATH);
if (oldfd < 0) {
@@ -342,7 +342,7 @@ static int handle_link(FsContext *ctx, V9fsPath *oldpath,
static int handle_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size)
{
int fd, ret;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK | O_WRONLY);
if (fd < 0) {
@@ -363,7 +363,7 @@ static int handle_rename(FsContext *ctx, const char *oldpath,
static int handle_chown(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
{
int fd, ret;
- struct handle_data *data = (struct handle_data *)fs_ctx->private;
+ HandleData *data = (HandleData *) fs_ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_PATH);
if (fd < 0) {
@@ -379,7 +379,7 @@ static int handle_utimensat(FsContext *ctx, V9fsPath *fs_path,
{
int ret;
int fd;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK);
if (fd < 0) {
@@ -418,7 +418,7 @@ static int handle_statfs(FsContext *ctx, V9fsPath *fs_path,
struct statfs *stbuf)
{
int fd, ret;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK);
if (fd < 0) {
@@ -433,7 +433,7 @@ static ssize_t handle_lgetxattr(FsContext *ctx, V9fsPath *fs_path,
const char *name, void *value, size_t size)
{
int fd, ret;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK);
if (fd < 0) {
@@ -448,7 +448,7 @@ static ssize_t handle_llistxattr(FsContext *ctx, V9fsPath *fs_path,
void *value, size_t size)
{
int fd, ret;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK);
if (fd < 0) {
@@ -463,7 +463,7 @@ static int handle_lsetxattr(FsContext *ctx, V9fsPath *fs_path, const char *name,
void *value, size_t size, int flags)
{
int fd, ret;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK);
if (fd < 0) {
@@ -478,7 +478,7 @@ static int handle_lremovexattr(FsContext *ctx, V9fsPath *fs_path,
const char *name)
{
int fd, ret;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK);
if (fd < 0) {
@@ -495,7 +495,7 @@ static int handle_name_to_path(FsContext *ctx, V9fsPath *dir_path,
char *buffer;
struct file_handle *fh;
int dirfd, ret, mnt_id;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
/* "." and ".." are not allowed */
if (!strcmp(name, ".") || !strcmp(name, "..")) {
@@ -536,7 +536,7 @@ static int handle_renameat(FsContext *ctx, V9fsPath *olddir,
const char *new_name)
{
int olddirfd, newdirfd, ret;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
olddirfd = open_by_handle(data->mountfd, olddir->data, O_PATH);
if (olddirfd < 0) {
@@ -557,7 +557,7 @@ static int handle_unlinkat(FsContext *ctx, V9fsPath *dir,
const char *name, int flags)
{
int dirfd, ret;
- struct handle_data *data = (struct handle_data *)ctx->private;
+ HandleData *data = (HandleData *) ctx->private;
int rflags;
dirfd = open_by_handle(data->mountfd, dir->data, O_PATH);
@@ -604,12 +604,12 @@ static int handle_ioc_getversion(FsContext *ctx, V9fsPath *path,
#endif
}
-static int handle_init(FsContext *ctx)
+static int handle_init(FsContext *ctx, Error **errp)
{
int ret, mnt_id;
struct statfs stbuf;
struct file_handle fh;
- struct handle_data *data = g_malloc(sizeof(struct handle_data));
+ HandleData *data = g_malloc(sizeof(HandleData));
data->mountfd = open(ctx->fs_root, O_DIRECTORY);
if (data->mountfd < 0) {
@@ -646,17 +646,19 @@ out:
static void handle_cleanup(FsContext *ctx)
{
- struct handle_data *data = ctx->private;
+ HandleData *data = ctx->private;
close(data->mountfd);
g_free(data);
}
-static int handle_parse_opts(QemuOpts *opts, struct FsDriverEntry *fse)
+static int handle_parse_opts(QemuOpts *opts, FsDriverEntry *fse, Error **errp)
{
const char *sec_model = qemu_opt_get(opts, "security_model");
const char *path = qemu_opt_get(opts, "path");
+ warn_report("handle backend is deprecated");
+
if (sec_model) {
error_report("Invalid argument security_model specified with handle fsdriver");
return -1;
diff --git a/hw/9pfs/9p-local.c b/hw/9pfs/9p-local.c
index e51af87309..b25c185ff0 100644
--- a/hw/9pfs/9p-local.c
+++ b/hw/9pfs/9p-local.c
@@ -1400,13 +1400,14 @@ static int local_ioc_getversion(FsContext *ctx, V9fsPath *path,
#endif
}
-static int local_init(FsContext *ctx)
+static int local_init(FsContext *ctx, Error **errp)
{
struct statfs stbuf;
LocalData *data = g_malloc(sizeof(*data));
data->mountfd = open(ctx->fs_root, O_DIRECTORY | O_RDONLY);
if (data->mountfd == -1) {
+ error_setg_errno(errp, errno, "failed to open '%s'", ctx->fs_root);
goto err;
}
@@ -1459,16 +1460,21 @@ static void local_cleanup(FsContext *ctx)
g_free(data);
}
-static int local_parse_opts(QemuOpts *opts, struct FsDriverEntry *fse)
+static void error_append_security_model_hint(Error **errp)
+{
+ error_append_hint(errp, "Valid options are: security_model="
+ "[passthrough|mapped-xattr|mapped-file|none]\n");
+}
+
+static int local_parse_opts(QemuOpts *opts, FsDriverEntry *fse, Error **errp)
{
const char *sec_model = qemu_opt_get(opts, "security_model");
const char *path = qemu_opt_get(opts, "path");
- Error *err = NULL;
+ Error *local_err = NULL;
if (!sec_model) {
- error_report("Security model not specified, local fs needs security model");
- error_printf("valid options are:"
- "\tsecurity_model=[passthrough|mapped-xattr|mapped-file|none]\n");
+ error_setg(errp, "security_model property not set");
+ error_append_security_model_hint(errp);
return -1;
}
@@ -1482,20 +1488,20 @@ static int local_parse_opts(QemuOpts *opts, struct FsDriverEntry *fse)
} else if (!strcmp(sec_model, "mapped-file")) {
fse->export_flags |= V9FS_SM_MAPPED_FILE;
} else {
- error_report("Invalid security model %s specified", sec_model);
- error_printf("valid options are:"
- "\t[passthrough|mapped-xattr|mapped-file|none]\n");
+ error_setg(errp, "invalid security_model property '%s'", sec_model);
+ error_append_security_model_hint(errp);
return -1;
}
if (!path) {
- error_report("fsdev: No path specified");
+ error_setg(errp, "path property not set");
return -1;
}
- fsdev_throttle_parse_opts(opts, &fse->fst, &err);
- if (err) {
- error_reportf_err(err, "Throttle configuration is not valid: ");
+ fsdev_throttle_parse_opts(opts, &fse->fst, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_prepend(errp, "invalid throttle configuration: ");
return -1;
}
@@ -1507,11 +1513,11 @@ static int local_parse_opts(QemuOpts *opts, struct FsDriverEntry *fse)
qemu_opt_get_number(opts, "dmode", SM_LOCAL_DIR_MODE_BITS) & 0777;
} else {
if (qemu_opt_find(opts, "fmode")) {
- error_report("fmode is only valid for mapped 9p modes");
+ error_setg(errp, "fmode is only valid for mapped security modes");
return -1;
}
if (qemu_opt_find(opts, "dmode")) {
- error_report("dmode is only valid for mapped 9p modes");
+ error_setg(errp, "dmode is only valid for mapped security modes");
return -1;
}
}
diff --git a/hw/9pfs/9p-proxy.c b/hw/9pfs/9p-proxy.c
index 28b20a7c3d..f030c6a428 100644
--- a/hw/9pfs/9p-proxy.c
+++ b/hw/9pfs/9p-proxy.c
@@ -1083,25 +1083,25 @@ static int proxy_ioc_getversion(FsContext *fs_ctx, V9fsPath *path,
return err;
}
-static int connect_namedsocket(const char *path)
+static int connect_namedsocket(const char *path, Error **errp)
{
int sockfd, size;
struct sockaddr_un helper;
if (strlen(path) >= sizeof(helper.sun_path)) {
- error_report("Socket name too long");
+ error_setg(errp, "socket name too long");
return -1;
}
sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
if (sockfd < 0) {
- error_report("Failed to create socket: %s", strerror(errno));
+ error_setg_errno(errp, errno, "failed to create client socket");
return -1;
}
strcpy(helper.sun_path, path);
helper.sun_family = AF_UNIX;
size = strlen(helper.sun_path) + sizeof(helper.sun_family);
if (connect(sockfd, (struct sockaddr *)&helper, size) < 0) {
- error_report("Failed to connect to %s: %s", path, strerror(errno));
+ error_setg_errno(errp, errno, "failed to connect to '%s'", path);
close(sockfd);
return -1;
}
@@ -1111,17 +1111,27 @@ static int connect_namedsocket(const char *path)
return sockfd;
}
-static int proxy_parse_opts(QemuOpts *opts, struct FsDriverEntry *fs)
+static void error_append_socket_sockfd_hint(Error **errp)
+{
+ error_append_hint(errp, "Either specify socket=/some/path where /some/path"
+ " points to a listening AF_UNIX socket or sock_fd=fd"
+ " where fd is a file descriptor to a connected AF_UNIX"
+ " socket\n");
+}
+
+static int proxy_parse_opts(QemuOpts *opts, FsDriverEntry *fs, Error **errp)
{
const char *socket = qemu_opt_get(opts, "socket");
const char *sock_fd = qemu_opt_get(opts, "sock_fd");
if (!socket && !sock_fd) {
- error_report("Must specify either socket or sock_fd");
+ error_setg(errp, "both socket and sock_fd properties are missing");
+ error_append_socket_sockfd_hint(errp);
return -1;
}
if (socket && sock_fd) {
- error_report("Both socket and sock_fd options specified");
+ error_setg(errp, "both socket and sock_fd properties are set");
+ error_append_socket_sockfd_hint(errp);
return -1;
}
if (socket) {
@@ -1134,17 +1144,17 @@ static int proxy_parse_opts(QemuOpts *opts, struct FsDriverEntry *fs)
return 0;
}
-static int proxy_init(FsContext *ctx)
+static int proxy_init(FsContext *ctx, Error **errp)
{
V9fsProxy *proxy = g_malloc(sizeof(V9fsProxy));
int sock_id;
if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) {
- sock_id = connect_namedsocket(ctx->fs_root);
+ sock_id = connect_namedsocket(ctx->fs_root, errp);
} else {
sock_id = atoi(ctx->fs_root);
if (sock_id < 0) {
- error_report("Socket descriptor not initialized");
+ error_setg(errp, "socket descriptor not initialized");
}
}
if (sock_id < 0) {
diff --git a/hw/9pfs/9p-synth.c b/hw/9pfs/9p-synth.c
index df0a8de08a..8f255e91c0 100644
--- a/hw/9pfs/9p-synth.c
+++ b/hw/9pfs/9p-synth.c
@@ -514,7 +514,7 @@ static int synth_unlinkat(FsContext *ctx, V9fsPath *dir,
return -1;
}
-static int synth_init(FsContext *ctx)
+static int synth_init(FsContext *ctx, Error **errp)
{
QLIST_INIT(&synth_root.child);
qemu_mutex_init(&synth_mutex);
diff --git a/hw/9pfs/9p-xattr.h b/hw/9pfs/9p-xattr.h
index 0d83996575..35bcd24f77 100644
--- a/hw/9pfs/9p-xattr.h
+++ b/hw/9pfs/9p-xattr.h
@@ -16,8 +16,7 @@
#include "qemu/xattr.h"
-typedef struct xattr_operations
-{
+struct XattrOperations {
const char *name;
ssize_t (*getxattr)(FsContext *ctx, const char *path,
const char *name, void *value, size_t size);
@@ -27,7 +26,7 @@ typedef struct xattr_operations
void *value, size_t size, int flags);
int (*removexattr)(FsContext *ctx,
const char *path, const char *name);
-} XattrOperations;
+};
ssize_t local_getxattr_nofollow(FsContext *ctx, const char *path,
const char *name, void *value, size_t size);
diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
index 52d46632fe..909a611394 100644
--- a/hw/9pfs/9p.c
+++ b/hw/9pfs/9p.c
@@ -41,7 +41,7 @@ enum {
Oappend = 0x80,
};
-ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
+static ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
{
ssize_t ret;
va_list ap;
@@ -53,7 +53,7 @@ ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
return ret;
}
-ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
+static ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...)
{
ssize_t ret;
va_list ap;
@@ -99,10 +99,10 @@ static int omode_to_uflags(int8_t mode)
return ret;
}
-struct dotl_openflag_map {
+typedef struct DotlOpenflagMap {
int dotl_flag;
int open_flag;
-};
+} DotlOpenflagMap;
static int dotl_to_open_flags(int flags)
{
@@ -113,7 +113,7 @@ static int dotl_to_open_flags(int flags)
*/
int oflags = flags & O_ACCMODE;
- struct dotl_openflag_map dotl_oflag_map[] = {
+ DotlOpenflagMap dotl_oflag_map[] = {
{ P9_DOTL_CREATE, O_CREAT },
{ P9_DOTL_EXCL, O_EXCL },
{ P9_DOTL_NOCTTY , O_NOCTTY },
@@ -3473,14 +3473,12 @@ void pdu_submit(V9fsPDU *pdu, P9MsgHeader *hdr)
if (pdu->id >= ARRAY_SIZE(pdu_co_handlers) ||
(pdu_co_handlers[pdu->id] == NULL)) {
handler = v9fs_op_not_supp;
+ } else if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) {
+ handler = v9fs_fs_ro;
} else {
handler = pdu_co_handlers[pdu->id];
}
- if (is_ro_export(&s->ctx) && !is_read_only_op(pdu)) {
- handler = v9fs_fs_ro;
- }
-
qemu_co_queue_init(&pdu->complete);
co = qemu_coroutine_create(handler, pdu);
qemu_coroutine_enter(co);
@@ -3544,9 +3542,9 @@ int v9fs_device_realize_common(V9fsState *s, Error **errp)
s->fid_list = NULL;
qemu_co_rwlock_init(&s->rename_lock);
- if (s->ops->init(&s->ctx) < 0) {
- error_setg(errp, "9pfs Failed to initialize fs-driver with id:%s"
- " and export path:%s", s->fsconf.fsdev_id, s->ctx.fs_root);
+ if (s->ops->init(&s->ctx, errp) < 0) {
+ error_prepend(errp, "cannot initialize fsdev '%s': ",
+ s->fsconf.fsdev_id);
goto out;
}
diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
index cdfc4f4ce7..ffe658ab89 100644
--- a/hw/9pfs/9p.h
+++ b/hw/9pfs/9p.h
@@ -94,10 +94,10 @@ enum {
P9_QTFILE = 0x00,
};
-enum p9_proto_version {
+typedef enum P9ProtoVersion {
V9FS_PROTO_2000U = 0x01,
V9FS_PROTO_2000L = 0x02,
-};
+} P9ProtoVersion;
#define P9_NOTAG UINT16_MAX
#define P9_NOFID UINT32_MAX
@@ -118,6 +118,7 @@ static inline char *rpath(FsContext *ctx, const char *path)
typedef struct V9fsPDU V9fsPDU;
typedef struct V9fsState V9fsState;
+typedef struct V9fsTransport V9fsTransport;
typedef struct {
uint32_t size_le;
@@ -238,10 +239,10 @@ struct V9fsState
FileOperations *ops;
FsContext ctx;
char *tag;
- enum p9_proto_version proto_version;
+ P9ProtoVersion proto_version;
int32_t msize;
V9fsPDU pdus[MAX_REQ];
- const struct V9fsTransport *transport;
+ const V9fsTransport *transport;
/*
* lock ensuring atomic path update
* on rename.
@@ -348,8 +349,6 @@ int v9fs_name_to_path(V9fsState *s, V9fsPath *dirpath,
int v9fs_device_realize_common(V9fsState *s, Error **errp);
void v9fs_device_unrealize_common(V9fsState *s, Error **errp);
-ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...);
-ssize_t pdu_unmarshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...);
V9fsPDU *pdu_alloc(V9fsState *s);
void pdu_free(V9fsPDU *pdu);
void pdu_submit(V9fsPDU *pdu, P9MsgHeader *hdr);
@@ -367,8 +366,7 @@ struct V9fsTransport {
void (*push_and_notify)(V9fsPDU *pdu);
};
-static inline int v9fs_register_transport(V9fsState *s,
- const struct V9fsTransport *t)
+static inline int v9fs_register_transport(V9fsState *s, const V9fsTransport *t)
{
assert(!s->transport);
s->transport = t;
diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
index 62650b0a6b..43f4e53f33 100644
--- a/hw/9pfs/virtio-9p-device.c
+++ b/hw/9pfs/virtio-9p-device.c
@@ -20,8 +20,6 @@
#include "hw/virtio/virtio-access.h"
#include "qemu/iov.h"
-static const struct V9fsTransport virtio_9p_transport;
-
static void virtio_9p_push_and_notify(V9fsPDU *pdu)
{
V9fsState *s = pdu->s;
@@ -104,35 +102,6 @@ static void virtio_9p_get_config(VirtIODevice *vdev, uint8_t *config)
g_free(cfg);
}
-static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- V9fsVirtioState *v = VIRTIO_9P(dev);
- V9fsState *s = &v->state;
-
- if (v9fs_device_realize_common(s, errp)) {
- goto out;
- }
-
- v->config_size = sizeof(struct virtio_9p_config) + strlen(s->fsconf.tag);
- virtio_init(vdev, "virtio-9p", VIRTIO_ID_9P, v->config_size);
- v->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output);
- v9fs_register_transport(s, &virtio_9p_transport);
-
-out:
- return;
-}
-
-static void virtio_9p_device_unrealize(DeviceState *dev, Error **errp)
-{
- VirtIODevice *vdev = VIRTIO_DEVICE(dev);
- V9fsVirtioState *v = VIRTIO_9P(dev);
- V9fsState *s = &v->state;
-
- virtio_cleanup(vdev);
- v9fs_device_unrealize_common(s, errp);
-}
-
static void virtio_9p_reset(VirtIODevice *vdev)
{
V9fsVirtioState *v = (V9fsVirtioState *)vdev;
@@ -215,7 +184,7 @@ static void virtio_init_out_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
*pniov = elem->out_num;
}
-static const struct V9fsTransport virtio_9p_transport = {
+static const V9fsTransport virtio_9p_transport = {
.pdu_vmarshal = virtio_pdu_vmarshal,
.pdu_vunmarshal = virtio_pdu_vunmarshal,
.init_in_iov_from_pdu = virtio_init_in_iov_from_pdu,
@@ -223,6 +192,35 @@ static const struct V9fsTransport virtio_9p_transport = {
.push_and_notify = virtio_9p_push_and_notify,
};
+static void virtio_9p_device_realize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ V9fsVirtioState *v = VIRTIO_9P(dev);
+ V9fsState *s = &v->state;
+
+ if (v9fs_device_realize_common(s, errp)) {
+ goto out;
+ }
+
+ v->config_size = sizeof(struct virtio_9p_config) + strlen(s->fsconf.tag);
+ virtio_init(vdev, "virtio-9p", VIRTIO_ID_9P, v->config_size);
+ v->vq = virtio_add_queue(vdev, MAX_REQ, handle_9p_output);
+ v9fs_register_transport(s, &virtio_9p_transport);
+
+out:
+ return;
+}
+
+static void virtio_9p_device_unrealize(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ V9fsVirtioState *v = VIRTIO_9P(dev);
+ V9fsState *s = &v->state;
+
+ virtio_cleanup(vdev);
+ v9fs_device_unrealize_common(s, errp);
+}
+
/* virtio-9p device */
static const VMStateDescription vmstate_virtio_9p = {
diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
index ee87f08926..df2a4100bf 100644
--- a/hw/9pfs/xen-9p-backend.c
+++ b/hw/9pfs/xen-9p-backend.c
@@ -233,7 +233,7 @@ static void xen_9pfs_push_and_notify(V9fsPDU *pdu)
qemu_bh_schedule(ring->bh);
}
-static const struct V9fsTransport xen_9p_transport = {
+static const V9fsTransport xen_9p_transport = {
.pdu_vmarshal = xen_9pfs_pdu_vmarshal,
.pdu_vunmarshal = xen_9pfs_pdu_vunmarshal,
.init_in_iov_from_pdu = xen_9pfs_init_in_iov_from_pdu,
diff --git a/hw/acpi/core.c b/hw/acpi/core.c
index cd0a1d357b..eb9b76f70b 100644
--- a/hw/acpi/core.c
+++ b/hw/acpi/core.c
@@ -21,7 +21,6 @@
#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/acpi/acpi.h"
#include "hw/nvram/fw_cfg.h"
#include "qemu/config-file.h"
diff --git a/hw/acpi/ipmi-stub.c b/hw/acpi/ipmi-stub.c
index 98b6dcee0d..f525f71c2d 100644
--- a/hw/acpi/ipmi-stub.c
+++ b/hw/acpi/ipmi-stub.c
@@ -7,6 +7,7 @@
* See the COPYING file in the top-level directory.
*/
+#include "qemu/osdep.h"
#include "hw/acpi/ipmi.h"
void build_acpi_ipmi_devices(Aml *table, BusState *bus)
diff --git a/hw/alpha/dp264.c b/hw/alpha/dp264.c
index babd6ea514..766373eec7 100644
--- a/hw/alpha/dp264.c
+++ b/hw/alpha/dp264.c
@@ -78,9 +78,9 @@ static void clipper_init(MachineState *machine)
clipper_pci_map_irq);
/* Since we have an SRM-compatible PALcode, use the SRM epoch. */
- rtc_init(isa_bus, 1900, rtc_irq);
+ mc146818_rtc_init(isa_bus, 1900, rtc_irq);
- pit_init(isa_bus, 0x40, 0, NULL);
+ i8254_pit_init(isa_bus, 0x40, 0, NULL);
isa_create_simple(isa_bus, "i8042");
/* VGA setup. Don't bother loading the bios. */
diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c
index feccdb00d3..ac1e15cbbc 100644
--- a/hw/arm/spitz.c
+++ b/hw/arm/spitz.c
@@ -29,7 +29,6 @@
#include "sysemu/block-backend.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
-#include "sysemu/sysemu.h"
#include "cpu.h"
#undef REG_FMT
diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c
index bbe7d046e4..b126cf148b 100644
--- a/hw/arm/xlnx-zcu102.c
+++ b/hw/arm/xlnx-zcu102.c
@@ -151,6 +151,29 @@ static void xlnx_zynqmp_init(XlnxZCU102 *s, MachineState *machine)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.spi[i]), 1, cs_line);
}
+ for (i = 0; i < XLNX_ZYNQMP_NUM_QSPI_FLASH; i++) {
+ SSIBus *spi_bus;
+ DeviceState *flash_dev;
+ qemu_irq cs_line;
+ DriveInfo *dinfo = drive_get_next(IF_MTD);
+ int bus = i / XLNX_ZYNQMP_NUM_QSPI_BUS_CS;
+ gchar *bus_name = g_strdup_printf("qspi%d", bus);
+
+ spi_bus = (SSIBus *)qdev_get_child_bus(DEVICE(&s->soc), bus_name);
+ g_free(bus_name);
+
+ flash_dev = ssi_create_slave_no_init(spi_bus, "n25q512a11");
+ if (dinfo) {
+ qdev_prop_set_drive(flash_dev, "drive", blk_by_legacy_dinfo(dinfo),
+ &error_fatal);
+ }
+ qdev_init_nofail(flash_dev);
+
+ cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0);
+
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.qspi), i + 1, cs_line);
+ }
+
/* TODO create and connect IDE devices for ide_drive_get() */
xlnx_zcu102_binfo.ram_size = ram_size;
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index c707c66322..325642058b 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -40,6 +40,10 @@
#define SATA_ADDR 0xFD0C0000
#define SATA_NUM_PORTS 2
+#define QSPI_ADDR 0xff0f0000
+#define LQSPI_ADDR 0xc0000000
+#define QSPI_IRQ 15
+
#define DP_ADDR 0xfd4a0000
#define DP_IRQ 113
@@ -171,6 +175,9 @@ static void xlnx_zynqmp_init(Object *obj)
qdev_set_parent_bus(DEVICE(&s->spi[i]), sysbus_get_default());
}
+ object_initialize(&s->qspi, sizeof(s->qspi), TYPE_XLNX_ZYNQMP_QSPIPS);
+ qdev_set_parent_bus(DEVICE(&s->qspi), sysbus_get_default());
+
object_initialize(&s->dp, sizeof(s->dp), TYPE_XLNX_DP);
qdev_set_parent_bus(DEVICE(&s->dp), sysbus_get_default());
@@ -411,6 +418,25 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
g_free(bus_name);
}
+ object_property_set_bool(OBJECT(&s->qspi), true, "realized", &err);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 0, QSPI_ADDR);
+ sysbus_mmio_map(SYS_BUS_DEVICE(&s->qspi), 1, LQSPI_ADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->qspi), 0, gic_spi[QSPI_IRQ]);
+
+ for (i = 0; i < XLNX_ZYNQMP_NUM_QSPI_BUS; i++) {
+ gchar *bus_name;
+ gchar *target_bus;
+
+ /* Alias controller SPI bus to the SoC itself */
+ bus_name = g_strdup_printf("qspi%d", i);
+ target_bus = g_strdup_printf("spi%d", i);
+ object_property_add_alias(OBJECT(s), bus_name,
+ OBJECT(&s->qspi), target_bus,
+ &error_abort);
+ g_free(bus_name);
+ g_free(target_bus);
+ }
+
object_property_set_bool(OBJECT(&s->dp), true, "realized", &err);
if (err) {
error_propagate(errp, err);
diff --git a/hw/audio/fmopl.c b/hw/audio/fmopl.c
index 5cfb6a96dd..9f50a89b4a 100644
--- a/hw/audio/fmopl.c
+++ b/hw/audio/fmopl.c
@@ -34,7 +34,6 @@
#include <math.h>
//#include "driver.h" /* use M.A.M.E. */
#include "fmopl.h"
-#include "qemu/osdep.h"
#ifndef PI
#define PI 3.14159265358979323846
#endif
diff --git a/hw/audio/fmopl.h b/hw/audio/fmopl.h
index f4065f425c..e7e578a48e 100644
--- a/hw/audio/fmopl.h
+++ b/hw/audio/fmopl.h
@@ -1,7 +1,6 @@
#ifndef FMOPL_H
#define FMOPL_H
-#include <stdint.h>
typedef void (*OPL_TIMERHANDLER)(void *param, int channel, double interval_Sec);
diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c
index 0206f7399b..908696d483 100644
--- a/hw/audio/pcspk.c
+++ b/hw/audio/pcspk.c
@@ -24,7 +24,6 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "hw/audio/soundhw.h"
#include "audio/audio.h"
diff --git a/hw/block/block.c b/hw/block/block.c
index 27878d0087..b0269c857f 100644
--- a/hw/block/block.c
+++ b/hw/block/block.c
@@ -51,7 +51,7 @@ void blkconf_blocksizes(BlockConf *conf)
}
}
-void blkconf_apply_backend_options(BlockConf *conf, bool readonly,
+bool blkconf_apply_backend_options(BlockConf *conf, bool readonly,
bool resizable, Error **errp)
{
BlockBackend *blk = conf->blk;
@@ -76,7 +76,7 @@ void blkconf_apply_backend_options(BlockConf *conf, bool readonly,
ret = blk_set_perm(blk, perm, shared_perm, errp);
if (ret < 0) {
- return;
+ return false;
}
switch (conf->wce) {
@@ -99,9 +99,11 @@ void blkconf_apply_backend_options(BlockConf *conf, bool readonly,
blk_set_enable_write_cache(blk, wce);
blk_set_on_error(blk, rerror, werror);
+
+ return true;
}
-void blkconf_geometry(BlockConf *conf, int *ptrans,
+bool blkconf_geometry(BlockConf *conf, int *ptrans,
unsigned cyls_max, unsigned heads_max, unsigned secs_max,
Error **errp)
{
@@ -129,15 +131,16 @@ void blkconf_geometry(BlockConf *conf, int *ptrans,
if (conf->cyls || conf->heads || conf->secs) {
if (conf->cyls < 1 || conf->cyls > cyls_max) {
error_setg(errp, "cyls must be between 1 and %u", cyls_max);
- return;
+ return false;
}
if (conf->heads < 1 || conf->heads > heads_max) {
error_setg(errp, "heads must be between 1 and %u", heads_max);
- return;
+ return false;
}
if (conf->secs < 1 || conf->secs > secs_max) {
error_setg(errp, "secs must be between 1 and %u", secs_max);
- return;
+ return false;
}
}
+ return true;
}
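Returning bool lets callers branch on the result directly instead of creating a local Error just to test it; the fdc.c hunks further down are the real conversions, and the general caller shape is (device fields and geometry limits are placeholders):

    if (!blkconf_geometry(&dev->conf, &dev->chs_trans, 65535, 16, 255, errp)) {
        return;
    }
    if (!blkconf_apply_backend_options(&dev->conf,
                                       blk_is_read_only(dev->conf.blk),
                                       true, errp)) {
        return;
    }
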
diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c
index 5556f0e64e..f6fc639e88 100644
--- a/hw/block/dataplane/virtio-blk.c
+++ b/hw/block/dataplane/virtio-blk.c
@@ -76,7 +76,7 @@ static void notify_guest_bh(void *opaque)
}
/* Context: QEMU global mutex held */
-void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
+bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
VirtIOBlockDataPlane **dataplane,
Error **errp)
{
@@ -91,11 +91,11 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
error_setg(errp,
"device is incompatible with iothread "
"(transport does not support notifiers)");
- return;
+ return false;
}
if (!virtio_device_ioeventfd_enabled(vdev)) {
error_setg(errp, "ioeventfd is required for iothread");
- return;
+ return false;
}
/* If dataplane is (re-)enabled while the guest is running there could
@@ -103,12 +103,12 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
*/
if (blk_op_is_blocked(conf->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) {
error_prepend(errp, "cannot start virtio-blk dataplane: ");
- return;
+ return false;
}
}
/* Don't try if transport does not support notifiers. */
if (!virtio_device_ioeventfd_enabled(vdev)) {
- return;
+ return false;
}
s = g_new0(VirtIOBlockDataPlane, 1);
@@ -126,6 +126,8 @@ void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
s->batch_notify_vqs = bitmap_new(conf->num_queues);
*dataplane = s;
+
+ return true;
}
/* Context: QEMU global mutex held */
diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h
index db3f47b173..5e18bb99ae 100644
--- a/hw/block/dataplane/virtio-blk.h
+++ b/hw/block/dataplane/virtio-blk.h
@@ -19,7 +19,7 @@
typedef struct VirtIOBlockDataPlane VirtIOBlockDataPlane;
-void virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
+bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf,
VirtIOBlockDataPlane **dataplane,
Error **errp);
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s);
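A caller can now distinguish the two "no dataplane" outcomes by whether errp was set: false with an error means bad configuration, false without one just means the transport cannot use a dataplane. Roughly (the caller shape is illustrative, not the actual virtio-blk.c change):

    Error *err = NULL;

    if (!virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err)) {
        if (err) {
            error_propagate(errp, err);  /* configuration error */
            return;
        }
        /* err == NULL: fall back to the non-iothread code path */
    }
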
diff --git a/hw/block/fdc.c b/hw/block/fdc.c
index 67f78ac702..7b7dd41296 100644
--- a/hw/block/fdc.c
+++ b/hw/block/fdc.c
@@ -473,16 +473,13 @@ static void fd_revalidate(FDrive *drv)
static void fd_change_cb(void *opaque, bool load, Error **errp)
{
FDrive *drive = opaque;
- Error *local_err = NULL;
if (!load) {
blk_set_perm(drive->blk, 0, BLK_PERM_ALL, &error_abort);
} else {
- blkconf_apply_backend_options(drive->conf,
- blk_is_read_only(drive->blk), false,
- &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ if (!blkconf_apply_backend_options(drive->conf,
+ blk_is_read_only(drive->blk), false,
+ errp)) {
return;
}
}
@@ -522,7 +519,6 @@ static void floppy_drive_realize(DeviceState *qdev, Error **errp)
FloppyDrive *dev = FLOPPY_DRIVE(qdev);
FloppyBus *bus = FLOPPY_BUS(qdev->parent_bus);
FDrive *drive;
- Error *local_err = NULL;
int ret;
if (dev->unit == -1) {
@@ -568,10 +564,9 @@ static void floppy_drive_realize(DeviceState *qdev, Error **errp)
dev->conf.rerror = BLOCKDEV_ON_ERROR_AUTO;
dev->conf.werror = BLOCKDEV_ON_ERROR_AUTO;
- blkconf_apply_backend_options(&dev->conf, blk_is_read_only(dev->conf.blk),
- false, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
+ if (!blkconf_apply_backend_options(&dev->conf,
+ blk_is_read_only(dev->conf.blk),
+ false, errp)) {
return;
}
diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c
index a2438b9ed2..ea142160b3 100644
--- a/hw/block/m25p80.c
+++ b/hw/block/m25p80.c
@@ -240,6 +240,8 @@ static const FlashPartInfo known_devices[] = {
{ INFO("n25q128a13", 0x20ba18, 0, 64 << 10, 256, ER_4K) },
{ INFO("n25q256a11", 0x20bb19, 0, 64 << 10, 512, ER_4K) },
{ INFO("n25q256a13", 0x20ba19, 0, 64 << 10, 512, ER_4K) },
+ { INFO("n25q512a11", 0x20bb20, 0, 64 << 10, 1024, ER_4K) },
+ { INFO("n25q512a13", 0x20ba20, 0, 64 << 10, 1024, ER_4K) },
{ INFO("n25q128", 0x20ba18, 0, 64 << 10, 256, 0) },
{ INFO("n25q256a", 0x20ba19, 0, 64 << 10, 512, ER_4K) },
{ INFO("n25q512a", 0x20ba20, 0, 64 << 10, 1024, ER_4K) },
@@ -331,7 +333,10 @@ typedef enum {
WRDI = 0x4,
RDSR = 0x5,
WREN = 0x6,
+ BRRD = 0x16,
+ BRWR = 0x17,
JEDEC_READ = 0x9f,
+ BULK_ERASE_60 = 0x60,
BULK_ERASE = 0xc7,
READ_FSR = 0x70,
RDCR = 0x15,
@@ -355,6 +360,8 @@ typedef enum {
DPP = 0xa2,
QPP = 0x32,
QPP_4 = 0x34,
+ RDID_90 = 0x90,
+ RDID_AB = 0xab,
ERASE_4K = 0x20,
ERASE4_4K = 0x21,
@@ -405,6 +412,7 @@ typedef enum {
MAN_MACRONIX,
MAN_NUMONYX,
MAN_WINBOND,
+ MAN_SST,
MAN_GENERIC,
} Manufacturer;
@@ -423,6 +431,7 @@ typedef struct Flash {
uint8_t data[M25P80_INTERNAL_DATA_BUFFER_SZ];
uint32_t len;
uint32_t pos;
+ bool data_read_loop;
uint8_t needed_bytes;
uint8_t cmd_in_progress;
uint32_t cur_addr;
@@ -475,6 +484,8 @@ static inline Manufacturer get_man(Flash *s)
return MAN_SPANSION;
case 0xC2:
return MAN_MACRONIX;
+ case 0xBF:
+ return MAN_SST;
default:
return MAN_GENERIC;
}
@@ -698,6 +709,7 @@ static void complete_collecting_data(Flash *s)
s->write_enable = false;
}
break;
+ case BRWR:
case EXTEND_ADDR_WRITE:
s->ear = s->data[0];
break;
@@ -710,6 +722,31 @@ static void complete_collecting_data(Flash *s)
case WEVCR:
s->enh_volatile_cfg = s->data[0];
break;
+ case RDID_90:
+ case RDID_AB:
+ if (get_man(s) == MAN_SST) {
+ if (s->cur_addr <= 1) {
+ if (s->cur_addr) {
+ s->data[0] = s->pi->id[2];
+ s->data[1] = s->pi->id[0];
+ } else {
+ s->data[0] = s->pi->id[0];
+ s->data[1] = s->pi->id[2];
+ }
+ s->pos = 0;
+ s->len = 2;
+ s->data_read_loop = true;
+ s->state = STATE_READING_DATA;
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M25P80: Invalid read id address\n");
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M25P80: Read id (command 0x90/0xAB) is not supported"
+ " by device\n");
+ }
+ break;
default:
break;
}
@@ -925,6 +962,8 @@ static void decode_new_cmd(Flash *s, uint32_t value)
case PP4:
case PP4_4:
case DIE_ERASE:
+ case RDID_90:
+ case RDID_AB:
s->needed_bytes = get_addr_length(s);
s->pos = 0;
s->len = 0;
@@ -983,6 +1022,7 @@ static void decode_new_cmd(Flash *s, uint32_t value)
}
s->pos = 0;
s->len = 1;
+ s->data_read_loop = true;
s->state = STATE_READING_DATA;
break;
@@ -993,6 +1033,7 @@ static void decode_new_cmd(Flash *s, uint32_t value)
}
s->pos = 0;
s->len = 1;
+ s->data_read_loop = true;
s->state = STATE_READING_DATA;
break;
@@ -1015,6 +1056,7 @@ static void decode_new_cmd(Flash *s, uint32_t value)
s->state = STATE_READING_DATA;
break;
+ case BULK_ERASE_60:
case BULK_ERASE:
if (s->write_enable) {
DB_PRINT_L(0, "chip erase\n");
@@ -1032,12 +1074,14 @@ static void decode_new_cmd(Flash *s, uint32_t value)
case EX_4BYTE_ADDR:
s->four_bytes_address_mode = false;
break;
+ case BRRD:
case EXTEND_ADDR_READ:
s->data[0] = s->ear;
s->pos = 0;
s->len = 1;
s->state = STATE_READING_DATA;
break;
+ case BRWR:
case EXTEND_ADDR_WRITE:
if (s->write_enable) {
s->needed_bytes = 1;
@@ -1133,6 +1177,7 @@ static int m25p80_cs(SSISlave *ss, bool select)
s->pos = 0;
s->state = STATE_IDLE;
flash_sync_dirty(s, -1);
+ s->data_read_loop = false;
}
DB_PRINT_L(0, "%sselect\n", select ? "de" : "");
@@ -1198,7 +1243,9 @@ static uint32_t m25p80_transfer8(SSISlave *ss, uint32_t tx)
s->pos++;
if (s->pos == s->len) {
s->pos = 0;
- s->state = STATE_IDLE;
+ if (!s->data_read_loop) {
+ s->state = STATE_IDLE;
+ }
}
break;
@@ -1269,11 +1316,38 @@ static Property m25p80_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
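+/*
+ * Reset the flag before an incoming migration; it is only overwritten if the
+ * data_read_loop subsection is present in the stream.
+ */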
+static int m25p80_pre_load(void *opaque)
+{
+ Flash *s = (Flash *)opaque;
+
+ s->data_read_loop = false;
+ return 0;
+}
+
+static bool m25p80_data_read_loop_needed(void *opaque)
+{
+ Flash *s = (Flash *)opaque;
+
+ return s->data_read_loop;
+}
+
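+/*
+ * Carry the flag in a subsection so it is only put on the wire while a
+ * looping read is actually in progress, keeping migration to older QEMU
+ * versions working in the common case.
+ */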
+static const VMStateDescription vmstate_m25p80_data_read_loop = {
+ .name = "m25p80/data_read_loop",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = m25p80_data_read_loop_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(data_read_loop, Flash),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_m25p80 = {
.name = "m25p80",
.version_id = 0,
.minimum_version_id = 0,
.pre_save = m25p80_pre_save,
+ .pre_load = m25p80_pre_load,
.fields = (VMStateField[]) {
VMSTATE_UINT8(state, Flash),
VMSTATE_UINT8_ARRAY(data, Flash, M25P80_INTERNAL_DATA_BUFFER_SZ),
@@ -1295,6 +1369,10 @@ static const VMStateDescription vmstate_m25p80 = {
VMSTATE_UINT8(spansion_cr3nv, Flash),
VMSTATE_UINT8(spansion_cr4nv, Flash),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_m25p80_data_read_loop,
+ NULL
}
};
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 441e21ed1f..1ac356d3a5 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -34,8 +34,17 @@
#include "qapi/visitor.h"
#include "sysemu/block-backend.h"
+#include "qemu/log.h"
+#include "trace.h"
#include "nvme.h"
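+/*
+ * Report guest-triggered errors both as a trace event and, for users running
+ * without tracing, as a LOG_GUEST_ERROR message derived from the trace name.
+ */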
+#define NVME_GUEST_ERR(trace, fmt, ...) \
+ do { \
+ (trace_##trace)(__VA_ARGS__); \
+ qemu_log_mask(LOG_GUEST_ERROR, #trace \
+ " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
+ } while (0)
+
static void nvme_process_sq(void *opaque);
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
@@ -86,10 +95,14 @@ static void nvme_isr_notify(NvmeCtrl *n, NvmeCQueue *cq)
{
if (cq->irq_enabled) {
if (msix_enabled(&(n->parent_obj))) {
+ trace_nvme_irq_msix(cq->vector);
msix_notify(&(n->parent_obj), cq->vector);
} else {
+ trace_nvme_irq_pin();
pci_irq_pulse(&n->parent_obj);
}
+ } else {
+ trace_nvme_irq_masked();
}
}
@@ -100,7 +113,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
trans_len = MIN(len, trans_len);
int num_prps = (len >> n->page_bits) + 1;
- if (!prp1) {
+ if (unlikely(!prp1)) {
+ trace_nvme_err_invalid_prp();
return NVME_INVALID_FIELD | NVME_DNR;
} else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
@@ -113,7 +127,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
}
len -= trans_len;
if (len) {
- if (!prp2) {
+ if (unlikely(!prp2)) {
+ trace_nvme_err_invalid_prp2_missing();
goto unmap;
}
if (len > n->page_size) {
@@ -128,7 +143,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
uint64_t prp_ent = le64_to_cpu(prp_list[i]);
if (i == n->max_prp_ents - 1 && len > n->page_size) {
- if (!prp_ent || prp_ent & (n->page_size - 1)) {
+ if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
+ trace_nvme_err_invalid_prplist_ent(prp_ent);
goto unmap;
}
@@ -140,7 +156,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
prp_ent = le64_to_cpu(prp_list[i]);
}
- if (!prp_ent || prp_ent & (n->page_size - 1)) {
+ if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
+ trace_nvme_err_invalid_prplist_ent(prp_ent);
goto unmap;
}
@@ -154,7 +171,8 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
i++;
}
} else {
- if (prp2 & (n->page_size - 1)) {
+ if (unlikely(prp2 & (n->page_size - 1))) {
+ trace_nvme_err_invalid_prp2_align(prp2);
goto unmap;
}
if (qsg->nsg) {
@@ -178,16 +196,20 @@ static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
QEMUIOVector iov;
uint16_t status = NVME_SUCCESS;
+ trace_nvme_dma_read(prp1, prp2);
+
if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
return NVME_INVALID_FIELD | NVME_DNR;
}
if (qsg.nsg > 0) {
- if (dma_buf_read(ptr, len, &qsg)) {
+ if (unlikely(dma_buf_read(ptr, len, &qsg))) {
+ trace_nvme_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
qemu_sglist_destroy(&qsg);
} else {
- if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
+ if (unlikely(qemu_iovec_to_buf(&iov, 0, ptr, len) != len)) {
+ trace_nvme_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
qemu_iovec_destroy(&iov);
@@ -273,7 +295,8 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
uint64_t aio_slba = slba << (data_shift - BDRV_SECTOR_BITS);
uint32_t aio_nlb = nlb << (data_shift - BDRV_SECTOR_BITS);
- if (slba + nlb > ns->id_ns.nsze) {
+ if (unlikely(slba + nlb > ns->id_ns.nsze)) {
+ trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
return NVME_LBA_RANGE | NVME_DNR;
}
@@ -301,8 +324,11 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
- if ((slba + nlb) > ns->id_ns.nsze) {
+ trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);
+
+ if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
block_acct_invalid(blk_get_stats(n->conf.blk), acct);
+ trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
return NVME_LBA_RANGE | NVME_DNR;
}
@@ -336,7 +362,8 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
NvmeNamespace *ns;
uint32_t nsid = le32_to_cpu(cmd->nsid);
- if (nsid == 0 || nsid > n->num_namespaces) {
+ if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
+ trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
return NVME_INVALID_NSID | NVME_DNR;
}
@@ -350,6 +377,7 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
case NVME_CMD_READ:
return nvme_rw(n, ns, cmd, req);
default:
+ trace_nvme_err_invalid_opc(cmd->opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
}
@@ -373,10 +401,13 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
NvmeCQueue *cq;
uint16_t qid = le16_to_cpu(c->qid);
- if (!qid || nvme_check_sqid(n, qid)) {
+ if (unlikely(!qid || nvme_check_sqid(n, qid))) {
+ trace_nvme_err_invalid_del_sq(qid);
return NVME_INVALID_QID | NVME_DNR;
}
+ trace_nvme_del_sq(qid);
+
sq = n->sq[qid];
while (!QTAILQ_EMPTY(&sq->out_req_list)) {
req = QTAILQ_FIRST(&sq->out_req_list);
@@ -439,19 +470,26 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
uint16_t qflags = le16_to_cpu(c->sq_flags);
uint64_t prp1 = le64_to_cpu(c->prp1);
- if (!cqid || nvme_check_cqid(n, cqid)) {
+ trace_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);
+
+ if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
+ trace_nvme_err_invalid_create_sq_cqid(cqid);
return NVME_INVALID_CQID | NVME_DNR;
}
- if (!sqid || !nvme_check_sqid(n, sqid)) {
+ if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
+ trace_nvme_err_invalid_create_sq_sqid(sqid);
return NVME_INVALID_QID | NVME_DNR;
}
- if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
+ if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
+ trace_nvme_err_invalid_create_sq_size(qsize);
return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
}
- if (!prp1 || prp1 & (n->page_size - 1)) {
+ if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
+ trace_nvme_err_invalid_create_sq_addr(prp1);
return NVME_INVALID_FIELD | NVME_DNR;
}
- if (!(NVME_SQ_FLAGS_PC(qflags))) {
+ if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
+ trace_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
return NVME_INVALID_FIELD | NVME_DNR;
}
sq = g_malloc0(sizeof(*sq));
@@ -476,14 +514,17 @@ static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
NvmeCQueue *cq;
uint16_t qid = le16_to_cpu(c->qid);
- if (!qid || nvme_check_cqid(n, qid)) {
+ if (unlikely(!qid || nvme_check_cqid(n, qid))) {
+ trace_nvme_err_invalid_del_cq_cqid(qid);
return NVME_INVALID_CQID | NVME_DNR;
}
cq = n->cq[qid];
- if (!QTAILQ_EMPTY(&cq->sq_list)) {
+ if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
+ trace_nvme_err_invalid_del_cq_notempty(qid);
return NVME_INVALID_QUEUE_DEL;
}
+ trace_nvme_del_cq(qid);
nvme_free_cq(cq, n);
return NVME_SUCCESS;
}
@@ -516,19 +557,27 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
uint16_t qflags = le16_to_cpu(c->cq_flags);
uint64_t prp1 = le64_to_cpu(c->prp1);
- if (!cqid || !nvme_check_cqid(n, cqid)) {
+ trace_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
+ NVME_CQ_FLAGS_IEN(qflags) != 0);
+
+ if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
+ trace_nvme_err_invalid_create_cq_cqid(cqid);
return NVME_INVALID_CQID | NVME_DNR;
}
- if (!qsize || qsize > NVME_CAP_MQES(n->bar.cap)) {
+ if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
+ trace_nvme_err_invalid_create_cq_size(qsize);
return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
}
- if (!prp1) {
+ if (unlikely(!prp1)) {
+ trace_nvme_err_invalid_create_cq_addr(prp1);
return NVME_INVALID_FIELD | NVME_DNR;
}
- if (vector > n->num_queues) {
+ if (unlikely(vector > n->num_queues)) {
+ trace_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
}
- if (!(NVME_CQ_FLAGS_PC(qflags))) {
+ if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
+ trace_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
return NVME_INVALID_FIELD | NVME_DNR;
}
@@ -543,6 +592,8 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
uint64_t prp1 = le64_to_cpu(c->prp1);
uint64_t prp2 = le64_to_cpu(c->prp2);
+ trace_nvme_identify_ctrl();
+
return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
prp1, prp2);
}
@@ -554,11 +605,15 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
uint64_t prp1 = le64_to_cpu(c->prp1);
uint64_t prp2 = le64_to_cpu(c->prp2);
- if (nsid == 0 || nsid > n->num_namespaces) {
+ trace_nvme_identify_ns(nsid);
+
+ if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
+ trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
return NVME_INVALID_NSID | NVME_DNR;
}
ns = &n->namespaces[nsid - 1];
+
return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
prp1, prp2);
}
@@ -573,6 +628,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
uint16_t ret;
int i, j = 0;
+ trace_nvme_identify_nslist(min_nsid);
+
list = g_malloc0(data_len);
for (i = 0; i < n->num_namespaces; i++) {
if (i < min_nsid) {
@@ -601,6 +658,7 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
case 0x02:
return nvme_identify_nslist(n, c);
default:
+ trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
return NVME_INVALID_FIELD | NVME_DNR;
}
}
@@ -613,11 +671,14 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
switch (dw10) {
case NVME_VOLATILE_WRITE_CACHE:
result = blk_enable_write_cache(n->conf.blk);
+ trace_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
break;
case NVME_NUMBER_OF_QUEUES:
result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
+ trace_nvme_getfeat_numq(result);
break;
default:
+ trace_nvme_err_invalid_getfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
}
@@ -635,10 +696,14 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
break;
case NVME_NUMBER_OF_QUEUES:
+ trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
+ ((dw11 >> 16) & 0xFFFF) + 1,
+ n->num_queues - 1, n->num_queues - 1);
req->cqe.result =
cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
break;
default:
+ trace_nvme_err_invalid_setfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
}
return NVME_SUCCESS;
@@ -662,6 +727,7 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
case NVME_ADM_CMD_GET_FEATURES:
return nvme_get_feature(n, cmd, req);
default:
+ trace_nvme_err_invalid_admin_opc(cmd->opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
}
@@ -721,15 +787,78 @@ static int nvme_start_ctrl(NvmeCtrl *n)
uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
uint32_t page_size = 1 << page_bits;
- if (n->cq[0] || n->sq[0] || !n->bar.asq || !n->bar.acq ||
- n->bar.asq & (page_size - 1) || n->bar.acq & (page_size - 1) ||
- NVME_CC_MPS(n->bar.cc) < NVME_CAP_MPSMIN(n->bar.cap) ||
- NVME_CC_MPS(n->bar.cc) > NVME_CAP_MPSMAX(n->bar.cap) ||
- NVME_CC_IOCQES(n->bar.cc) < NVME_CTRL_CQES_MIN(n->id_ctrl.cqes) ||
- NVME_CC_IOCQES(n->bar.cc) > NVME_CTRL_CQES_MAX(n->id_ctrl.cqes) ||
- NVME_CC_IOSQES(n->bar.cc) < NVME_CTRL_SQES_MIN(n->id_ctrl.sqes) ||
- NVME_CC_IOSQES(n->bar.cc) > NVME_CTRL_SQES_MAX(n->id_ctrl.sqes) ||
- !NVME_AQA_ASQS(n->bar.aqa) || !NVME_AQA_ACQS(n->bar.aqa)) {
+ if (unlikely(n->cq[0])) {
+ trace_nvme_err_startfail_cq();
+ return -1;
+ }
+ if (unlikely(n->sq[0])) {
+ trace_nvme_err_startfail_sq();
+ return -1;
+ }
+ if (unlikely(!n->bar.asq)) {
+ trace_nvme_err_startfail_nbarasq();
+ return -1;
+ }
+ if (unlikely(!n->bar.acq)) {
+ trace_nvme_err_startfail_nbaracq();
+ return -1;
+ }
+ if (unlikely(n->bar.asq & (page_size - 1))) {
+ trace_nvme_err_startfail_asq_misaligned(n->bar.asq);
+ return -1;
+ }
+ if (unlikely(n->bar.acq & (page_size - 1))) {
+ trace_nvme_err_startfail_acq_misaligned(n->bar.acq);
+ return -1;
+ }
+ if (unlikely(NVME_CC_MPS(n->bar.cc) <
+ NVME_CAP_MPSMIN(n->bar.cap))) {
+ trace_nvme_err_startfail_page_too_small(
+ NVME_CC_MPS(n->bar.cc),
+ NVME_CAP_MPSMIN(n->bar.cap));
+ return -1;
+ }
+ if (unlikely(NVME_CC_MPS(n->bar.cc) >
+ NVME_CAP_MPSMAX(n->bar.cap))) {
+ trace_nvme_err_startfail_page_too_large(
+ NVME_CC_MPS(n->bar.cc),
+ NVME_CAP_MPSMAX(n->bar.cap));
+ return -1;
+ }
+ if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
+ NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
+ trace_nvme_err_startfail_cqent_too_small(
+ NVME_CC_IOCQES(n->bar.cc),
+            NVME_CTRL_CQES_MIN(n->id_ctrl.cqes));
+ return -1;
+ }
+ if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
+ NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
+ trace_nvme_err_startfail_cqent_too_large(
+ NVME_CC_IOCQES(n->bar.cc),
+            NVME_CTRL_CQES_MAX(n->id_ctrl.cqes));
+ return -1;
+ }
+ if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
+ NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
+ trace_nvme_err_startfail_sqent_too_small(
+ NVME_CC_IOSQES(n->bar.cc),
+            NVME_CTRL_SQES_MIN(n->id_ctrl.sqes));
+ return -1;
+ }
+ if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
+ NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
+ trace_nvme_err_startfail_sqent_too_large(
+ NVME_CC_IOSQES(n->bar.cc),
+            NVME_CTRL_SQES_MAX(n->id_ctrl.sqes));
+ return -1;
+ }
+ if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
+ trace_nvme_err_startfail_asqent_sz_zero();
+ return -1;
+ }
+ if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
+ trace_nvme_err_startfail_acqent_sz_zero();
return -1;
}
@@ -749,16 +878,48 @@ static int nvme_start_ctrl(NvmeCtrl *n)
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
unsigned size)
{
+ if (unlikely(offset & (sizeof(uint32_t) - 1))) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_misaligned32,
+ "MMIO write not 32-bit aligned,"
+ " offset=0x%"PRIx64"", offset);
+ /* should be ignored, fall through for now */
+ }
+
+ if (unlikely(size < sizeof(uint32_t))) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_toosmall,
+ "MMIO write smaller than 32-bits,"
+ " offset=0x%"PRIx64", size=%u",
+ offset, size);
+ /* should be ignored, fall through for now */
+ }
+
switch (offset) {
- case 0xc:
+ case 0xc: /* INTMS */
+ if (unlikely(msix_enabled(&(n->parent_obj)))) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
+ "undefined access to interrupt mask set"
+ " when MSI-X is enabled");
+ /* should be ignored, fall through for now */
+ }
n->bar.intms |= data & 0xffffffff;
n->bar.intmc = n->bar.intms;
+ trace_nvme_mmio_intm_set(data & 0xffffffff,
+ n->bar.intmc);
break;
- case 0x10:
+ case 0x10: /* INTMC */
+ if (unlikely(msix_enabled(&(n->parent_obj)))) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
+ "undefined access to interrupt mask clr"
+ " when MSI-X is enabled");
+ /* should be ignored, fall through for now */
+ }
n->bar.intms &= ~(data & 0xffffffff);
n->bar.intmc = n->bar.intms;
+ trace_nvme_mmio_intm_clr(data & 0xffffffff,
+ n->bar.intmc);
break;
- case 0x14:
+ case 0x14: /* CC */
+ trace_nvme_mmio_cfg(data & 0xffffffff);
/* Windows first sends data, then sends enable bit */
if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
!NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
@@ -768,40 +929,82 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
n->bar.cc = data;
- if (nvme_start_ctrl(n)) {
+ if (unlikely(nvme_start_ctrl(n))) {
+ trace_nvme_err_startfail();
n->bar.csts = NVME_CSTS_FAILED;
} else {
+ trace_nvme_mmio_start_success();
n->bar.csts = NVME_CSTS_READY;
}
} else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
+ trace_nvme_mmio_stopped();
nvme_clear_ctrl(n);
n->bar.csts &= ~NVME_CSTS_READY;
}
if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
- nvme_clear_ctrl(n);
- n->bar.cc = data;
- n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
+ trace_nvme_mmio_shutdown_set();
+ nvme_clear_ctrl(n);
+ n->bar.cc = data;
+ n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
} else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
- n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
- n->bar.cc = data;
+ trace_nvme_mmio_shutdown_cleared();
+ n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
+ n->bar.cc = data;
+ }
+ break;
+ case 0x1C: /* CSTS */
+ if (data & (1 << 4)) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_ssreset_w1c_unsupported,
+ "attempted to W1C CSTS.NSSRO"
+ " but CAP.NSSRS is zero (not supported)");
+ } else if (data != 0) {
+ NVME_GUEST_ERR(nvme_ub_mmiowr_ro_csts,
+ "attempted to set a read only bit"
+ " of controller status");
+ }
+ break;
+ case 0x20: /* NSSR */
+ if (data == 0x4E564D65) {
+ trace_nvme_ub_mmiowr_ssreset_unsupported();
+ } else {
+ /* The spec says that writes of other values have no effect */
+ return;
}
break;
- case 0x24:
+ case 0x24: /* AQA */
n->bar.aqa = data & 0xffffffff;
+ trace_nvme_mmio_aqattr(data & 0xffffffff);
break;
- case 0x28:
+ case 0x28: /* ASQ */
n->bar.asq = data;
+ trace_nvme_mmio_asqaddr(data);
break;
- case 0x2c:
+ case 0x2c: /* ASQ hi */
n->bar.asq |= data << 32;
+ trace_nvme_mmio_asqaddr_hi(data, n->bar.asq);
break;
- case 0x30:
+ case 0x30: /* ACQ */
+ trace_nvme_mmio_acqaddr(data);
n->bar.acq = data;
break;
- case 0x34:
+ case 0x34: /* ACQ hi */
n->bar.acq |= data << 32;
+ trace_nvme_mmio_acqaddr_hi(data, n->bar.acq);
break;
+ case 0x38: /* CMBLOC */
+ NVME_GUEST_ERR(nvme_ub_mmiowr_cmbloc_reserved,
+ "invalid write to reserved CMBLOC"
+ " when CMBSZ is zero, ignored");
+ return;
+ case 0x3C: /* CMBSZ */
+ NVME_GUEST_ERR(nvme_ub_mmiowr_cmbsz_readonly,
+ "invalid write to read only CMBSZ, ignored");
+ return;
default:
+ NVME_GUEST_ERR(nvme_ub_mmiowr_invalid,
+ "invalid MMIO write,"
+                       " offset=0x%"PRIx64", data=0x%"PRIx64"",
+ offset, data);
break;
}
}
@@ -812,9 +1015,26 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
uint8_t *ptr = (uint8_t *)&n->bar;
uint64_t val = 0;
+ if (unlikely(addr & (sizeof(uint32_t) - 1))) {
+ NVME_GUEST_ERR(nvme_ub_mmiord_misaligned32,
+ "MMIO read not 32-bit aligned,"
+ " offset=0x%"PRIx64"", addr);
+ /* should RAZ, fall through for now */
+ } else if (unlikely(size < sizeof(uint32_t))) {
+ NVME_GUEST_ERR(nvme_ub_mmiord_toosmall,
+ "MMIO read smaller than 32-bits,"
+ " offset=0x%"PRIx64"", addr);
+ /* should RAZ, fall through for now */
+ }
+
if (addr < sizeof(n->bar)) {
memcpy(&val, ptr + addr, size);
+ } else {
+ NVME_GUEST_ERR(nvme_ub_mmiord_invalid_ofs,
+ "MMIO read beyond last register,"
+ " offset=0x%"PRIx64", returning 0", addr);
}
+
return val;
}
@@ -822,22 +1042,36 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
uint32_t qid;
- if (addr & ((1 << 2) - 1)) {
+ if (unlikely(addr & ((1 << 2) - 1))) {
+ NVME_GUEST_ERR(nvme_ub_db_wr_misaligned,
+ "doorbell write not 32-bit aligned,"
+ " offset=0x%"PRIx64", ignoring", addr);
return;
}
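+    /*
+     * Doorbells start at offset 0x1000 with a 4-byte stride: submission
+     * queue tail registers sit at even dwords and completion queue head
+     * registers at odd ones, so the dword parity selects the queue type.
+     */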
if (((addr - 0x1000) >> 2) & 1) {
+ /* Completion queue doorbell write */
+
uint16_t new_head = val & 0xffff;
int start_sqs;
NvmeCQueue *cq;
qid = (addr - (0x1000 + (1 << 2))) >> 3;
- if (nvme_check_cqid(n, qid)) {
+ if (unlikely(nvme_check_cqid(n, qid))) {
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cq,
+ "completion queue doorbell write"
+ " for nonexistent queue,"
+                           " cqid=%"PRIu32", ignoring", qid);
return;
}
cq = n->cq[qid];
- if (new_head >= cq->size) {
+ if (unlikely(new_head >= cq->size)) {
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cqhead,
+ "completion queue doorbell write value"
+                           " beyond queue size, cqid=%"PRIu32","
+ " new_head=%"PRIu16", ignoring",
+ qid, new_head);
return;
}
@@ -855,16 +1089,27 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
nvme_isr_notify(n, cq);
}
} else {
+ /* Submission queue doorbell write */
+
uint16_t new_tail = val & 0xffff;
NvmeSQueue *sq;
qid = (addr - 0x1000) >> 3;
- if (nvme_check_sqid(n, qid)) {
+ if (unlikely(nvme_check_sqid(n, qid))) {
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sq,
+ "submission queue doorbell write"
+ " for nonexistent queue,"
+ " sqid=%"PRIu32", ignoring", qid);
return;
}
sq = n->sq[qid];
- if (new_tail >= sq->size) {
+ if (unlikely(new_tail >= sq->size)) {
+ NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sqtail,
+ "submission queue doorbell write value"
+ " beyond queue size, sqid=%"PRIu32","
+ " new_tail=%"PRIu16", ignoring",
+ qid, new_tail);
return;
}
@@ -920,7 +1165,7 @@ static const MemoryRegionOps nvme_cmb_ops = {
},
};
-static int nvme_init(PCIDevice *pci_dev)
+static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
NvmeCtrl *n = NVME(pci_dev);
NvmeIdCtrl *id = &n->id_ctrl;
@@ -928,27 +1173,27 @@ static int nvme_init(PCIDevice *pci_dev)
int i;
int64_t bs_size;
uint8_t *pci_conf;
- Error *local_err = NULL;
if (!n->conf.blk) {
- return -1;
+ error_setg(errp, "drive property not set");
+ return;
}
bs_size = blk_getlength(n->conf.blk);
if (bs_size < 0) {
- return -1;
+ error_setg(errp, "could not get backing file size");
+ return;
}
blkconf_serial(&n->conf, &n->serial);
if (!n->serial) {
- return -1;
+ error_setg(errp, "serial property not set");
+ return;
}
blkconf_blocksizes(&n->conf);
- blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
- false, &local_err);
- if (local_err) {
- error_report_err(local_err);
- return -1;
+ if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
+ false, errp)) {
+ return;
}
pci_conf = pci_dev->config;
@@ -1046,7 +1291,6 @@ static int nvme_init(PCIDevice *pci_dev)
cpu_to_le64(n->ns_size >>
id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
}
- return 0;
}
static void nvme_exit(PCIDevice *pci_dev)
@@ -1081,7 +1325,7 @@ static void nvme_class_init(ObjectClass *oc, void *data)
DeviceClass *dc = DEVICE_CLASS(oc);
PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
- pc->init = nvme_init;
+ pc->realize = nvme_realize;
pc->exit = nvme_exit;
pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
pc->vendor_id = PCI_VENDOR_ID_INTEL;
diff --git a/hw/block/trace-events b/hw/block/trace-events
index cb6767b3ee..5acd495207 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -10,3 +10,103 @@ virtio_blk_submit_multireq(void *vdev, void *mrb, int start, int num_reqs, uint6
# hw/block/hd-geometry.c
hd_geometry_lchs_guess(void *blk, int cyls, int heads, int secs) "blk %p LCHS %d %d %d"
hd_geometry_guess(void *blk, uint32_t cyls, uint32_t heads, uint32_t secs, int trans) "blk %p CHS %u %u %u trans %d"
+
+# hw/block/nvme.c
+# nvme traces for successful events
+nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u"
+nvme_irq_pin(void) "pulsing IRQ pin"
+nvme_irq_masked(void) "IRQ is masked"
+nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64""
+nvme_rw(char const *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64""
+nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
+nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d"
+nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""
+nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16""
+nvme_identify_ctrl(void) "identify controller"
+nvme_identify_ns(uint16_t ns) "identify namespace, nsid=%"PRIu16""
+nvme_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16""
+nvme_getfeat_vwcache(char const* result) "get feature volatile write cache, result=%s"
+nvme_getfeat_numq(int result) "get feature number of queues, result=%d"
+nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d"
+nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64""
+nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64""
+nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64""
+nvme_mmio_aqattr(uint64_t data) "wrote MMIO, admin queue attributes=0x%"PRIx64""
+nvme_mmio_asqaddr(uint64_t data) "wrote MMIO, admin submission queue address=0x%"PRIx64""
+nvme_mmio_acqaddr(uint64_t data) "wrote MMIO, admin completion queue address=0x%"PRIx64""
+nvme_mmio_asqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin submission queue high half=0x%"PRIx64", new_address=0x%"PRIx64""
+nvme_mmio_acqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin completion queue high half=0x%"PRIx64", new_address=0x%"PRIx64""
+nvme_mmio_start_success(void) "setting controller enable bit succeeded"
+nvme_mmio_stopped(void) "cleared controller enable bit"
+nvme_mmio_shutdown_set(void) "shutdown bit set"
+nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
+
+# nvme traces for error conditions
+nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
+nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
+nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
+nvme_err_invalid_prp2_missing(void) "PRP2 is null and more data to be transferred"
+nvme_err_invalid_field(void) "invalid field"
+nvme_err_invalid_prp(void) "invalid PRP"
+nvme_err_invalid_sgl(void) "invalid SGL"
+nvme_err_invalid_ns(uint32_t ns, uint32_t limit) "invalid namespace %u not within 1-%u"
+nvme_err_invalid_opc(uint8_t opc) "invalid opcode 0x%"PRIx8""
+nvme_err_invalid_admin_opc(uint8_t opc) "invalid admin opcode 0x%"PRIx8""
+nvme_err_invalid_lba_range(uint64_t start, uint64_t len, uint64_t limit) "Invalid LBA start=%"PRIu64" len=%"PRIu64" limit=%"PRIu64""
+nvme_err_invalid_del_sq(uint16_t qid) "invalid submission queue deletion, sqid=%"PRIu16""
+nvme_err_invalid_create_sq_cqid(uint16_t cqid) "failed creating submission queue, invalid cqid=%"PRIu16""
+nvme_err_invalid_create_sq_sqid(uint16_t sqid) "failed creating submission queue, invalid sqid=%"PRIu16""
+nvme_err_invalid_create_sq_size(uint16_t qsize) "failed creating submission queue, invalid qsize=%"PRIu16""
+nvme_err_invalid_create_sq_addr(uint64_t addr) "failed creating submission queue, addr=0x%"PRIx64""
+nvme_err_invalid_create_sq_qflags(uint16_t qflags) "failed creating submission queue, qflags=%"PRIu16""
+nvme_err_invalid_del_cq_cqid(uint16_t cqid) "failed deleting completion queue, cqid=%"PRIu16""
+nvme_err_invalid_del_cq_notempty(uint16_t cqid) "failed deleting completion queue, it is not empty, cqid=%"PRIu16""
+nvme_err_invalid_create_cq_cqid(uint16_t cqid) "failed creating completion queue, cqid=%"PRIu16""
+nvme_err_invalid_create_cq_size(uint16_t size) "failed creating completion queue, size=%"PRIu16""
+nvme_err_invalid_create_cq_addr(uint64_t addr) "failed creating completion queue, addr=0x%"PRIx64""
+nvme_err_invalid_create_cq_vector(uint16_t vector) "failed creating completion queue, vector=%"PRIu16""
+nvme_err_invalid_create_cq_qflags(uint16_t qflags) "failed creating completion queue, qflags=%"PRIu16""
+nvme_err_invalid_identify_cns(uint16_t cns) "identify, invalid cns=0x%"PRIx16""
+nvme_err_invalid_getfeat(int dw10) "invalid get features, dw10=0x%"PRIx32""
+nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx32""
+nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues"
+nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues"
+nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null"
+nvme_err_startfail_nbaracq(void) "nvme_start_ctrl failed because the admin completion queue address is null"
+nvme_err_startfail_asq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin submission queue address is misaligned: 0x%"PRIx64""
+nvme_err_startfail_acq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin completion queue address is misaligned: 0x%"PRIx64""
+nvme_err_startfail_page_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too small: log2size=%u, min=%u"
+nvme_err_startfail_page_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too large: log2size=%u, max=%u"
+nvme_err_startfail_cqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too small: log2size=%u, min=%u"
+nvme_err_startfail_cqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too large: log2size=%u, max=%u"
+nvme_err_startfail_sqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too small: log2size=%u, min=%u"
+nvme_err_startfail_sqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too large: log2size=%u, max=%u"
+nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the admin submission queue size is zero"
+nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero"
+nvme_err_startfail(void) "setting controller enable bit failed"
+
+# Traces for undefined behavior
+nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64""
+nvme_ub_mmiowr_toosmall(uint64_t offset, unsigned size) "MMIO write smaller than 32 bits, offset=0x%"PRIx64", size=%u"
+nvme_ub_mmiowr_intmask_with_msix(void) "undefined access to interrupt mask set when MSI-X is enabled"
+nvme_ub_mmiowr_ro_csts(void) "attempted to set a read only bit of controller status"
+nvme_ub_mmiowr_ssreset_w1c_unsupported(void) "attempted to W1C CSTS.NSSRO but CAP.NSSRS is zero (not supported)"
+nvme_ub_mmiowr_ssreset_unsupported(void) "attempted NVM subsystem reset but CAP.NSSRS is zero (not supported)"
+nvme_ub_mmiowr_cmbloc_reserved(void) "invalid write to reserved CMBLOC when CMBSZ is zero, ignored"
+nvme_ub_mmiowr_cmbsz_readonly(void) "invalid write to read only CMBSZ, ignored"
+nvme_ub_mmiowr_invalid(uint64_t offset, uint64_t data) "invalid MMIO write, offset=0x%"PRIx64", data=0x%"PRIx64""
+nvme_ub_mmiord_misaligned32(uint64_t offset) "MMIO read not 32-bit aligned, offset=0x%"PRIx64""
+nvme_ub_mmiord_toosmall(uint64_t offset) "MMIO read smaller than 32-bits, offset=0x%"PRIx64""
+nvme_ub_mmiord_invalid_ofs(uint64_t offset) "MMIO read beyond last register, offset=0x%"PRIx64", returning 0"
+nvme_ub_db_wr_misaligned(uint64_t offset) "doorbell write not 32-bit aligned, offset=0x%"PRIx64", ignoring"
+nvme_ub_db_wr_invalid_cq(uint32_t qid) "completion queue doorbell write for nonexistent queue, cqid=%"PRIu32", ignoring"
+nvme_ub_db_wr_invalid_cqhead(uint32_t qid, uint16_t new_head) "completion queue doorbell write value beyond queue size, cqid=%"PRIu32", new_head=%"PRIu16", ignoring"
+nvme_ub_db_wr_invalid_sq(uint32_t qid) "submission queue doorbell write for nonexistent queue, sqid=%"PRIu32", ignoring"
+nvme_ub_db_wr_invalid_sqtail(uint32_t qid, uint16_t new_tail) "submission queue doorbell write value beyond queue size, sqid=%"PRIu32", new_tail=%"PRIu16", ignoring"
+
+# hw/block/xen_disk.c
+xen_disk_alloc(char *name) "%s"
+xen_disk_init(char *name) "%s"
+xen_disk_connect(char *name) "%s"
+xen_disk_disconnect(char *name) "%s"
+xen_disk_free(char *name) "%s"
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 05d1440786..b1532e4e91 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -928,23 +928,34 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
error_setg(errp, "num-queues property must be larger than 0");
return;
}
+ if (!is_power_of_2(conf->queue_size) ||
+ conf->queue_size > VIRTQUEUE_MAX_SIZE) {
+ error_setg(errp, "invalid queue-size property (%" PRIu16 "), "
+ "must be a power of 2 (max %d)",
+ conf->queue_size, VIRTQUEUE_MAX_SIZE);
+ return;
+ }
blkconf_serial(&conf->conf, &conf->serial);
- blkconf_apply_backend_options(&conf->conf,
- blk_is_read_only(conf->conf.blk), true,
- &err);
- if (err) {
- error_propagate(errp, err);
+ if (!blkconf_apply_backend_options(&conf->conf,
+ blk_is_read_only(conf->conf.blk), true,
+ errp)) {
return;
}
s->original_wce = blk_enable_write_cache(conf->conf.blk);
- blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, &err);
- if (err) {
- error_propagate(errp, err);
+ if (!blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, errp)) {
return;
}
+
blkconf_blocksizes(&conf->conf);
+ if (conf->conf.logical_block_size >
+ conf->conf.physical_block_size) {
+ error_setg(errp,
+ "logical_block_size > physical_block_size not supported");
+ return;
+ }
+
virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
sizeof(struct virtio_blk_config));
@@ -953,7 +964,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;
for (i = 0; i < conf->num_queues; i++) {
- virtio_add_queue(vdev, 128, virtio_blk_handle_output);
+ virtio_add_queue(vdev, conf->queue_size, virtio_blk_handle_output);
}
virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
if (err != NULL) {
@@ -1012,6 +1023,7 @@ static Property virtio_blk_properties[] = {
DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
true),
DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1),
+ DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 128),
DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
IOThread *),
DEFINE_PROP_END_OF_LIST(),
diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index e431bd89e8..f74fcd42d1 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -27,10 +27,12 @@
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
+#include "sysemu/iothread.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
+#include "trace.h"
/* ------------------------------------------------------------- */
@@ -125,6 +127,9 @@ struct XenBlkDev {
DriveInfo *dinfo;
BlockBackend *blk;
QEMUBH *bh;
+
+ IOThread *iothread;
+ AioContext *ctx;
};
/* ------------------------------------------------------------- */
@@ -596,9 +601,12 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
static void qemu_aio_complete(void *opaque, int ret)
{
struct ioreq *ioreq = opaque;
+ struct XenBlkDev *blkdev = ioreq->blkdev;
+
+ aio_context_acquire(blkdev->ctx);
if (ret != 0) {
- xen_pv_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
+ xen_pv_printf(&blkdev->xendev, 0, "%s I/O error\n",
ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
ioreq->aio_errors++;
}
@@ -607,10 +615,10 @@ static void qemu_aio_complete(void *opaque, int ret)
if (ioreq->presync) {
ioreq->presync = 0;
ioreq_runio_qemu_aio(ioreq);
- return;
+ goto done;
}
if (ioreq->aio_inflight > 0) {
- return;
+ goto done;
}
if (xen_feature_grant_copy) {
@@ -647,16 +655,19 @@ static void qemu_aio_complete(void *opaque, int ret)
}
case BLKIF_OP_READ:
if (ioreq->status == BLKIF_RSP_OKAY) {
- block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
+ block_acct_done(blk_get_stats(blkdev->blk), &ioreq->acct);
} else {
- block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
+ block_acct_failed(blk_get_stats(blkdev->blk), &ioreq->acct);
}
break;
case BLKIF_OP_DISCARD:
default:
break;
}
- qemu_bh_schedule(ioreq->blkdev->bh);
+ qemu_bh_schedule(blkdev->bh);
+
+done:
+ aio_context_release(blkdev->ctx);
}
static bool blk_split_discard(struct ioreq *ioreq, blkif_sector_t sector_number,
@@ -913,17 +924,29 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
static void blk_bh(void *opaque)
{
struct XenBlkDev *blkdev = opaque;
+
+ aio_context_acquire(blkdev->ctx);
blk_handle_requests(blkdev);
+ aio_context_release(blkdev->ctx);
}
static void blk_alloc(struct XenDevice *xendev)
{
struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
+ Error *err = NULL;
+
+ trace_xen_disk_alloc(xendev->name);
QLIST_INIT(&blkdev->inflight);
QLIST_INIT(&blkdev->finished);
QLIST_INIT(&blkdev->freelist);
- blkdev->bh = qemu_bh_new(blk_bh, blkdev);
+
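+    /*
+     * Give each disk its own IOThread and run the request bottom half in
+     * that thread's AioContext rather than in the main loop.
+     */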
+ blkdev->iothread = iothread_create(xendev->name, &err);
+ assert(!err);
+
+ blkdev->ctx = iothread_get_aio_context(blkdev->iothread);
+ blkdev->bh = aio_bh_new(blkdev->ctx, blk_bh, blkdev);
+
if (xen_mode != XEN_EMULATE) {
batch_maps = 1;
}
@@ -950,6 +973,8 @@ static int blk_init(struct XenDevice *xendev)
int info = 0;
char *directiosafe = NULL;
+ trace_xen_disk_init(xendev->name);
+
/* read xenstore entries */
if (blkdev->params == NULL) {
char *h = NULL;
@@ -1062,6 +1087,8 @@ static int blk_connect(struct XenDevice *xendev)
unsigned int i;
uint32_t *domids;
+ trace_xen_disk_connect(xendev->name);
+
/* read-only ? */
if (blkdev->directiosafe) {
qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
@@ -1287,6 +1314,8 @@ static int blk_connect(struct XenDevice *xendev)
blkdev->persistent_gnt_count = 0;
}
+ blk_set_aio_context(blkdev->blk, blkdev->ctx);
+
xen_be_bind_evtchn(&blkdev->xendev);
xen_pv_printf(&blkdev->xendev, 1, "ok: proto %s, nr-ring-ref %u, "
@@ -1300,13 +1329,20 @@ static void blk_disconnect(struct XenDevice *xendev)
{
struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
+ trace_xen_disk_disconnect(xendev->name);
+
+ aio_context_acquire(blkdev->ctx);
+
if (blkdev->blk) {
+ blk_set_aio_context(blkdev->blk, qemu_get_aio_context());
blk_detach_dev(blkdev->blk, blkdev);
blk_unref(blkdev->blk);
blkdev->blk = NULL;
}
xen_pv_unbind_evtchn(&blkdev->xendev);
+ aio_context_release(blkdev->ctx);
+
if (blkdev->sring) {
xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
blkdev->nr_ring_ref);
@@ -1345,6 +1381,8 @@ static int blk_free(struct XenDevice *xendev)
struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
struct ioreq *ioreq;
+ trace_xen_disk_free(xendev->name);
+
blk_disconnect(xendev);
while (!QLIST_EMPTY(&blkdev->freelist)) {
@@ -1360,6 +1398,7 @@ static int blk_free(struct XenDevice *xendev)
g_free(blkdev->dev);
g_free(blkdev->devtype);
qemu_bh_delete(blkdev->bh);
+ iothread_destroy(blkdev->iothread);
return 0;
}
diff --git a/hw/char/debugcon.c b/hw/char/debugcon.c
index 95ccec6f8b..e2abc61b04 100644
--- a/hw/char/debugcon.c
+++ b/hw/char/debugcon.c
@@ -29,7 +29,6 @@
#include "hw/hw.h"
#include "chardev/char-fe.h"
#include "hw/isa/isa.h"
-#include "hw/i386/pc.h"
#define TYPE_ISA_DEBUGCON_DEVICE "isa-debugcon"
#define ISA_DEBUGCON_DEVICE(obj) \
diff --git a/hw/char/xen_console.c b/hw/char/xen_console.c
index 3643dfe067..5e68326c19 100644
--- a/hw/char/xen_console.c
+++ b/hw/char/xen_console.c
@@ -27,7 +27,6 @@
#include "hw/hw.h"
#include "chardev/char-fe.h"
#include "hw/xen/xen_backend.h"
-#include "qapi/error.h"
#include <xen/io/console.h>
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 36c2fb069c..c857f3f934 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -20,7 +20,6 @@
#include "sysemu/numa.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
-#include "sysemu/numa.h"
#include "sysemu/qtest.h"
static char *machine_get_accel(Object *obj, Error **errp)
diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
index ec10da7424..1d3ba722fa 100644
--- a/hw/core/qdev-properties-system.c
+++ b/hw/core/qdev-properties-system.c
@@ -22,6 +22,7 @@
#include "qapi/visitor.h"
#include "chardev/char-fe.h"
#include "sysemu/iothread.h"
+#include "sysemu/tpm_backend.h"
static void get_pointer(Object *obj, Visitor *v, Property *prop,
char *(*print)(void *ptr),
diff --git a/hw/cpu/core.c b/hw/cpu/core.c
index bd578ab80c..7e42e2c87a 100644
--- a/hw/cpu/core.c
+++ b/hw/cpu/core.c
@@ -6,6 +6,7 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
+#include "qemu/osdep.h"
#include "hw/cpu/core.h"
#include "qapi/visitor.h"
#include "qapi/error.h"
diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c
index bc32bf1e39..138ae961b9 100644
--- a/hw/display/cirrus_vga.c
+++ b/hw/display/cirrus_vga.c
@@ -31,7 +31,6 @@
#include "trace.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
-#include "ui/console.h"
#include "ui/pixel_ops.h"
#include "vga_int.h"
#include "hw/loader.h"
diff --git a/hw/display/qxl.h b/hw/display/qxl.h
index f6556adb73..8668a8e05a 100644
--- a/hw/display/qxl.h
+++ b/hw/display/qxl.h
@@ -3,7 +3,6 @@
#include "qemu-common.h"
-#include "ui/console.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "vga_int.h"
diff --git a/hw/display/tc6393xb.c b/hw/display/tc6393xb.c
index 74d10af3d4..0ae63605f0 100644
--- a/hw/display/tc6393xb.c
+++ b/hw/display/tc6393xb.c
@@ -172,6 +172,7 @@ static void tc6393xb_gpio_handler_update(TC6393xbState *s)
int bit;
level = s->gpio_level & s->gpio_dir;
+ level &= MAKE_64BIT_MASK(0, TC6393XB_GPIOS);
for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) {
bit = ctz32(diff);
diff --git a/hw/display/vga-isa-mm.c b/hw/display/vga-isa-mm.c
index 51ccbccc41..e887b45651 100644
--- a/hw/display/vga-isa-mm.c
+++ b/hw/display/vga-isa-mm.c
@@ -23,11 +23,9 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "ui/console.h"
-#include "hw/i386/pc.h"
+#include "hw/display/vga.h"
#include "vga_int.h"
#include "ui/pixel_ops.h"
-#include "qemu/timer.h"
#define VGA_RAM_SIZE (8192 * 1024)
diff --git a/hw/display/vga-isa.c b/hw/display/vga-isa.c
index 1af95562f2..469834add5 100644
--- a/hw/display/vga-isa.c
+++ b/hw/display/vga-isa.c
@@ -25,8 +25,7 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "ui/console.h"
-#include "hw/i386/pc.h"
+#include "hw/isa/isa.h"
#include "vga_int.h"
#include "ui/pixel_ops.h"
#include "qemu/timer.h"
diff --git a/hw/display/vga-pci.c b/hw/display/vga-pci.c
index 7adb89fcb4..1674bd3581 100644
--- a/hw/display/vga-pci.c
+++ b/hw/display/vga-pci.c
@@ -25,7 +25,6 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "ui/console.h"
#include "hw/pci/pci.h"
#include "vga_int.h"
#include "ui/pixel_ops.h"
diff --git a/hw/display/vga.c b/hw/display/vga.c
index a64a0942da..a0412000a5 100644
--- a/hw/display/vga.c
+++ b/hw/display/vga.c
@@ -24,11 +24,10 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/hw.h"
-#include "vga.h"
-#include "ui/console.h"
-#include "hw/i386/pc.h"
+#include "hw/display/vga.h"
#include "hw/pci/pci.h"
#include "vga_int.h"
+#include "vga_regs.h"
#include "ui/pixel_ops.h"
#include "qemu/timer.h"
#include "hw/xen/xen.h"
diff --git a/hw/display/vga_int.h b/hw/display/vga_int.h
index ad34a1f048..fe23b81442 100644
--- a/hw/display/vga_int.h
+++ b/hw/display/vga_int.h
@@ -25,8 +25,9 @@
#ifndef HW_VGA_INT_H
#define HW_VGA_INT_H
-#include "hw/hw.h"
+#include "exec/ioport.h"
#include "exec/memory.h"
+#include "ui/console.h"
#define ST01_V_RETRACE 0x08
#define ST01_DISP_ENABLE 0x01
diff --git a/hw/display/vga.h b/hw/display/vga_regs.h
index 16886f5eed..16886f5eed 100644
--- a/hw/display/vga.h
+++ b/hw/display/vga_regs.h
diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c
index f9b017d86b..baa74ba82c 100644
--- a/hw/display/virtio-vga.c
+++ b/hw/display/virtio-vga.c
@@ -1,7 +1,6 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
-#include "ui/console.h"
#include "vga_int.h"
#include "hw/virtio/virtio-pci.h"
#include "qapi/error.h"
diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c
index 0e6673a911..bd3e8b3586 100644
--- a/hw/display/vmware_vga.c
+++ b/hw/display/vmware_vga.c
@@ -26,7 +26,6 @@
#include "hw/hw.h"
#include "hw/loader.h"
#include "trace.h"
-#include "ui/console.h"
#include "ui/vnc.h"
#include "hw/pci/pci.h"
diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c
index 8e2547ac05..d4fc0fa5f2 100644
--- a/hw/display/xenfb.c
+++ b/hw/display/xenfb.c
@@ -27,6 +27,7 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
+#include "ui/input.h"
#include "ui/console.h"
#include "hw/xen/xen_backend.h"
@@ -51,9 +52,11 @@ struct common {
struct XenInput {
struct common c;
int abs_pointer_wanted; /* Whether guest supports absolute pointer */
- int button_state; /* Last seen pointer button state */
- int extended;
- QEMUPutMouseEntry *qmouse;
+ int raw_pointer_wanted; /* Whether guest supports raw (unscaled) pointer */
+ QemuInputHandlerState *qkbd;
+ QemuInputHandlerState *qmou;
+ int axis[INPUT_AXIS__MAX];
+ int wheel;
};
#define UP_QUEUE 8
@@ -119,79 +122,6 @@ static void common_unbind(struct common *c)
}
/* -------------------------------------------------------------------- */
-
-#if 0
-/*
- * These two tables are not needed any more, but left in here
- * intentionally as documentation, to show how scancode2linux[]
- * was generated.
- *
- * Tables to map from scancode to Linux input layer keycode.
- * Scancodes are hardware-specific. These maps assumes a
- * standard AT or PS/2 keyboard which is what QEMU feeds us.
- */
-const unsigned char atkbd_set2_keycode[512] = {
-
- 0, 67, 65, 63, 61, 59, 60, 88, 0, 68, 66, 64, 62, 15, 41,117,
- 0, 56, 42, 93, 29, 16, 2, 0, 0, 0, 44, 31, 30, 17, 3, 0,
- 0, 46, 45, 32, 18, 5, 4, 95, 0, 57, 47, 33, 20, 19, 6,183,
- 0, 49, 48, 35, 34, 21, 7,184, 0, 0, 50, 36, 22, 8, 9,185,
- 0, 51, 37, 23, 24, 11, 10, 0, 0, 52, 53, 38, 39, 25, 12, 0,
- 0, 89, 40, 0, 26, 13, 0, 0, 58, 54, 28, 27, 0, 43, 0, 85,
- 0, 86, 91, 90, 92, 0, 14, 94, 0, 79,124, 75, 71,121, 0, 0,
- 82, 83, 80, 76, 77, 72, 1, 69, 87, 78, 81, 74, 55, 73, 70, 99,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 217,100,255, 0, 97,165, 0, 0,156, 0, 0, 0, 0, 0, 0,125,
- 173,114, 0,113, 0, 0, 0,126,128, 0, 0,140, 0, 0, 0,127,
- 159, 0,115, 0,164, 0, 0,116,158, 0,150,166, 0, 0, 0,142,
- 157, 0, 0, 0, 0, 0, 0, 0,155, 0, 98, 0, 0,163, 0, 0,
- 226, 0, 0, 0, 0, 0, 0, 0, 0,255, 96, 0, 0, 0,143, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0,107, 0,105,102, 0, 0,112,
- 110,111,108,112,106,103, 0,119, 0,118,109, 0, 99,104,119, 0,
-
-};
-
-const unsigned char atkbd_unxlate_table[128] = {
-
- 0,118, 22, 30, 38, 37, 46, 54, 61, 62, 70, 69, 78, 85,102, 13,
- 21, 29, 36, 45, 44, 53, 60, 67, 68, 77, 84, 91, 90, 20, 28, 27,
- 35, 43, 52, 51, 59, 66, 75, 76, 82, 14, 18, 93, 26, 34, 33, 42,
- 50, 49, 58, 65, 73, 74, 89,124, 17, 41, 88, 5, 6, 4, 12, 3,
- 11, 2, 10, 1, 9,119,126,108,117,125,123,107,115,116,121,105,
- 114,122,112,113,127, 96, 97,120, 7, 15, 23, 31, 39, 47, 55, 63,
- 71, 79, 86, 94, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 87,111,
- 19, 25, 57, 81, 83, 92, 95, 98, 99,100,101,103,104,106,109,110
-
-};
-#endif
-
-/*
- * for (i = 0; i < 128; i++) {
- * scancode2linux[i] = atkbd_set2_keycode[atkbd_unxlate_table[i]];
- * scancode2linux[i | 0x80] = atkbd_set2_keycode[atkbd_unxlate_table[i] | 0x80];
- * }
- */
-static const unsigned char scancode2linux[512] = {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 99, 0, 86, 87, 88,117, 0, 0, 95,183,184,185,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 93, 0, 0, 89, 0, 0, 85, 91, 90, 92, 0, 94, 0,124,121, 0,
-
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 165, 0, 0, 0, 0, 0, 0, 0, 0,163, 0, 0, 96, 97, 0, 0,
- 113,140,164, 0,166, 0, 0, 0, 0, 0,255, 0, 0, 0,114, 0,
- 115, 0,150, 0, 0, 98,255, 99,100, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0,119,119,102,103,104, 0,105,112,106,118,107,
- 108,109,110,111, 0, 0, 0, 0, 0, 0, 0,125,126,127,116,142,
- 0, 0, 0,143, 0,217,156,173,128,159,158,157,155,226, 0,112,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-};
-
/* Send an event to the keyboard frontend driver */
static int xenfb_kbd_event(struct XenInput *xenfb,
union xenkbd_in_event *event)
@@ -262,36 +192,28 @@ static int xenfb_send_position(struct XenInput *xenfb,
/*
* Send a key event from the client to the guest OS
- * QEMU gives us a raw scancode from an AT / PS/2 style keyboard.
+ * QEMU gives us a QCode.
* We have to turn this into a Linux Input layer keycode.
*
- * Extra complexity from the fact that with extended scancodes
- * (like those produced by arrow keys) this method gets called
- * twice, but we only want to send a single event. So we have to
- * track the '0xe0' scancode state & collapse the extended keys
- * as needed.
- *
* Wish we could just send scancodes straight to the guest which
* already has code for dealing with this...
*/
-static void xenfb_key_event(void *opaque, int scancode)
+static void xenfb_key_event(DeviceState *dev, QemuConsole *src,
+ InputEvent *evt)
{
- struct XenInput *xenfb = opaque;
- int down = 1;
+ struct XenInput *xenfb = (struct XenInput *)dev;
+ InputKeyEvent *key = evt->u.key.data;
+ int qcode = qemu_input_key_value_to_qcode(key->key);
+ int lnx;
- if (scancode == 0xe0) {
- xenfb->extended = 1;
- return;
- } else if (scancode & 0x80) {
- scancode &= 0x7f;
- down = 0;
- }
- if (xenfb->extended) {
- scancode |= 0x80;
- xenfb->extended = 0;
+ if (qcode < qemu_input_map_qcode_to_linux_len) {
+ lnx = qemu_input_map_qcode_to_linux[qcode];
+
+ if (lnx) {
+ trace_xenfb_key_event(xenfb, lnx, key->down);
+ xenfb_send_key(xenfb, key->down, lnx);
+ }
}
- trace_xenfb_key_event(opaque, scancode2linux[scancode], down);
- xenfb_send_key(xenfb, down, scancode2linux[scancode]);
}
/*
@@ -303,48 +225,126 @@ static void xenfb_key_event(void *opaque, int scancode)
* given any button up/down events, so have to track changes in
* the button state.
*/
-static void xenfb_mouse_event(void *opaque,
- int dx, int dy, int dz, int button_state)
+static void xenfb_mouse_event(DeviceState *dev, QemuConsole *src,
+ InputEvent *evt)
{
- struct XenInput *xenfb = opaque;
- QemuConsole *con = qemu_console_lookup_by_index(0);
+ struct XenInput *xenfb = (struct XenInput *)dev;
+ InputBtnEvent *btn;
+ InputMoveEvent *move;
+ QemuConsole *con;
DisplaySurface *surface;
- int dw, dh, i;
+ int scale;
+
+ switch (evt->type) {
+ case INPUT_EVENT_KIND_BTN:
+ btn = evt->u.btn.data;
+ switch (btn->button) {
+ case INPUT_BUTTON_LEFT:
+ xenfb_send_key(xenfb, btn->down, BTN_LEFT);
+ break;
+ case INPUT_BUTTON_RIGHT:
+ xenfb_send_key(xenfb, btn->down, BTN_LEFT + 1);
+ break;
+ case INPUT_BUTTON_MIDDLE:
+ xenfb_send_key(xenfb, btn->down, BTN_LEFT + 2);
+ break;
+ case INPUT_BUTTON_WHEEL_UP:
+ if (btn->down) {
+ xenfb->wheel--;
+ }
+ break;
+ case INPUT_BUTTON_WHEEL_DOWN:
+ if (btn->down) {
+ xenfb->wheel++;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case INPUT_EVENT_KIND_ABS:
+ move = evt->u.abs.data;
+ if (xenfb->raw_pointer_wanted) {
+ xenfb->axis[move->axis] = move->value;
+ } else {
+ con = qemu_console_lookup_by_index(0);
+ if (!con) {
+ xen_pv_printf(&xenfb->c.xendev, 0, "No QEMU console available");
+ return;
+ }
+ surface = qemu_console_surface(con);
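+            /*
+             * QEMU reports absolute coordinates in the range 0..0x7fff;
+             * unless the frontend requested raw values, rescale them to the
+             * current console surface dimensions.
+             */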
+ switch (move->axis) {
+ case INPUT_AXIS_X:
+ scale = surface_width(surface) - 1;
+ break;
+ case INPUT_AXIS_Y:
+ scale = surface_height(surface) - 1;
+ break;
+ default:
+ scale = 0x8000;
+ break;
+ }
+ xenfb->axis[move->axis] = move->value * scale / 0x7fff;
+ }
+ break;
- if (!con) {
- xen_pv_printf(&xenfb->c.xendev, 0, "No QEMU console available");
- return;
+ case INPUT_EVENT_KIND_REL:
+ move = evt->u.rel.data;
+ xenfb->axis[move->axis] += move->value;
+ break;
+
+ default:
+ break;
}
+}
- surface = qemu_console_surface(con);
- dw = surface_width(surface);
- dh = surface_height(surface);
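+/*
+ * Motion events above only update the cached axis values; the position or
+ * motion message is sent from this sync callback, once per host input batch,
+ * and relative axes are reset after flushing.
+ */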
+static void xenfb_mouse_sync(DeviceState *dev)
+{
+ struct XenInput *xenfb = (struct XenInput *)dev;
- trace_xenfb_mouse_event(opaque, dx, dy, dz, button_state,
+ trace_xenfb_mouse_event(xenfb, xenfb->axis[INPUT_AXIS_X],
+ xenfb->axis[INPUT_AXIS_Y],
+ xenfb->wheel, 0,
xenfb->abs_pointer_wanted);
- if (xenfb->abs_pointer_wanted)
- xenfb_send_position(xenfb,
- dx * (dw - 1) / 0x7fff,
- dy * (dh - 1) / 0x7fff,
- dz);
- else
- xenfb_send_motion(xenfb, dx, dy, dz);
-
- for (i = 0 ; i < 8 ; i++) {
- int lastDown = xenfb->button_state & (1 << i);
- int down = button_state & (1 << i);
- if (down == lastDown)
- continue;
-
- if (xenfb_send_key(xenfb, down, BTN_LEFT+i) < 0)
- return;
- }
- xenfb->button_state = button_state;
+ if (xenfb->abs_pointer_wanted) {
+ xenfb_send_position(xenfb, xenfb->axis[INPUT_AXIS_X],
+ xenfb->axis[INPUT_AXIS_Y],
+ xenfb->wheel);
+ } else {
+ xenfb_send_motion(xenfb, xenfb->axis[INPUT_AXIS_X],
+ xenfb->axis[INPUT_AXIS_Y],
+ xenfb->wheel);
+ xenfb->axis[INPUT_AXIS_X] = 0;
+ xenfb->axis[INPUT_AXIS_Y] = 0;
+ }
+ xenfb->wheel = 0;
}
+static QemuInputHandler xenfb_keyboard = {
+ .name = "Xen PV Keyboard",
+ .mask = INPUT_EVENT_MASK_KEY,
+ .event = xenfb_key_event,
+};
+
+static QemuInputHandler xenfb_abs_mouse = {
+ .name = "Xen PV Mouse",
+ .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_ABS,
+ .event = xenfb_mouse_event,
+ .sync = xenfb_mouse_sync,
+};
+
+static QemuInputHandler xenfb_rel_mouse = {
+ .name = "Xen PV Mouse",
+ .mask = INPUT_EVENT_MASK_BTN | INPUT_EVENT_MASK_REL,
+ .event = xenfb_mouse_event,
+ .sync = xenfb_mouse_sync,
+};
+
static int input_init(struct XenDevice *xendev)
{
xenstore_write_be_int(xendev, "feature-abs-pointer", 1);
+ xenstore_write_be_int(xendev, "feature-raw-pointer", 1);
return 0;
}
@@ -357,7 +357,6 @@ static int input_initialise(struct XenDevice *xendev)
if (rc != 0)
return rc;
- qemu_add_kbd_event_handler(xenfb_key_event, in);
return 0;
}
@@ -369,25 +368,44 @@ static void input_connected(struct XenDevice *xendev)
&in->abs_pointer_wanted) == -1) {
in->abs_pointer_wanted = 0;
}
+ if (xenstore_read_fe_int(xendev, "request-raw-pointer",
+ &in->raw_pointer_wanted) == -1) {
+ in->raw_pointer_wanted = 0;
+ }
+ if (in->raw_pointer_wanted && in->abs_pointer_wanted == 0) {
+ xen_pv_printf(xendev, 0, "raw pointer set without abs pointer");
+ }
- if (in->qmouse) {
- qemu_remove_mouse_event_handler(in->qmouse);
+ if (in->qkbd) {
+ qemu_input_handler_unregister(in->qkbd);
+ }
+ if (in->qmou) {
+ qemu_input_handler_unregister(in->qmou);
}
trace_xenfb_input_connected(xendev, in->abs_pointer_wanted);
- in->qmouse = qemu_add_mouse_event_handler(xenfb_mouse_event, in,
- in->abs_pointer_wanted,
- "Xen PVFB Mouse");
+
+ in->qkbd = qemu_input_handler_register((DeviceState *)in, &xenfb_keyboard);
+ in->qmou = qemu_input_handler_register((DeviceState *)in,
+ in->abs_pointer_wanted ? &xenfb_abs_mouse : &xenfb_rel_mouse);
+
+ if (in->raw_pointer_wanted) {
+ qemu_input_handler_activate(in->qkbd);
+ qemu_input_handler_activate(in->qmou);
+ }
}
static void input_disconnect(struct XenDevice *xendev)
{
struct XenInput *in = container_of(xendev, struct XenInput, c.xendev);
- if (in->qmouse) {
- qemu_remove_mouse_event_handler(in->qmouse);
- in->qmouse = NULL;
+ if (in->qkbd) {
+ qemu_input_handler_unregister(in->qkbd);
+ in->qkbd = NULL;
+ }
+ if (in->qmou) {
+ qemu_input_handler_unregister(in->qmou);
+ in->qmou = NULL;
}
- qemu_add_kbd_event_handler(NULL, NULL);
common_unbind(&in->c);
}
diff --git a/hw/i2c/pm_smbus.c b/hw/i2c/pm_smbus.c
index 6fc3923f56..a044dd1b27 100644
--- a/hw/i2c/pm_smbus.c
+++ b/hw/i2c/pm_smbus.c
@@ -19,7 +19,6 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/i2c/pm_smbus.h"
#include "hw/i2c/smbus.h"
diff --git a/hw/i2c/smbus_ich9.c b/hw/i2c/smbus_ich9.c
index e47556c9d8..007cb6701d 100644
--- a/hw/i2c/smbus_ich9.c
+++ b/hw/i2c/smbus_ich9.c
@@ -26,7 +26,6 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/i2c/pm_smbus.h"
#include "hw/pci/pci.h"
#include "sysemu/sysemu.h"
diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
index 2e5e1299ad..fd279e7584 100644
--- a/hw/i386/Makefile.objs
+++ b/hw/i386/Makefile.objs
@@ -5,6 +5,8 @@ obj-y += pc_sysfw.o
obj-y += x86-iommu.o intel_iommu.o
obj-y += amd_iommu.o
obj-$(CONFIG_XEN) += ../xenpv/ xen/
+obj-$(CONFIG_VMPORT) += vmport.o
+obj-$(CONFIG_VMMOUSE) += vmmouse.o
obj-y += kvmvapic.o
obj-y += acpi-build.o
diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c
index 73519ab3ac..18b939e469 100644
--- a/hw/i386/acpi-build.c
+++ b/hw/i386/acpi-build.c
@@ -28,8 +28,8 @@
#include "qemu/error-report.h"
#include "hw/pci/pci.h"
#include "qom/cpu.h"
-#include "hw/i386/pc.h"
#include "target/i386/cpu.h"
+#include "hw/misc/pvpanic.h"
#include "hw/timer/hpet.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
@@ -208,7 +208,7 @@ static void acpi_get_misc_info(AcpiMiscInfo *info)
}
info->has_hpet = hpet_find();
- info->tpm_version = tpm_get_version();
+ info->tpm_version = tpm_get_version(tpm_find());
info->pvpanic_port = pvpanic_port();
info->applesmc_io_base = applesmc_port();
}
@@ -2038,7 +2038,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
}
}
- if (misc->tpm_version != TPM_VERSION_UNSPEC) {
+ if (TPM_IS_TIS(tpm_find())) {
aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE,
TPM_TIS_ADDR_SIZE, AML_READ_WRITE));
}
@@ -2204,7 +2204,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
/* Scan all PCI buses. Generate tables to support hotplug. */
build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en);
- if (misc->tpm_version != TPM_VERSION_UNSPEC) {
+ if (TPM_IS_TIS(tpm_find())) {
dev = aml_device("ISA.TPM");
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C31")));
aml_append(dev, aml_name_decl("_STA", aml_int(0xF)));
@@ -2274,15 +2274,28 @@ build_tpm_tcpa(GArray *table_data, BIOSLinker *linker, GArray *tcpalog)
}
static void
-build_tpm2(GArray *table_data, BIOSLinker *linker)
+build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog)
{
- Acpi20TPM2 *tpm2_ptr;
-
- tpm2_ptr = acpi_data_push(table_data, sizeof *tpm2_ptr);
+ Acpi20TPM2 *tpm2_ptr = acpi_data_push(table_data, sizeof *tpm2_ptr);
+ unsigned log_addr_size = sizeof(tpm2_ptr->log_area_start_address);
+ unsigned log_addr_offset =
+ (char *)&tpm2_ptr->log_area_start_address - table_data->data;
tpm2_ptr->platform_class = cpu_to_le16(TPM2_ACPI_CLASS_CLIENT);
- tpm2_ptr->control_area_address = cpu_to_le64(0);
- tpm2_ptr->start_method = cpu_to_le32(TPM2_START_METHOD_MMIO);
+ if (TPM_IS_TIS(tpm_find())) {
+ tpm2_ptr->control_area_address = cpu_to_le64(0);
+ tpm2_ptr->start_method = cpu_to_le32(TPM2_START_METHOD_MMIO);
+
+ tpm2_ptr->log_area_minimum_length =
+ cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE);
+
+ /* log area start address to be filled by Guest linker */
+ bios_linker_loader_add_pointer(linker,
+ ACPI_BUILD_TABLE_FILE, log_addr_offset, log_addr_size,
+ ACPI_BUILD_TPMLOG_FILE, 0);
+ } else {
+ g_warn_if_reached();
+ }
build_header(linker, table_data,
(void *)tpm2_ptr, "TPM2", sizeof(*tpm2_ptr), 4, NULL, NULL);
@@ -2691,7 +2704,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine)
if (misc.tpm_version == TPM_VERSION_2_0) {
acpi_add_table(table_offsets, tables_blob);
- build_tpm2(tables_blob, tables->linker);
+ build_tpm2(tables_blob, tables->linker, tables->tcpalog);
}
}
if (pcms->numa_nodes) {
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index ad8155ca4c..eeaf0e0aa8 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -20,7 +20,10 @@
* Cache implementation inspired by hw/i386/intel_iommu.c
*/
#include "qemu/osdep.h"
-#include "hw/i386/amd_iommu.h"
+#include "hw/i386/pc.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/pci_bus.h"
+#include "amd_iommu.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "trace.h"
diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h
index d370ae3549..aeef802364 100644
--- a/hw/i386/amd_iommu.h
+++ b/hw/i386/amd_iommu.h
@@ -23,11 +23,6 @@
#include "hw/hw.h"
#include "hw/pci/pci.h"
-#include "hw/pci/msi.h"
-#include "hw/sysbus.h"
-#include "sysemu/dma.h"
-#include "hw/i386/pc.h"
-#include "hw/pci/pci_bus.h"
#include "hw/i386/x86-iommu.h"
/* Capability registers */
diff --git a/hw/i386/kvm/i8259.c b/hw/i386/kvm/i8259.c
index 11d1b726b6..b91e98074e 100644
--- a/hw/i386/kvm/i8259.c
+++ b/hw/i386/kvm/i8259.c
@@ -111,6 +111,7 @@ static void kvm_pic_set_irq(void *opaque, int irq, int level)
{
int delivered;
+ pic_stat_update_irq(irq, level);
delivered = kvm_set_irq(kvm_state, irq, level);
apic_report_irq_delivered(delivered);
}
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 186545d2a4..3fcf318a95 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -69,6 +69,7 @@
#include "qom/cpu.h"
#include "hw/nmi.h"
#include "hw/i386/intel_iommu.h"
+#include "hw/net/ne2000-isa.h"
/* debug PC/ISA interrupts */
//#define DEBUG_IRQ
@@ -1565,7 +1566,7 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
rtc_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_RTC_INT);
}
}
- *rtc_state = rtc_init(isa_bus, 2000, rtc_irq);
+ *rtc_state = mc146818_rtc_init(isa_bus, 2000, rtc_irq);
qemu_register_boot_set(pc_boot_set, *rtc_state);
@@ -1573,7 +1574,7 @@ void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi,
if (kvm_pit_in_kernel()) {
pit = kvm_pit_init(isa_bus, 0x40);
} else {
- pit = pit_init(isa_bus, 0x40, pit_isa_irq, pit_alt_irq);
+ pit = i8254_pit_init(isa_bus, 0x40, pit_isa_irq, pit_alt_irq);
}
if (hpet) {
/* connect PIT to output control line of the HPET */
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index d43b4b6cd3..22d44648af 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -113,3 +113,7 @@ amdvi_mode_invalid(uint8_t level, uint64_t addr)"error: translation level 0x%"PR
amdvi_page_fault(uint64_t addr) "error: page fault accessing guest physical address 0x%"PRIx64
amdvi_iotlb_hit(uint8_t bus, uint8_t slot, uint8_t func, uint64_t addr, uint64_t txaddr) "hit iotlb devid %02x:%02x.%x gpa 0x%"PRIx64" hpa 0x%"PRIx64
amdvi_translation_result(uint8_t bus, uint8_t slot, uint8_t func, uint64_t addr, uint64_t txaddr) "devid: %02x:%02x.%x gpa 0x%"PRIx64" hpa 0x%"PRIx64
+
+# hw/i386/vmport.c
+vmport_register(unsigned char command, void *func, void *opaque) "command: 0x%02x func: %p opaque: %p"
+vmport_command(unsigned char command) "command: 0x%02x"
diff --git a/hw/input/vmmouse.c b/hw/i386/vmmouse.c
index b6d22086f4..65ef55329e 100644
--- a/hw/input/vmmouse.c
+++ b/hw/i386/vmmouse.c
@@ -24,7 +24,6 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "ui/console.h"
-#include "hw/input/ps2.h"
#include "hw/i386/pc.h"
#include "hw/qdev.h"
diff --git a/hw/misc/vmport.c b/hw/i386/vmport.c
index 165500223f..116aa09819 100644
--- a/hw/misc/vmport.c
+++ b/hw/i386/vmport.c
@@ -27,8 +27,8 @@
#include "hw/i386/pc.h"
#include "sysemu/hw_accel.h"
#include "hw/qdev.h"
-
-//#define VMPORT_DEBUG
+#include "qemu/log.h"
+#include "trace.h"
#define VMPORT_CMD_GETVERSION 0x0a
#define VMPORT_CMD_GETRAMSIZE 0x14
@@ -38,8 +38,7 @@
#define VMPORT(obj) OBJECT_CHECK(VMPortState, (obj), TYPE_VMPORT)
-typedef struct VMPortState
-{
+typedef struct VMPortState {
ISADevice parent_obj;
MemoryRegion io;
@@ -51,9 +50,11 @@ static VMPortState *port_state;
void vmport_register(unsigned char command, VMPortReadFunc *func, void *opaque)
{
- if (command >= VMPORT_ENTRIES)
+ if (command >= VMPORT_ENTRIES) {
return;
+ }
+ trace_vmport_register(command, func, opaque);
port_state->func[command] = func;
port_state->opaque[command] = opaque;
}
@@ -71,17 +72,14 @@ static uint64_t vmport_ioport_read(void *opaque, hwaddr addr,
cpu_synchronize_state(cs);
eax = env->regs[R_EAX];
- if (eax != VMPORT_MAGIC)
+ if (eax != VMPORT_MAGIC) {
return eax;
+ }
command = env->regs[R_ECX];
- if (command >= VMPORT_ENTRIES)
- return eax;
- if (!s->func[command])
- {
-#ifdef VMPORT_DEBUG
- fprintf(stderr, "vmport: unknown command %x\n", command);
-#endif
+ trace_vmport_command(command);
+ if (command >= VMPORT_ENTRIES || !s->func[command]) {
+ qemu_log_mask(LOG_UNIMP, "vmport: unknown command %x\n", command);
return eax;
}
diff --git a/hw/i386/xen/xen-mapcache.c b/hw/i386/xen/xen-mapcache.c
index baab93b614..efa35dc6e0 100644
--- a/hw/i386/xen/xen-mapcache.c
+++ b/hw/i386/xen/xen-mapcache.c
@@ -199,7 +199,7 @@ static void xen_remap_bucket(MapCacheEntry *entry,
*/
vaddr_base = mmap(vaddr, size, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_SHARED, -1, 0);
- if (vaddr_base == NULL) {
+ if (vaddr_base == MAP_FAILED) {
perror("mmap");
exit(-1);
}
diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c
index 056b87de0b..fc8623c90b 100644
--- a/hw/i386/xen/xen_platform.c
+++ b/hw/i386/xen/xen_platform.c
@@ -26,7 +26,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/ide.h"
#include "hw/pci/pci.h"
#include "hw/irq.h"
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index 373311f91a..451b18b419 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -24,7 +24,6 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
-#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "qemu/error-report.h"
diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c
index 86b2a8f504..65aff518ec 100644
--- a/hw/ide/cmd646.c
+++ b/hw/ide/cmd646.c
@@ -24,7 +24,6 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/isa/isa.h"
#include "sysemu/block-backend.h"
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 471d0c928b..1ea5812b7e 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -24,17 +24,16 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/isa/isa.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
+#include "sysemu/blockdev.h"
#include "sysemu/dma.h"
#include "hw/block/block.h"
#include "sysemu/block-backend.h"
#include "qemu/cutils.h"
-#include "qemu/error-report.h"
#include "hw/ide/internal.h"
#include "trace.h"
diff --git a/hw/ide/ich.c b/hw/ide/ich.c
index 8dd0ced6b3..c01b24ecbe 100644
--- a/hw/ide/ich.c
+++ b/hw/ide/ich.c
@@ -63,7 +63,6 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
-#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/isa/isa.h"
#include "sysemu/block-backend.h"
diff --git a/hw/ide/isa.c b/hw/ide/isa.c
index 40213d662c..9fb24fc92b 100644
--- a/hw/ide/isa.c
+++ b/hw/ide/isa.c
@@ -24,7 +24,6 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c
index 17917c0b30..fde4d4645e 100644
--- a/hw/ide/microdrive.c
+++ b/hw/ide/microdrive.c
@@ -24,7 +24,6 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/pcmcia.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 25f1d36f3a..1ab0a892d0 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -24,7 +24,6 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/isa/isa.h"
#include "sysemu/block-backend.h"
diff --git a/hw/ide/piix.c b/hw/ide/piix.c
index dfb21f65fa..a3afe1fd29 100644
--- a/hw/ide/piix.c
+++ b/hw/ide/piix.c
@@ -25,11 +25,11 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/isa/isa.h"
#include "sysemu/block-backend.h"
#include "sysemu/sysemu.h"
+#include "sysemu/blockdev.h"
#include "sysemu/dma.h"
#include "hw/ide/pci.h"
diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c
index a5181b4448..f395d24592 100644
--- a/hw/ide/qdev.c
+++ b/hw/ide/qdev.c
@@ -160,7 +160,6 @@ static void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp)
{
IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus);
IDEState *s = bus->ifs + dev->unit;
- Error *err = NULL;
int ret;
if (!dev->conf.blk) {
@@ -191,16 +190,13 @@ static void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp)
blkconf_serial(&dev->conf, &dev->serial);
if (kind != IDE_CD) {
- blkconf_geometry(&dev->conf, &dev->chs_trans, 65535, 16, 255, &err);
- if (err) {
- error_propagate(errp, err);
+ if (!blkconf_geometry(&dev->conf, &dev->chs_trans, 65535, 16, 255,
+ errp)) {
return;
}
}
- blkconf_apply_backend_options(&dev->conf, kind == IDE_CD, kind != IDE_CD,
- &err);
- if (err) {
- error_propagate(errp, err);
+ if (!blkconf_apply_backend_options(&dev->conf, kind == IDE_CD,
+ kind != IDE_CD, errp)) {
return;
}
diff --git a/hw/ide/via.c b/hw/ide/via.c
index 35c3059325..117ac4d95e 100644
--- a/hw/ide/via.c
+++ b/hw/ide/via.c
@@ -25,7 +25,6 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/isa/isa.h"
#include "sysemu/block-backend.h"
diff --git a/hw/input/Makefile.objs b/hw/input/Makefile.objs
index 7715d7230d..77e53e6883 100644
--- a/hw/input/Makefile.objs
+++ b/hw/input/Makefile.objs
@@ -1,4 +1,4 @@
-common-obj-$(CONFIG_ADB) += adb.o
+common-obj-$(CONFIG_ADB) += adb.o adb-mouse.o adb-kbd.o
common-obj-y += hid.o
common-obj-$(CONFIG_LM832X) += lm832x.o
common-obj-$(CONFIG_PCKBD) += pckbd.o
@@ -6,7 +6,6 @@ common-obj-$(CONFIG_PL050) += pl050.o
common-obj-y += ps2.o
common-obj-$(CONFIG_STELLARIS_INPUT) += stellaris_input.o
common-obj-$(CONFIG_TSC2005) += tsc2005.o
-common-obj-$(CONFIG_VMMOUSE) += vmmouse.o
common-obj-$(CONFIG_VIRTIO) += virtio-input.o
common-obj-$(CONFIG_VIRTIO) += virtio-input-hid.o
diff --git a/hw/input/adb-internal.h b/hw/input/adb-internal.h
new file mode 100644
index 0000000000..2a779b8a0a
--- /dev/null
+++ b/hw/input/adb-internal.h
@@ -0,0 +1,49 @@
+/*
+ * QEMU ADB support
+ *
+ * Copyright (c) 2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/* ADB commands */
+
+#define ADB_BUSRESET 0x00
+#define ADB_FLUSH 0x01
+#define ADB_WRITEREG 0x08
+#define ADB_READREG 0x0c
+
+/* ADB device commands */
+
+#define ADB_CMD_SELF_TEST 0xff
+#define ADB_CMD_CHANGE_ID 0xfe
+#define ADB_CMD_CHANGE_ID_AND_ACT 0xfd
+#define ADB_CMD_CHANGE_ID_AND_ENABLE 0x00
+
+/* ADB default device IDs (upper 4 bits of ADB command byte) */
+
+#define ADB_DEVID_DONGLE 1
+#define ADB_DEVID_KEYBOARD 2
+#define ADB_DEVID_MOUSE 3
+#define ADB_DEVID_TABLET 4
+#define ADB_DEVID_MODEM 5
+#define ADB_DEVID_MISC 7
+
+extern const VMStateDescription vmstate_adb_device;
+
diff --git a/hw/input/adb-kbd.c b/hw/input/adb-kbd.c
new file mode 100644
index 0000000000..354f56e41e
--- /dev/null
+++ b/hw/input/adb-kbd.c
@@ -0,0 +1,400 @@
+/*
+ * QEMU ADB keyboard support
+ *
+ * Copyright (c) 2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "hw/input/adb.h"
+#include "ui/input.h"
+#include "hw/input/adb-keys.h"
+#include "sysemu/sysemu.h"
+#include "adb-internal.h"
+#include "trace.h"
+
+#define ADB_KEYBOARD(obj) OBJECT_CHECK(KBDState, (obj), TYPE_ADB_KEYBOARD)
+
+typedef struct KBDState {
+ /*< private >*/
+ ADBDevice parent_obj;
+ /*< public >*/
+
+ uint8_t data[128];
+ int rptr, wptr, count;
+} KBDState;
+
+#define ADB_KEYBOARD_CLASS(class) \
+ OBJECT_CLASS_CHECK(ADBKeyboardClass, (class), TYPE_ADB_KEYBOARD)
+#define ADB_KEYBOARD_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(ADBKeyboardClass, (obj), TYPE_ADB_KEYBOARD)
+
+typedef struct ADBKeyboardClass {
+ /*< private >*/
+ ADBDeviceClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+} ADBKeyboardClass;
+
+/* The adb keyboard doesn't have every key imaginable */
+#define NO_KEY 0xff
+
+int qcode_to_adb_keycode[] = {
+ /* Make sure future additions are automatically set to NO_KEY */
+ [0 ... 0xff] = NO_KEY,
+
+ [Q_KEY_CODE_SHIFT] = ADB_KEY_LEFT_SHIFT,
+ [Q_KEY_CODE_SHIFT_R] = ADB_KEY_RIGHT_SHIFT,
+ [Q_KEY_CODE_ALT] = ADB_KEY_LEFT_OPTION,
+ [Q_KEY_CODE_ALT_R] = ADB_KEY_RIGHT_OPTION,
+ [Q_KEY_CODE_CTRL] = ADB_KEY_LEFT_CONTROL,
+ [Q_KEY_CODE_CTRL_R] = ADB_KEY_RIGHT_CONTROL,
+ [Q_KEY_CODE_META_L] = ADB_KEY_COMMAND,
+ [Q_KEY_CODE_META_R] = ADB_KEY_COMMAND,
+ [Q_KEY_CODE_SPC] = ADB_KEY_SPACEBAR,
+
+ [Q_KEY_CODE_ESC] = ADB_KEY_ESC,
+ [Q_KEY_CODE_1] = ADB_KEY_1,
+ [Q_KEY_CODE_2] = ADB_KEY_2,
+ [Q_KEY_CODE_3] = ADB_KEY_3,
+ [Q_KEY_CODE_4] = ADB_KEY_4,
+ [Q_KEY_CODE_5] = ADB_KEY_5,
+ [Q_KEY_CODE_6] = ADB_KEY_6,
+ [Q_KEY_CODE_7] = ADB_KEY_7,
+ [Q_KEY_CODE_8] = ADB_KEY_8,
+ [Q_KEY_CODE_9] = ADB_KEY_9,
+ [Q_KEY_CODE_0] = ADB_KEY_0,
+ [Q_KEY_CODE_MINUS] = ADB_KEY_MINUS,
+ [Q_KEY_CODE_EQUAL] = ADB_KEY_EQUAL,
+ [Q_KEY_CODE_BACKSPACE] = ADB_KEY_DELETE,
+ [Q_KEY_CODE_TAB] = ADB_KEY_TAB,
+ [Q_KEY_CODE_Q] = ADB_KEY_Q,
+ [Q_KEY_CODE_W] = ADB_KEY_W,
+ [Q_KEY_CODE_E] = ADB_KEY_E,
+ [Q_KEY_CODE_R] = ADB_KEY_R,
+ [Q_KEY_CODE_T] = ADB_KEY_T,
+ [Q_KEY_CODE_Y] = ADB_KEY_Y,
+ [Q_KEY_CODE_U] = ADB_KEY_U,
+ [Q_KEY_CODE_I] = ADB_KEY_I,
+ [Q_KEY_CODE_O] = ADB_KEY_O,
+ [Q_KEY_CODE_P] = ADB_KEY_P,
+ [Q_KEY_CODE_BRACKET_LEFT] = ADB_KEY_LEFT_BRACKET,
+ [Q_KEY_CODE_BRACKET_RIGHT] = ADB_KEY_RIGHT_BRACKET,
+ [Q_KEY_CODE_RET] = ADB_KEY_RETURN,
+ [Q_KEY_CODE_A] = ADB_KEY_A,
+ [Q_KEY_CODE_S] = ADB_KEY_S,
+ [Q_KEY_CODE_D] = ADB_KEY_D,
+ [Q_KEY_CODE_F] = ADB_KEY_F,
+ [Q_KEY_CODE_G] = ADB_KEY_G,
+ [Q_KEY_CODE_H] = ADB_KEY_H,
+ [Q_KEY_CODE_J] = ADB_KEY_J,
+ [Q_KEY_CODE_K] = ADB_KEY_K,
+ [Q_KEY_CODE_L] = ADB_KEY_L,
+ [Q_KEY_CODE_SEMICOLON] = ADB_KEY_SEMICOLON,
+ [Q_KEY_CODE_APOSTROPHE] = ADB_KEY_APOSTROPHE,
+ [Q_KEY_CODE_GRAVE_ACCENT] = ADB_KEY_GRAVE_ACCENT,
+ [Q_KEY_CODE_BACKSLASH] = ADB_KEY_BACKSLASH,
+ [Q_KEY_CODE_Z] = ADB_KEY_Z,
+ [Q_KEY_CODE_X] = ADB_KEY_X,
+ [Q_KEY_CODE_C] = ADB_KEY_C,
+ [Q_KEY_CODE_V] = ADB_KEY_V,
+ [Q_KEY_CODE_B] = ADB_KEY_B,
+ [Q_KEY_CODE_N] = ADB_KEY_N,
+ [Q_KEY_CODE_M] = ADB_KEY_M,
+ [Q_KEY_CODE_COMMA] = ADB_KEY_COMMA,
+ [Q_KEY_CODE_DOT] = ADB_KEY_PERIOD,
+ [Q_KEY_CODE_SLASH] = ADB_KEY_FORWARD_SLASH,
+ [Q_KEY_CODE_ASTERISK] = ADB_KEY_KP_MULTIPLY,
+ [Q_KEY_CODE_CAPS_LOCK] = ADB_KEY_CAPS_LOCK,
+
+ [Q_KEY_CODE_F1] = ADB_KEY_F1,
+ [Q_KEY_CODE_F2] = ADB_KEY_F2,
+ [Q_KEY_CODE_F3] = ADB_KEY_F3,
+ [Q_KEY_CODE_F4] = ADB_KEY_F4,
+ [Q_KEY_CODE_F5] = ADB_KEY_F5,
+ [Q_KEY_CODE_F6] = ADB_KEY_F6,
+ [Q_KEY_CODE_F7] = ADB_KEY_F7,
+ [Q_KEY_CODE_F8] = ADB_KEY_F8,
+ [Q_KEY_CODE_F9] = ADB_KEY_F9,
+ [Q_KEY_CODE_F10] = ADB_KEY_F10,
+ [Q_KEY_CODE_F11] = ADB_KEY_F11,
+ [Q_KEY_CODE_F12] = ADB_KEY_F12,
+ [Q_KEY_CODE_PRINT] = ADB_KEY_F13,
+ [Q_KEY_CODE_SYSRQ] = ADB_KEY_F13,
+ [Q_KEY_CODE_SCROLL_LOCK] = ADB_KEY_F14,
+ [Q_KEY_CODE_PAUSE] = ADB_KEY_F15,
+
+ [Q_KEY_CODE_NUM_LOCK] = ADB_KEY_KP_CLEAR,
+ [Q_KEY_CODE_KP_EQUALS] = ADB_KEY_KP_EQUAL,
+ [Q_KEY_CODE_KP_DIVIDE] = ADB_KEY_KP_DIVIDE,
+ [Q_KEY_CODE_KP_MULTIPLY] = ADB_KEY_KP_MULTIPLY,
+ [Q_KEY_CODE_KP_SUBTRACT] = ADB_KEY_KP_SUBTRACT,
+ [Q_KEY_CODE_KP_ADD] = ADB_KEY_KP_PLUS,
+ [Q_KEY_CODE_KP_ENTER] = ADB_KEY_KP_ENTER,
+ [Q_KEY_CODE_KP_DECIMAL] = ADB_KEY_KP_PERIOD,
+ [Q_KEY_CODE_KP_0] = ADB_KEY_KP_0,
+ [Q_KEY_CODE_KP_1] = ADB_KEY_KP_1,
+ [Q_KEY_CODE_KP_2] = ADB_KEY_KP_2,
+ [Q_KEY_CODE_KP_3] = ADB_KEY_KP_3,
+ [Q_KEY_CODE_KP_4] = ADB_KEY_KP_4,
+ [Q_KEY_CODE_KP_5] = ADB_KEY_KP_5,
+ [Q_KEY_CODE_KP_6] = ADB_KEY_KP_6,
+ [Q_KEY_CODE_KP_7] = ADB_KEY_KP_7,
+ [Q_KEY_CODE_KP_8] = ADB_KEY_KP_8,
+ [Q_KEY_CODE_KP_9] = ADB_KEY_KP_9,
+
+ [Q_KEY_CODE_UP] = ADB_KEY_UP,
+ [Q_KEY_CODE_DOWN] = ADB_KEY_DOWN,
+ [Q_KEY_CODE_LEFT] = ADB_KEY_LEFT,
+ [Q_KEY_CODE_RIGHT] = ADB_KEY_RIGHT,
+
+ [Q_KEY_CODE_HELP] = ADB_KEY_HELP,
+ [Q_KEY_CODE_INSERT] = ADB_KEY_HELP,
+ [Q_KEY_CODE_DELETE] = ADB_KEY_FORWARD_DELETE,
+ [Q_KEY_CODE_HOME] = ADB_KEY_HOME,
+ [Q_KEY_CODE_END] = ADB_KEY_END,
+ [Q_KEY_CODE_PGUP] = ADB_KEY_PAGE_UP,
+ [Q_KEY_CODE_PGDN] = ADB_KEY_PAGE_DOWN,
+
+ [Q_KEY_CODE_POWER] = ADB_KEY_POWER
+};
+
+static void adb_kbd_put_keycode(void *opaque, int keycode)
+{
+ KBDState *s = opaque;
+
+ if (s->count < sizeof(s->data)) {
+ s->data[s->wptr] = keycode;
+ if (++s->wptr == sizeof(s->data)) {
+ s->wptr = 0;
+ }
+ s->count++;
+ }
+}
+
+static int adb_kbd_poll(ADBDevice *d, uint8_t *obuf)
+{
+ KBDState *s = ADB_KEYBOARD(d);
+ int keycode;
+ int olen;
+
+ olen = 0;
+ if (s->count == 0) {
+ return 0;
+ }
+ keycode = s->data[s->rptr];
+ s->rptr++;
+ if (s->rptr == sizeof(s->data)) {
+ s->rptr = 0;
+ }
+ s->count--;
+ /*
+ * The power key is the only two byte value key, so it is a special case.
+ * Since 0x7f is not a used keycode for ADB we overload it to indicate the
+ * power button when we're storing keycodes in our internal buffer, and
+ * expand it out to two bytes when we send to the guest.
+ */
+ if (keycode == 0x7f) {
+ obuf[0] = 0x7f;
+ obuf[1] = 0x7f;
+ olen = 2;
+ } else {
+ obuf[0] = keycode;
+ /* NOTE: the power key key-up is the two byte sequence 0xff 0xff;
+ * otherwise we could in theory send a second keycode in the second
+ * byte, but choose not to bother.
+ */
+ obuf[1] = 0xff;
+ olen = 2;
+ }
+
+ return olen;
+}
+
+static int adb_kbd_request(ADBDevice *d, uint8_t *obuf,
+ const uint8_t *buf, int len)
+{
+ KBDState *s = ADB_KEYBOARD(d);
+ int cmd, reg, olen;
+
+ if ((buf[0] & 0x0f) == ADB_FLUSH) {
+ /* flush keyboard fifo */
+ s->wptr = s->rptr = s->count = 0;
+ return 0;
+ }
+
+ cmd = buf[0] & 0xc;
+ reg = buf[0] & 0x3;
+ olen = 0;
+ switch (cmd) {
+ case ADB_WRITEREG:
+ trace_adb_kbd_writereg(reg, buf[1]);
+ switch (reg) {
+ case 2:
+ /* LED status */
+ break;
+ case 3:
+ switch (buf[2]) {
+ case ADB_CMD_SELF_TEST:
+ break;
+ case ADB_CMD_CHANGE_ID:
+ case ADB_CMD_CHANGE_ID_AND_ACT:
+ case ADB_CMD_CHANGE_ID_AND_ENABLE:
+ d->devaddr = buf[1] & 0xf;
+ break;
+ default:
+ d->devaddr = buf[1] & 0xf;
+ /* we support handlers:
+ * 1: Apple Standard Keyboard
+ * 2: Apple Extended Keyboard (LShift = RShift)
+ * 3: Apple Extended Keyboard (LShift != RShift)
+ */
+ if (buf[2] == 1 || buf[2] == 2 || buf[2] == 3) {
+ d->handler = buf[2];
+ }
+ break;
+ }
+ }
+ break;
+ case ADB_READREG:
+ switch (reg) {
+ case 0:
+ olen = adb_kbd_poll(d, obuf);
+ break;
+ case 1:
+ break;
+ case 2:
+ obuf[0] = 0x00; /* XXX: check this */
+ obuf[1] = 0x07; /* led status */
+ olen = 2;
+ break;
+ case 3:
+ obuf[0] = d->handler;
+ obuf[1] = d->devaddr;
+ olen = 2;
+ break;
+ }
+ trace_adb_kbd_readreg(reg, obuf[0], obuf[1]);
+ break;
+ }
+ return olen;
+}
+
+/* This is where keyboard events enter this file */
+static void adb_keyboard_event(DeviceState *dev, QemuConsole *src,
+ InputEvent *evt)
+{
+ KBDState *s = (KBDState *)dev;
+ int qcode, keycode;
+
+ qcode = qemu_input_key_value_to_qcode(evt->u.key.data->key);
+ if (qcode >= ARRAY_SIZE(qcode_to_adb_keycode)) {
+ return;
+ }
+ /* FIXME: take handler into account when translating qcode */
+ keycode = qcode_to_adb_keycode[qcode];
+ if (keycode == NO_KEY) { /* We don't want to send this to the guest */
+ trace_adb_kbd_no_key();
+ return;
+ }
+ if (evt->u.key.data->down == false) { /* if key release event */
+ keycode = keycode | 0x80; /* create keyboard break code */
+ }
+
+ adb_kbd_put_keycode(s, keycode);
+}
+
+static const VMStateDescription vmstate_adb_kbd = {
+ .name = "adb_kbd",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(parent_obj, KBDState, 0, vmstate_adb_device, ADBDevice),
+ VMSTATE_BUFFER(data, KBDState),
+ VMSTATE_INT32(rptr, KBDState),
+ VMSTATE_INT32(wptr, KBDState),
+ VMSTATE_INT32(count, KBDState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void adb_kbd_reset(DeviceState *dev)
+{
+ ADBDevice *d = ADB_DEVICE(dev);
+ KBDState *s = ADB_KEYBOARD(dev);
+
+ d->handler = 1;
+ d->devaddr = ADB_DEVID_KEYBOARD;
+ memset(s->data, 0, sizeof(s->data));
+ s->rptr = 0;
+ s->wptr = 0;
+ s->count = 0;
+}
+
+static QemuInputHandler adb_keyboard_handler = {
+ .name = "QEMU ADB Keyboard",
+ .mask = INPUT_EVENT_MASK_KEY,
+ .event = adb_keyboard_event,
+};
+
+static void adb_kbd_realizefn(DeviceState *dev, Error **errp)
+{
+ ADBKeyboardClass *akc = ADB_KEYBOARD_GET_CLASS(dev);
+ akc->parent_realize(dev, errp);
+ qemu_input_handler_register(dev, &adb_keyboard_handler);
+}
+
+static void adb_kbd_initfn(Object *obj)
+{
+ ADBDevice *d = ADB_DEVICE(obj);
+
+ d->devaddr = ADB_DEVID_KEYBOARD;
+}
+
+static void adb_kbd_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc);
+ ADBKeyboardClass *akc = ADB_KEYBOARD_CLASS(oc);
+
+ akc->parent_realize = dc->realize;
+ dc->realize = adb_kbd_realizefn;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+
+ adc->devreq = adb_kbd_request;
+ dc->reset = adb_kbd_reset;
+ dc->vmsd = &vmstate_adb_kbd;
+}
+
+static const TypeInfo adb_kbd_type_info = {
+ .name = TYPE_ADB_KEYBOARD,
+ .parent = TYPE_ADB_DEVICE,
+ .instance_size = sizeof(KBDState),
+ .instance_init = adb_kbd_initfn,
+ .class_init = adb_kbd_class_init,
+ .class_size = sizeof(ADBKeyboardClass),
+};
+
+static void adb_kbd_register_types(void)
+{
+ type_register_static(&adb_kbd_type_info);
+}
+
+type_init(adb_kbd_register_types)
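
Aside: adb_keyboard_event() above reduces key handling to a table lookup plus a single bit — the ADB code comes from qcode_to_adb_keycode[], and bit 7 is set on key release to form the break code. A self-contained sketch of just that encoding step follows; the two-entry table and its values are illustrative stand-ins, not the real mapping.

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NO_KEY 0xff

    /* Illustrative table; the real one maps every QKeyCode to an ADB code. */
    enum { KEY_A, KEY_POWER, KEY_MAX };
    static const uint8_t example_qcode_to_adb[KEY_MAX] = {
        [KEY_A]     = 0x00,   /* ADB 'a' */
        [KEY_POWER] = 0x7f,   /* buffered power-key marker, expanded on poll */
    };

    /* Returns the byte queued into the keyboard FIFO, or NO_KEY to ignore. */
    static uint8_t adb_encode_key(int qcode, bool down)
    {
        uint8_t keycode;

        if (qcode >= KEY_MAX) {
            return NO_KEY;
        }
        keycode = example_qcode_to_adb[qcode];
        if (keycode == NO_KEY) {
            return NO_KEY;
        }
        return down ? keycode : (keycode | 0x80);   /* bit 7 = break code */
    }

    int main(void)
    {
        printf("A press   -> 0x%02x\n", adb_encode_key(KEY_A, true));
        printf("A release -> 0x%02x\n", adb_encode_key(KEY_A, false));
        return 0;
    }

In the device above, the result is pushed into the 128-byte FIFO and later expanded by adb_kbd_poll(), which turns the 0x7f power-key marker into the two-byte sequence the guest expects.
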
diff --git a/hw/input/adb-mouse.c b/hw/input/adb-mouse.c
new file mode 100644
index 0000000000..c9004233b8
--- /dev/null
+++ b/hw/input/adb-mouse.c
@@ -0,0 +1,254 @@
+/*
+ * QEMU ADB mouse support
+ *
+ * Copyright (c) 2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "ui/console.h"
+#include "hw/input/adb.h"
+#include "adb-internal.h"
+#include "trace.h"
+
+#define ADB_MOUSE(obj) OBJECT_CHECK(MouseState, (obj), TYPE_ADB_MOUSE)
+
+typedef struct MouseState {
+ /*< public >*/
+ ADBDevice parent_obj;
+ /*< private >*/
+
+ int buttons_state, last_buttons_state;
+ int dx, dy, dz;
+} MouseState;
+
+#define ADB_MOUSE_CLASS(class) \
+ OBJECT_CLASS_CHECK(ADBMouseClass, (class), TYPE_ADB_MOUSE)
+#define ADB_MOUSE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(ADBMouseClass, (obj), TYPE_ADB_MOUSE)
+
+typedef struct ADBMouseClass {
+ /*< public >*/
+ ADBDeviceClass parent_class;
+ /*< private >*/
+
+ DeviceRealize parent_realize;
+} ADBMouseClass;
+
+static void adb_mouse_event(void *opaque,
+ int dx1, int dy1, int dz1, int buttons_state)
+{
+ MouseState *s = opaque;
+
+ s->dx += dx1;
+ s->dy += dy1;
+ s->dz += dz1;
+ s->buttons_state = buttons_state;
+}
+
+
+static int adb_mouse_poll(ADBDevice *d, uint8_t *obuf)
+{
+ MouseState *s = ADB_MOUSE(d);
+ int dx, dy;
+
+ if (s->last_buttons_state == s->buttons_state &&
+ s->dx == 0 && s->dy == 0) {
+ return 0;
+ }
+
+ dx = s->dx;
+ if (dx < -63) {
+ dx = -63;
+ } else if (dx > 63) {
+ dx = 63;
+ }
+
+ dy = s->dy;
+ if (dy < -63) {
+ dy = -63;
+ } else if (dy > 63) {
+ dy = 63;
+ }
+
+ s->dx -= dx;
+ s->dy -= dy;
+ s->last_buttons_state = s->buttons_state;
+
+ dx &= 0x7f;
+ dy &= 0x7f;
+
+ if (!(s->buttons_state & MOUSE_EVENT_LBUTTON)) {
+ dy |= 0x80;
+ }
+ if (!(s->buttons_state & MOUSE_EVENT_RBUTTON)) {
+ dx |= 0x80;
+ }
+
+ obuf[0] = dy;
+ obuf[1] = dx;
+ return 2;
+}
+
+static int adb_mouse_request(ADBDevice *d, uint8_t *obuf,
+ const uint8_t *buf, int len)
+{
+ MouseState *s = ADB_MOUSE(d);
+ int cmd, reg, olen;
+
+ if ((buf[0] & 0x0f) == ADB_FLUSH) {
+ /* flush mouse fifo */
+ s->buttons_state = s->last_buttons_state;
+ s->dx = 0;
+ s->dy = 0;
+ s->dz = 0;
+ return 0;
+ }
+
+ cmd = buf[0] & 0xc;
+ reg = buf[0] & 0x3;
+ olen = 0;
+ switch (cmd) {
+ case ADB_WRITEREG:
+ trace_adb_mouse_writereg(reg, buf[1]);
+ switch (reg) {
+ case 2:
+ break;
+ case 3:
+ switch (buf[2]) {
+ case ADB_CMD_SELF_TEST:
+ break;
+ case ADB_CMD_CHANGE_ID:
+ case ADB_CMD_CHANGE_ID_AND_ACT:
+ case ADB_CMD_CHANGE_ID_AND_ENABLE:
+ d->devaddr = buf[1] & 0xf;
+ break;
+ default:
+ d->devaddr = buf[1] & 0xf;
+ /* we support handlers:
+ * 0x01: Classic Apple Mouse Protocol / 100 cpi operations
+ * 0x02: Classic Apple Mouse Protocol / 200 cpi operations
+ * we don't support handlers (at least):
+ * 0x03: Mouse systems A3 trackball
+ * 0x04: Extended Apple Mouse Protocol
+ * 0x2f: Microspeed mouse
+ * 0x42: Macally
+ * 0x5f: Microspeed mouse
+ * 0x66: Microspeed mouse
+ */
+ if (buf[2] == 1 || buf[2] == 2) {
+ d->handler = buf[2];
+ }
+ break;
+ }
+ }
+ break;
+ case ADB_READREG:
+ switch (reg) {
+ case 0:
+ olen = adb_mouse_poll(d, obuf);
+ break;
+ case 1:
+ break;
+ case 3:
+ obuf[0] = d->handler;
+ obuf[1] = d->devaddr;
+ olen = 2;
+ break;
+ }
+ trace_adb_mouse_readreg(reg, obuf[0], obuf[1]);
+ break;
+ }
+ return olen;
+}
+
+static void adb_mouse_reset(DeviceState *dev)
+{
+ ADBDevice *d = ADB_DEVICE(dev);
+ MouseState *s = ADB_MOUSE(dev);
+
+ d->handler = 2;
+ d->devaddr = ADB_DEVID_MOUSE;
+ s->last_buttons_state = s->buttons_state = 0;
+ s->dx = s->dy = s->dz = 0;
+}
+
+static const VMStateDescription vmstate_adb_mouse = {
+ .name = "adb_mouse",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(parent_obj, MouseState, 0, vmstate_adb_device,
+ ADBDevice),
+ VMSTATE_INT32(buttons_state, MouseState),
+ VMSTATE_INT32(last_buttons_state, MouseState),
+ VMSTATE_INT32(dx, MouseState),
+ VMSTATE_INT32(dy, MouseState),
+ VMSTATE_INT32(dz, MouseState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void adb_mouse_realizefn(DeviceState *dev, Error **errp)
+{
+ MouseState *s = ADB_MOUSE(dev);
+ ADBMouseClass *amc = ADB_MOUSE_GET_CLASS(dev);
+
+ amc->parent_realize(dev, errp);
+
+ qemu_add_mouse_event_handler(adb_mouse_event, s, 0, "QEMU ADB Mouse");
+}
+
+static void adb_mouse_initfn(Object *obj)
+{
+ ADBDevice *d = ADB_DEVICE(obj);
+
+ d->devaddr = ADB_DEVID_MOUSE;
+}
+
+static void adb_mouse_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc);
+ ADBMouseClass *amc = ADB_MOUSE_CLASS(oc);
+
+ amc->parent_realize = dc->realize;
+ dc->realize = adb_mouse_realizefn;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+
+ adc->devreq = adb_mouse_request;
+ dc->reset = adb_mouse_reset;
+ dc->vmsd = &vmstate_adb_mouse;
+}
+
+static const TypeInfo adb_mouse_type_info = {
+ .name = TYPE_ADB_MOUSE,
+ .parent = TYPE_ADB_DEVICE,
+ .instance_size = sizeof(MouseState),
+ .instance_init = adb_mouse_initfn,
+ .class_init = adb_mouse_class_init,
+ .class_size = sizeof(ADBMouseClass),
+};
+
+static void adb_mouse_register_types(void)
+{
+ type_register_static(&adb_mouse_type_info);
+}
+
+type_init(adb_mouse_register_types)
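
Aside: adb_mouse_poll() above packs each movement report into two bytes — the deltas are clamped to ±63 and kept as 7-bit two's-complement values, and bit 7 of each byte carries a button state, set while the button is *not* pressed (dy carries the left button, dx the right). A self-contained sketch of that packing, outside of QEMU:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Clamp a delta to the 7-bit signed range used by the ADB report. */
    static int clamp7(int v)
    {
        if (v < -63) {
            return -63;
        }
        if (v > 63) {
            return 63;
        }
        return v;
    }

    /* Pack (dx, dy, buttons) into the two-byte register-0 report:
     * byte 0 = dy plus the left-button bit, byte 1 = dx plus the
     * right-button bit.  Button bits are active-low (set when released).
     */
    static void adb_pack_mouse(int dx, int dy, bool left, bool right,
                               uint8_t out[2])
    {
        uint8_t bdy = clamp7(dy) & 0x7f;
        uint8_t bdx = clamp7(dx) & 0x7f;

        if (!left) {
            bdy |= 0x80;
        }
        if (!right) {
            bdx |= 0x80;
        }
        out[0] = bdy;
        out[1] = bdx;
    }

    int main(void)
    {
        uint8_t r[2];

        adb_pack_mouse(5, -3, true, false, r);   /* left button held */
        printf("report: 0x%02x 0x%02x\n", r[0], r[1]);
        return 0;
    }

Note that dz is accumulated in MouseState but never makes it into the report; the two-byte format above only carries X, Y and the two button bits.
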
diff --git a/hw/input/adb.c b/hw/input/adb.c
index fcca3a8eb9..23ae6f0d75 100644
--- a/hw/input/adb.c
+++ b/hw/input/adb.c
@@ -22,49 +22,12 @@
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
-#include "hw/hw.h"
#include "hw/input/adb.h"
-#include "ui/console.h"
-#include "include/hw/input/adb-keys.h"
-#include "ui/input.h"
-#include "sysemu/sysemu.h"
-
-/* debug ADB */
-//#define DEBUG_ADB
-
-#ifdef DEBUG_ADB
-#define ADB_DPRINTF(fmt, ...) \
-do { printf("ADB: " fmt , ## __VA_ARGS__); } while (0)
-#else
-#define ADB_DPRINTF(fmt, ...)
-#endif
-
-/* ADB commands */
-#define ADB_BUSRESET 0x00
-#define ADB_FLUSH 0x01
-#define ADB_WRITEREG 0x08
-#define ADB_READREG 0x0c
-
-/* ADB device commands */
-#define ADB_CMD_SELF_TEST 0xff
-#define ADB_CMD_CHANGE_ID 0xfe
-#define ADB_CMD_CHANGE_ID_AND_ACT 0xfd
-#define ADB_CMD_CHANGE_ID_AND_ENABLE 0x00
-
-/* ADB default device IDs (upper 4 bits of ADB command byte) */
-#define ADB_DEVID_DONGLE 1
-#define ADB_DEVID_KEYBOARD 2
-#define ADB_DEVID_MOUSE 3
-#define ADB_DEVID_TABLET 4
-#define ADB_DEVID_MODEM 5
-#define ADB_DEVID_MISC 7
+#include "adb-internal.h"
/* error codes */
#define ADB_RET_NOTPRESENT (-2)
-/* The adb keyboard doesn't have every key imaginable */
-#define NO_KEY 0xff
-
static void adb_device_reset(ADBDevice *d)
{
qdev_reset_all(DEVICE(d));
@@ -127,7 +90,7 @@ static const TypeInfo adb_bus_type_info = {
.instance_size = sizeof(ADBBusState),
};
-static const VMStateDescription vmstate_adb_device = {
+const VMStateDescription vmstate_adb_device = {
.name = "adb_device",
.version_id = 0,
.minimum_version_id = 0,
@@ -166,591 +129,10 @@ static const TypeInfo adb_device_type_info = {
.class_init = adb_device_class_init,
};
-/***************************************************************/
-/* Keyboard ADB device */
-
-#define ADB_KEYBOARD(obj) OBJECT_CHECK(KBDState, (obj), TYPE_ADB_KEYBOARD)
-
-typedef struct KBDState {
- /*< private >*/
- ADBDevice parent_obj;
- /*< public >*/
-
- uint8_t data[128];
- int rptr, wptr, count;
-} KBDState;
-
-#define ADB_KEYBOARD_CLASS(class) \
- OBJECT_CLASS_CHECK(ADBKeyboardClass, (class), TYPE_ADB_KEYBOARD)
-#define ADB_KEYBOARD_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ADBKeyboardClass, (obj), TYPE_ADB_KEYBOARD)
-
-typedef struct ADBKeyboardClass {
- /*< private >*/
- ADBDeviceClass parent_class;
- /*< public >*/
-
- DeviceRealize parent_realize;
-} ADBKeyboardClass;
-
-int qcode_to_adb_keycode[] = {
- /* Make sure future additions are automatically set to NO_KEY */
- [0 ... 0xff] = NO_KEY,
-
- [Q_KEY_CODE_SHIFT] = ADB_KEY_LEFT_SHIFT,
- [Q_KEY_CODE_SHIFT_R] = ADB_KEY_RIGHT_SHIFT,
- [Q_KEY_CODE_ALT] = ADB_KEY_LEFT_OPTION,
- [Q_KEY_CODE_ALT_R] = ADB_KEY_RIGHT_OPTION,
- [Q_KEY_CODE_CTRL] = ADB_KEY_LEFT_CONTROL,
- [Q_KEY_CODE_CTRL_R] = ADB_KEY_RIGHT_CONTROL,
- [Q_KEY_CODE_META_L] = ADB_KEY_COMMAND,
- [Q_KEY_CODE_META_R] = ADB_KEY_COMMAND,
- [Q_KEY_CODE_SPC] = ADB_KEY_SPACEBAR,
-
- [Q_KEY_CODE_ESC] = ADB_KEY_ESC,
- [Q_KEY_CODE_1] = ADB_KEY_1,
- [Q_KEY_CODE_2] = ADB_KEY_2,
- [Q_KEY_CODE_3] = ADB_KEY_3,
- [Q_KEY_CODE_4] = ADB_KEY_4,
- [Q_KEY_CODE_5] = ADB_KEY_5,
- [Q_KEY_CODE_6] = ADB_KEY_6,
- [Q_KEY_CODE_7] = ADB_KEY_7,
- [Q_KEY_CODE_8] = ADB_KEY_8,
- [Q_KEY_CODE_9] = ADB_KEY_9,
- [Q_KEY_CODE_0] = ADB_KEY_0,
- [Q_KEY_CODE_MINUS] = ADB_KEY_MINUS,
- [Q_KEY_CODE_EQUAL] = ADB_KEY_EQUAL,
- [Q_KEY_CODE_BACKSPACE] = ADB_KEY_DELETE,
- [Q_KEY_CODE_TAB] = ADB_KEY_TAB,
- [Q_KEY_CODE_Q] = ADB_KEY_Q,
- [Q_KEY_CODE_W] = ADB_KEY_W,
- [Q_KEY_CODE_E] = ADB_KEY_E,
- [Q_KEY_CODE_R] = ADB_KEY_R,
- [Q_KEY_CODE_T] = ADB_KEY_T,
- [Q_KEY_CODE_Y] = ADB_KEY_Y,
- [Q_KEY_CODE_U] = ADB_KEY_U,
- [Q_KEY_CODE_I] = ADB_KEY_I,
- [Q_KEY_CODE_O] = ADB_KEY_O,
- [Q_KEY_CODE_P] = ADB_KEY_P,
- [Q_KEY_CODE_BRACKET_LEFT] = ADB_KEY_LEFT_BRACKET,
- [Q_KEY_CODE_BRACKET_RIGHT] = ADB_KEY_RIGHT_BRACKET,
- [Q_KEY_CODE_RET] = ADB_KEY_RETURN,
- [Q_KEY_CODE_A] = ADB_KEY_A,
- [Q_KEY_CODE_S] = ADB_KEY_S,
- [Q_KEY_CODE_D] = ADB_KEY_D,
- [Q_KEY_CODE_F] = ADB_KEY_F,
- [Q_KEY_CODE_G] = ADB_KEY_G,
- [Q_KEY_CODE_H] = ADB_KEY_H,
- [Q_KEY_CODE_J] = ADB_KEY_J,
- [Q_KEY_CODE_K] = ADB_KEY_K,
- [Q_KEY_CODE_L] = ADB_KEY_L,
- [Q_KEY_CODE_SEMICOLON] = ADB_KEY_SEMICOLON,
- [Q_KEY_CODE_APOSTROPHE] = ADB_KEY_APOSTROPHE,
- [Q_KEY_CODE_GRAVE_ACCENT] = ADB_KEY_GRAVE_ACCENT,
- [Q_KEY_CODE_BACKSLASH] = ADB_KEY_BACKSLASH,
- [Q_KEY_CODE_Z] = ADB_KEY_Z,
- [Q_KEY_CODE_X] = ADB_KEY_X,
- [Q_KEY_CODE_C] = ADB_KEY_C,
- [Q_KEY_CODE_V] = ADB_KEY_V,
- [Q_KEY_CODE_B] = ADB_KEY_B,
- [Q_KEY_CODE_N] = ADB_KEY_N,
- [Q_KEY_CODE_M] = ADB_KEY_M,
- [Q_KEY_CODE_COMMA] = ADB_KEY_COMMA,
- [Q_KEY_CODE_DOT] = ADB_KEY_PERIOD,
- [Q_KEY_CODE_SLASH] = ADB_KEY_FORWARD_SLASH,
- [Q_KEY_CODE_ASTERISK] = ADB_KEY_KP_MULTIPLY,
- [Q_KEY_CODE_CAPS_LOCK] = ADB_KEY_CAPS_LOCK,
-
- [Q_KEY_CODE_F1] = ADB_KEY_F1,
- [Q_KEY_CODE_F2] = ADB_KEY_F2,
- [Q_KEY_CODE_F3] = ADB_KEY_F3,
- [Q_KEY_CODE_F4] = ADB_KEY_F4,
- [Q_KEY_CODE_F5] = ADB_KEY_F5,
- [Q_KEY_CODE_F6] = ADB_KEY_F6,
- [Q_KEY_CODE_F7] = ADB_KEY_F7,
- [Q_KEY_CODE_F8] = ADB_KEY_F8,
- [Q_KEY_CODE_F9] = ADB_KEY_F9,
- [Q_KEY_CODE_F10] = ADB_KEY_F10,
- [Q_KEY_CODE_F11] = ADB_KEY_F11,
- [Q_KEY_CODE_F12] = ADB_KEY_F12,
- [Q_KEY_CODE_PRINT] = ADB_KEY_F13,
- [Q_KEY_CODE_SYSRQ] = ADB_KEY_F13,
- [Q_KEY_CODE_SCROLL_LOCK] = ADB_KEY_F14,
- [Q_KEY_CODE_PAUSE] = ADB_KEY_F15,
-
- [Q_KEY_CODE_NUM_LOCK] = ADB_KEY_KP_CLEAR,
- [Q_KEY_CODE_KP_EQUALS] = ADB_KEY_KP_EQUAL,
- [Q_KEY_CODE_KP_DIVIDE] = ADB_KEY_KP_DIVIDE,
- [Q_KEY_CODE_KP_MULTIPLY] = ADB_KEY_KP_MULTIPLY,
- [Q_KEY_CODE_KP_SUBTRACT] = ADB_KEY_KP_SUBTRACT,
- [Q_KEY_CODE_KP_ADD] = ADB_KEY_KP_PLUS,
- [Q_KEY_CODE_KP_ENTER] = ADB_KEY_KP_ENTER,
- [Q_KEY_CODE_KP_DECIMAL] = ADB_KEY_KP_PERIOD,
- [Q_KEY_CODE_KP_0] = ADB_KEY_KP_0,
- [Q_KEY_CODE_KP_1] = ADB_KEY_KP_1,
- [Q_KEY_CODE_KP_2] = ADB_KEY_KP_2,
- [Q_KEY_CODE_KP_3] = ADB_KEY_KP_3,
- [Q_KEY_CODE_KP_4] = ADB_KEY_KP_4,
- [Q_KEY_CODE_KP_5] = ADB_KEY_KP_5,
- [Q_KEY_CODE_KP_6] = ADB_KEY_KP_6,
- [Q_KEY_CODE_KP_7] = ADB_KEY_KP_7,
- [Q_KEY_CODE_KP_8] = ADB_KEY_KP_8,
- [Q_KEY_CODE_KP_9] = ADB_KEY_KP_9,
-
- [Q_KEY_CODE_UP] = ADB_KEY_UP,
- [Q_KEY_CODE_DOWN] = ADB_KEY_DOWN,
- [Q_KEY_CODE_LEFT] = ADB_KEY_LEFT,
- [Q_KEY_CODE_RIGHT] = ADB_KEY_RIGHT,
-
- [Q_KEY_CODE_HELP] = ADB_KEY_HELP,
- [Q_KEY_CODE_INSERT] = ADB_KEY_HELP,
- [Q_KEY_CODE_DELETE] = ADB_KEY_FORWARD_DELETE,
- [Q_KEY_CODE_HOME] = ADB_KEY_HOME,
- [Q_KEY_CODE_END] = ADB_KEY_END,
- [Q_KEY_CODE_PGUP] = ADB_KEY_PAGE_UP,
- [Q_KEY_CODE_PGDN] = ADB_KEY_PAGE_DOWN,
-
- [Q_KEY_CODE_POWER] = ADB_KEY_POWER
-};
-
-static void adb_kbd_put_keycode(void *opaque, int keycode)
-{
- KBDState *s = opaque;
-
- if (s->count < sizeof(s->data)) {
- s->data[s->wptr] = keycode;
- if (++s->wptr == sizeof(s->data))
- s->wptr = 0;
- s->count++;
- }
-}
-
-static int adb_kbd_poll(ADBDevice *d, uint8_t *obuf)
-{
- KBDState *s = ADB_KEYBOARD(d);
- int keycode;
- int olen;
-
- olen = 0;
- if (s->count == 0) {
- return 0;
- }
- keycode = s->data[s->rptr];
- s->rptr++;
- if (s->rptr == sizeof(s->data)) {
- s->rptr = 0;
- }
- s->count--;
- /*
- * The power key is the only two byte value key, so it is a special case.
- * Since 0x7f is not a used keycode for ADB we overload it to indicate the
- * power button when we're storing keycodes in our internal buffer, and
- * expand it out to two bytes when we send to the guest.
- */
- if (keycode == 0x7f) {
- obuf[0] = 0x7f;
- obuf[1] = 0x7f;
- olen = 2;
- } else {
- obuf[0] = keycode;
- /* NOTE: the power key key-up is the two byte sequence 0xff 0xff;
- * otherwise we could in theory send a second keycode in the second
- * byte, but choose not to bother.
- */
- obuf[1] = 0xff;
- olen = 2;
- }
-
- return olen;
-}
-
-static int adb_kbd_request(ADBDevice *d, uint8_t *obuf,
- const uint8_t *buf, int len)
-{
- KBDState *s = ADB_KEYBOARD(d);
- int cmd, reg, olen;
-
- if ((buf[0] & 0x0f) == ADB_FLUSH) {
- /* flush keyboard fifo */
- s->wptr = s->rptr = s->count = 0;
- return 0;
- }
-
- cmd = buf[0] & 0xc;
- reg = buf[0] & 0x3;
- olen = 0;
- switch(cmd) {
- case ADB_WRITEREG:
- switch(reg) {
- case 2:
- /* LED status */
- break;
- case 3:
- switch(buf[2]) {
- case ADB_CMD_SELF_TEST:
- break;
- case ADB_CMD_CHANGE_ID:
- case ADB_CMD_CHANGE_ID_AND_ACT:
- case ADB_CMD_CHANGE_ID_AND_ENABLE:
- d->devaddr = buf[1] & 0xf;
- break;
- default:
- d->devaddr = buf[1] & 0xf;
- /* we support handlers:
- * 1: Apple Standard Keyboard
- * 2: Apple Extended Keyboard (LShift = RShift)
- * 3: Apple Extended Keyboard (LShift != RShift)
- */
- if (buf[2] == 1 || buf[2] == 2 || buf[2] == 3) {
- d->handler = buf[2];
- }
- break;
- }
- }
- break;
- case ADB_READREG:
- switch(reg) {
- case 0:
- olen = adb_kbd_poll(d, obuf);
- break;
- case 1:
- break;
- case 2:
- obuf[0] = 0x00; /* XXX: check this */
- obuf[1] = 0x07; /* led status */
- olen = 2;
- break;
- case 3:
- obuf[0] = d->handler;
- obuf[1] = d->devaddr;
- olen = 2;
- break;
- }
- break;
- }
- return olen;
-}
-
-/* This is where keyboard events enter this file */
-static void adb_keyboard_event(DeviceState *dev, QemuConsole *src,
- InputEvent *evt)
-{
- KBDState *s = (KBDState *)dev;
- int qcode, keycode;
-
- qcode = qemu_input_key_value_to_qcode(evt->u.key.data->key);
- if (qcode >= ARRAY_SIZE(qcode_to_adb_keycode)) {
- return;
- }
- /* FIXME: take handler into account when translating qcode */
- keycode = qcode_to_adb_keycode[qcode];
- if (keycode == NO_KEY) { /* We don't want to send this to the guest */
- ADB_DPRINTF("Ignoring NO_KEY\n");
- return;
- }
- if (evt->u.key.data->down == false) { /* if key release event */
- keycode = keycode | 0x80; /* create keyboard break code */
- }
-
- adb_kbd_put_keycode(s, keycode);
-}
-
-static const VMStateDescription vmstate_adb_kbd = {
- .name = "adb_kbd",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = (VMStateField[]) {
- VMSTATE_STRUCT(parent_obj, KBDState, 0, vmstate_adb_device, ADBDevice),
- VMSTATE_BUFFER(data, KBDState),
- VMSTATE_INT32(rptr, KBDState),
- VMSTATE_INT32(wptr, KBDState),
- VMSTATE_INT32(count, KBDState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static void adb_kbd_reset(DeviceState *dev)
-{
- ADBDevice *d = ADB_DEVICE(dev);
- KBDState *s = ADB_KEYBOARD(dev);
-
- d->handler = 1;
- d->devaddr = ADB_DEVID_KEYBOARD;
- memset(s->data, 0, sizeof(s->data));
- s->rptr = 0;
- s->wptr = 0;
- s->count = 0;
-}
-
-static QemuInputHandler adb_keyboard_handler = {
- .name = "QEMU ADB Keyboard",
- .mask = INPUT_EVENT_MASK_KEY,
- .event = adb_keyboard_event,
-};
-
-static void adb_kbd_realizefn(DeviceState *dev, Error **errp)
-{
- ADBKeyboardClass *akc = ADB_KEYBOARD_GET_CLASS(dev);
- akc->parent_realize(dev, errp);
- qemu_input_handler_register(dev, &adb_keyboard_handler);
-}
-
-static void adb_kbd_initfn(Object *obj)
-{
- ADBDevice *d = ADB_DEVICE(obj);
-
- d->devaddr = ADB_DEVID_KEYBOARD;
-}
-
-static void adb_kbd_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc);
- ADBKeyboardClass *akc = ADB_KEYBOARD_CLASS(oc);
-
- akc->parent_realize = dc->realize;
- dc->realize = adb_kbd_realizefn;
- set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
-
- adc->devreq = adb_kbd_request;
- dc->reset = adb_kbd_reset;
- dc->vmsd = &vmstate_adb_kbd;
-}
-
-static const TypeInfo adb_kbd_type_info = {
- .name = TYPE_ADB_KEYBOARD,
- .parent = TYPE_ADB_DEVICE,
- .instance_size = sizeof(KBDState),
- .instance_init = adb_kbd_initfn,
- .class_init = adb_kbd_class_init,
- .class_size = sizeof(ADBKeyboardClass),
-};
-
-/***************************************************************/
-/* Mouse ADB device */
-
-#define ADB_MOUSE(obj) OBJECT_CHECK(MouseState, (obj), TYPE_ADB_MOUSE)
-
-typedef struct MouseState {
- /*< public >*/
- ADBDevice parent_obj;
- /*< private >*/
-
- int buttons_state, last_buttons_state;
- int dx, dy, dz;
-} MouseState;
-
-#define ADB_MOUSE_CLASS(class) \
- OBJECT_CLASS_CHECK(ADBMouseClass, (class), TYPE_ADB_MOUSE)
-#define ADB_MOUSE_GET_CLASS(obj) \
- OBJECT_GET_CLASS(ADBMouseClass, (obj), TYPE_ADB_MOUSE)
-
-typedef struct ADBMouseClass {
- /*< public >*/
- ADBDeviceClass parent_class;
- /*< private >*/
-
- DeviceRealize parent_realize;
-} ADBMouseClass;
-
-static void adb_mouse_event(void *opaque,
- int dx1, int dy1, int dz1, int buttons_state)
-{
- MouseState *s = opaque;
-
- s->dx += dx1;
- s->dy += dy1;
- s->dz += dz1;
- s->buttons_state = buttons_state;
-}
-
-
-static int adb_mouse_poll(ADBDevice *d, uint8_t *obuf)
-{
- MouseState *s = ADB_MOUSE(d);
- int dx, dy;
-
- if (s->last_buttons_state == s->buttons_state &&
- s->dx == 0 && s->dy == 0)
- return 0;
-
- dx = s->dx;
- if (dx < -63)
- dx = -63;
- else if (dx > 63)
- dx = 63;
-
- dy = s->dy;
- if (dy < -63)
- dy = -63;
- else if (dy > 63)
- dy = 63;
-
- s->dx -= dx;
- s->dy -= dy;
- s->last_buttons_state = s->buttons_state;
-
- dx &= 0x7f;
- dy &= 0x7f;
-
- if (!(s->buttons_state & MOUSE_EVENT_LBUTTON))
- dy |= 0x80;
- if (!(s->buttons_state & MOUSE_EVENT_RBUTTON))
- dx |= 0x80;
-
- obuf[0] = dy;
- obuf[1] = dx;
- return 2;
-}
-
-static int adb_mouse_request(ADBDevice *d, uint8_t *obuf,
- const uint8_t *buf, int len)
-{
- MouseState *s = ADB_MOUSE(d);
- int cmd, reg, olen;
-
- if ((buf[0] & 0x0f) == ADB_FLUSH) {
- /* flush mouse fifo */
- s->buttons_state = s->last_buttons_state;
- s->dx = 0;
- s->dy = 0;
- s->dz = 0;
- return 0;
- }
-
- cmd = buf[0] & 0xc;
- reg = buf[0] & 0x3;
- olen = 0;
- switch(cmd) {
- case ADB_WRITEREG:
- ADB_DPRINTF("write reg %d val 0x%2.2x\n", reg, buf[1]);
- switch(reg) {
- case 2:
- break;
- case 3:
- switch(buf[2]) {
- case ADB_CMD_SELF_TEST:
- break;
- case ADB_CMD_CHANGE_ID:
- case ADB_CMD_CHANGE_ID_AND_ACT:
- case ADB_CMD_CHANGE_ID_AND_ENABLE:
- d->devaddr = buf[1] & 0xf;
- break;
- default:
- d->devaddr = buf[1] & 0xf;
- /* we support handlers:
- * 0x01: Classic Apple Mouse Protocol / 100 cpi operations
- * 0x02: Classic Apple Mouse Protocol / 200 cpi operations
- * we don't support handlers (at least):
- * 0x03: Mouse systems A3 trackball
- * 0x04: Extended Apple Mouse Protocol
- * 0x2f: Microspeed mouse
- * 0x42: Macally
- * 0x5f: Microspeed mouse
- * 0x66: Microspeed mouse
- */
- if (buf[2] == 1 || buf[2] == 2) {
- d->handler = buf[2];
- }
- break;
- }
- }
- break;
- case ADB_READREG:
- switch(reg) {
- case 0:
- olen = adb_mouse_poll(d, obuf);
- break;
- case 1:
- break;
- case 3:
- obuf[0] = d->handler;
- obuf[1] = d->devaddr;
- olen = 2;
- break;
- }
- ADB_DPRINTF("read reg %d obuf[0] 0x%2.2x obuf[1] 0x%2.2x\n", reg,
- obuf[0], obuf[1]);
- break;
- }
- return olen;
-}
-
-static void adb_mouse_reset(DeviceState *dev)
-{
- ADBDevice *d = ADB_DEVICE(dev);
- MouseState *s = ADB_MOUSE(dev);
-
- d->handler = 2;
- d->devaddr = ADB_DEVID_MOUSE;
- s->last_buttons_state = s->buttons_state = 0;
- s->dx = s->dy = s->dz = 0;
-}
-
-static const VMStateDescription vmstate_adb_mouse = {
- .name = "adb_mouse",
- .version_id = 2,
- .minimum_version_id = 2,
- .fields = (VMStateField[]) {
- VMSTATE_STRUCT(parent_obj, MouseState, 0, vmstate_adb_device,
- ADBDevice),
- VMSTATE_INT32(buttons_state, MouseState),
- VMSTATE_INT32(last_buttons_state, MouseState),
- VMSTATE_INT32(dx, MouseState),
- VMSTATE_INT32(dy, MouseState),
- VMSTATE_INT32(dz, MouseState),
- VMSTATE_END_OF_LIST()
- }
-};
-
-static void adb_mouse_realizefn(DeviceState *dev, Error **errp)
-{
- MouseState *s = ADB_MOUSE(dev);
- ADBMouseClass *amc = ADB_MOUSE_GET_CLASS(dev);
-
- amc->parent_realize(dev, errp);
-
- qemu_add_mouse_event_handler(adb_mouse_event, s, 0, "QEMU ADB Mouse");
-}
-
-static void adb_mouse_initfn(Object *obj)
-{
- ADBDevice *d = ADB_DEVICE(obj);
-
- d->devaddr = ADB_DEVID_MOUSE;
-}
-
-static void adb_mouse_class_init(ObjectClass *oc, void *data)
-{
- DeviceClass *dc = DEVICE_CLASS(oc);
- ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc);
- ADBMouseClass *amc = ADB_MOUSE_CLASS(oc);
-
- amc->parent_realize = dc->realize;
- dc->realize = adb_mouse_realizefn;
- set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
-
- adc->devreq = adb_mouse_request;
- dc->reset = adb_mouse_reset;
- dc->vmsd = &vmstate_adb_mouse;
-}
-
-static const TypeInfo adb_mouse_type_info = {
- .name = TYPE_ADB_MOUSE,
- .parent = TYPE_ADB_DEVICE,
- .instance_size = sizeof(MouseState),
- .instance_init = adb_mouse_initfn,
- .class_init = adb_mouse_class_init,
- .class_size = sizeof(ADBMouseClass),
-};
-
-
static void adb_register_types(void)
{
type_register_static(&adb_bus_type_info);
type_register_static(&adb_device_type_info);
- type_register_static(&adb_kbd_type_info);
- type_register_static(&adb_mouse_type_info);
}
type_init(adb_register_types)
diff --git a/hw/input/hid.c b/hw/input/hid.c
index 0d049ff61c..aa4fb826fd 100644
--- a/hw/input/hid.c
+++ b/hw/input/hid.c
@@ -57,14 +57,14 @@ static const uint8_t hid_usage_keys[0x100] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x58, 0xe4, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x00, 0x46,
+ 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x54, 0x00, 0x46,
0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x48, 0x4a,
0x52, 0x4b, 0x00, 0x50, 0x00, 0x4f, 0x00, 0x4d,
0x51, 0x4e, 0x49, 0x4c, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0xe3, 0xe7, 0x65, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xe3, 0xe7, 0x65, 0x66, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/hw/input/trace-events b/hw/input/trace-events
index 88150ef7a6..a8d46cb766 100644
--- a/hw/input/trace-events
+++ b/hw/input/trace-events
@@ -1,5 +1,13 @@
# See docs/devel/tracing.txt for syntax documentation.
+# hw/input/adb-kbd.c
+adb_kbd_no_key(void) "Ignoring NO_KEY"
+adb_kbd_writereg(int reg, uint8_t val) "reg %d val 0x%2.2x"
+adb_kbd_readreg(int reg, uint8_t val0, uint8_t val1) "reg %d obuf[0] 0x%2.2x obuf[1] 0x%2.2x"
+# hw/input/adb-mouse.c
+adb_mouse_writereg(int reg, uint8_t val) "reg %d val 0x%2.2x"
+adb_mouse_readreg(int reg, uint8_t val0, uint8_t val1) "reg %d obuf[0] 0x%2.2x obuf[1] 0x%2.2x"
+
# hw/input/ps2.c
ps2_put_keycode(void *opaque, int keycode) "%p keycode 0x%02x"
ps2_keyboard_event(void *opaque, int qcode, int down, unsigned int modifier, unsigned int modifiers) "%p qcode %d down %d modifier 0x%x modifiers 0x%x"
diff --git a/hw/intc/apic.c b/hw/intc/apic.c
index fe15fb6024..6fda52b86c 100644
--- a/hw/intc/apic.c
+++ b/hw/intc/apic.c
@@ -305,6 +305,18 @@ static void apic_set_tpr(APICCommonState *s, uint8_t val)
}
}
+int apic_get_highest_priority_irr(DeviceState *dev)
+{
+ APICCommonState *s;
+
+ if (!dev) {
+ /* no interrupts */
+ return -1;
+ }
+ s = APIC_COMMON(dev);
+ return get_highest_priority_int(s->irr);
+}
+
static uint8_t apic_get_tpr(APICCommonState *s)
{
apic_sync_vapic(s, SYNC_FROM_VAPIC);
diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c
index f2cce597a9..2bd2f0f3c9 100644
--- a/hw/intc/arm_gicv3_its_common.c
+++ b/hw/intc/arm_gicv3_its_common.c
@@ -131,8 +131,6 @@ static void gicv3_its_common_reset(DeviceState *dev)
s->creadr = 0;
s->iidr = 0;
memset(&s->baser, 0, sizeof(s->baser));
-
- gicv3_its_post_load(s, 0);
}
static void gicv3_its_common_class_init(ObjectClass *klass, void *data)
diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c
index 6fb45dffd7..bf290b8bff 100644
--- a/hw/intc/arm_gicv3_its_kvm.c
+++ b/hw/intc/arm_gicv3_its_kvm.c
@@ -28,6 +28,16 @@
#define TYPE_KVM_ARM_ITS "arm-its-kvm"
#define KVM_ARM_ITS(obj) OBJECT_CHECK(GICv3ITSState, (obj), TYPE_KVM_ARM_ITS)
+#define KVM_ARM_ITS_CLASS(klass) \
+ OBJECT_CLASS_CHECK(KVMARMITSClass, (klass), TYPE_KVM_ARM_ITS)
+#define KVM_ARM_ITS_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(KVMARMITSClass, (obj), TYPE_KVM_ARM_ITS)
+
+typedef struct KVMARMITSClass {
+ GICv3ITSCommonClass parent_class;
+ void (*parent_reset)(DeviceState *dev);
+} KVMARMITSClass;
+
static int kvm_its_send_msi(GICv3ITSState *s, uint32_t value, uint16_t devid)
{
@@ -155,10 +165,6 @@ static void kvm_arm_its_post_load(GICv3ITSState *s)
{
int i;
- if (!s->iidr) {
- return;
- }
-
kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
GITS_IIDR, &s->iidr, true, &error_abort);
@@ -190,6 +196,41 @@ static void kvm_arm_its_post_load(GICv3ITSState *s)
GITS_CTLR, &s->ctlr, true, &error_abort);
}
+static void kvm_arm_its_reset(DeviceState *dev)
+{
+ GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
+ KVMARMITSClass *c = KVM_ARM_ITS_GET_CLASS(s);
+ int i;
+
+ c->parent_reset(dev);
+
+ if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_ITS_CTRL_RESET)) {
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_ITS_CTRL_RESET, NULL, true, &error_abort);
+ return;
+ }
+
+ error_report("ITS KVM: full reset is not supported by the host kernel");
+
+ if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CTLR)) {
+ return;
+ }
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CTLR, &s->ctlr, true, &error_abort);
+
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_CBASER, &s->cbaser, true, &error_abort);
+
+ for (i = 0; i < 8; i++) {
+ kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS,
+ GITS_BASER + i * 8, &s->baser[i], true,
+ &error_abort);
+ }
+}
+
static Property kvm_arm_its_props[] = {
DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "kvm-arm-gicv3",
GICv3State *),
@@ -200,12 +241,15 @@ static void kvm_arm_its_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
+ KVMARMITSClass *ic = KVM_ARM_ITS_CLASS(klass);
dc->realize = kvm_arm_its_realize;
dc->props = kvm_arm_its_props;
+ ic->parent_reset = dc->reset;
icc->send_msi = kvm_its_send_msi;
icc->pre_save = kvm_arm_its_pre_save;
icc->post_load = kvm_arm_its_post_load;
+ dc->reset = kvm_arm_its_reset;
}
static const TypeInfo kvm_arm_its_info = {
@@ -213,6 +257,7 @@ static const TypeInfo kvm_arm_its_info = {
.parent = TYPE_ARM_GICV3_ITS_COMMON,
.instance_size = sizeof(GICv3ITSState),
.class_init = kvm_arm_its_class_init,
+ .class_size = sizeof(KVMARMITSClass),
};
static void kvm_arm_its_register_types(void)
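
Aside: the arm-its-kvm change above uses the standard QOM idiom for extending an inherited reset handler — class_init saves the parent's reset pointer before overriding it, and the new handler chains to the parent first. A stripped-down sketch of the idiom; MyDeviceClass and the MY_DEVICE_* cast macros are hypothetical stand-ins, and the usual QOM/qdev headers from the QEMU tree are assumed.

    /* Sketch only; assumes hw/qdev-core.h and friends from the QEMU tree. */
    typedef struct MyDeviceClass {
        DeviceClass parent_class;
        void (*parent_reset)(DeviceState *dev);   /* saved parent handler */
    } MyDeviceClass;

    static void my_device_reset(DeviceState *dev)
    {
        MyDeviceClass *c = MY_DEVICE_GET_CLASS(dev);   /* hypothetical macro */

        c->parent_reset(dev);      /* run the inherited reset first */
        /* ...then apply the subclass-specific reset work... */
    }

    static void my_device_class_init(ObjectClass *klass, void *data)
    {
        DeviceClass *dc = DEVICE_CLASS(klass);
        MyDeviceClass *mc = MY_DEVICE_CLASS(klass);    /* hypothetical macro */

        mc->parent_reset = dc->reset;   /* remember what we override */
        dc->reset = my_device_reset;
    }

The same pattern appears earlier in this series for realize (parent_realize in ADBKeyboardClass and ADBMouseClass).
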
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 5d9c8834ad..dd49b6c335 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -1786,10 +1786,12 @@ static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size,
MemTxAttrs attrs)
{
+ MemoryRegion *mr = opaque;
+
if (attrs.secure) {
/* S accesses to the alias act like NS accesses to the real region */
attrs.secure = 0;
- return nvic_sysreg_write(opaque, addr, value, size, attrs);
+ return memory_region_dispatch_write(mr, addr, value, size, attrs);
} else {
/* NS attrs are RAZ/WI for privileged, and BusFault for user */
if (attrs.user) {
@@ -1803,10 +1805,12 @@ static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
uint64_t *data, unsigned size,
MemTxAttrs attrs)
{
+ MemoryRegion *mr = opaque;
+
if (attrs.secure) {
/* S accesses to the alias act like NS accesses to the real region */
attrs.secure = 0;
- return nvic_sysreg_read(opaque, addr, data, size, attrs);
+ return memory_region_dispatch_read(mr, addr, data, size, attrs);
} else {
/* NS attrs are RAZ/WI for privileged, and BusFault for user */
if (attrs.user) {
@@ -1823,6 +1827,36 @@ static const MemoryRegionOps nvic_sysreg_ns_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
+static MemTxResult nvic_systick_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ NVICState *s = opaque;
+ MemoryRegion *mr;
+
+ /* Direct the access to the correct systick */
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
+ return memory_region_dispatch_write(mr, addr, value, size, attrs);
+}
+
+static MemTxResult nvic_systick_read(void *opaque, hwaddr addr,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ NVICState *s = opaque;
+ MemoryRegion *mr;
+
+ /* Direct the access to the correct systick */
+ mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->systick[attrs.secure]), 0);
+ return memory_region_dispatch_read(mr, addr, data, size, attrs);
+}
+
+static const MemoryRegionOps nvic_systick_ops = {
+ .read_with_attrs = nvic_systick_read,
+ .write_with_attrs = nvic_systick_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
static int nvic_post_load(void *opaque, int version_id)
{
NVICState *s = opaque;
@@ -2001,17 +2035,16 @@ static void nvic_systick_trigger(void *opaque, int n, int level)
/* SysTick just asked us to pend its exception.
* (This is different from an external interrupt line's
* behaviour.)
- * TODO: when we implement the banked systicks we must make
- * this pend the correct banked exception.
+ * n == 0 : NonSecure systick
+ * n == 1 : Secure systick
*/
- armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, false);
+ armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
}
}
static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
NVICState *s = NVIC(dev);
- SysBusDevice *systick_sbd;
Error *err = NULL;
int regionlen;
@@ -2028,14 +2061,35 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
/* include space for internal exception vectors */
s->num_irq += NVIC_FIRST_IRQ;
- object_property_set_bool(OBJECT(&s->systick), true, "realized", &err);
+ object_property_set_bool(OBJECT(&s->systick[M_REG_NS]), true,
+ "realized", &err);
if (err != NULL) {
error_propagate(errp, err);
return;
}
- systick_sbd = SYS_BUS_DEVICE(&s->systick);
- sysbus_connect_irq(systick_sbd, 0,
- qdev_get_gpio_in_named(dev, "systick-trigger", 0));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_NS]), 0,
+ qdev_get_gpio_in_named(dev, "systick-trigger",
+ M_REG_NS));
+
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
+ /* We couldn't init the secure systick device in instance_init
+ * as we didn't know then if the CPU had the security extensions;
+ * so we have to do it here.
+ */
+ object_initialize(&s->systick[M_REG_S], sizeof(s->systick[M_REG_S]),
+ TYPE_SYSTICK);
+ qdev_set_parent_bus(DEVICE(&s->systick[M_REG_S]), sysbus_get_default());
+
+ object_property_set_bool(OBJECT(&s->systick[M_REG_S]), true,
+ "realized", &err);
+ if (err != NULL) {
+ error_propagate(errp, err);
+ return;
+ }
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->systick[M_REG_S]), 0,
+ qdev_get_gpio_in_named(dev, "systick-trigger",
+ M_REG_S));
+ }
/* The NVIC and System Control Space (SCS) starts at 0xe000e000
* and looks like this:
@@ -2069,15 +2123,24 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
"nvic_sysregs", 0x1000);
memory_region_add_subregion(&s->container, 0, &s->sysregmem);
+
+ memory_region_init_io(&s->systickmem, OBJECT(s),
+ &nvic_systick_ops, s,
+ "nvic_systick", 0xe0);
+
memory_region_add_subregion_overlap(&s->container, 0x10,
- sysbus_mmio_get_region(systick_sbd, 0),
- 1);
+ &s->systickmem, 1);
if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
- &nvic_sysreg_ns_ops, s,
+ &nvic_sysreg_ns_ops, &s->sysregmem,
"nvic_sysregs_ns", 0x1000);
memory_region_add_subregion(&s->container, 0x20000, &s->sysreg_ns_mem);
+ memory_region_init_io(&s->systick_ns_mem, OBJECT(s),
+ &nvic_sysreg_ns_ops, &s->systickmem,
+ "nvic_systick_ns", 0xe0);
+ memory_region_add_subregion_overlap(&s->container, 0x20010,
+ &s->systick_ns_mem, 1);
}
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
@@ -2095,12 +2158,17 @@ static void armv7m_nvic_instance_init(Object *obj)
NVICState *nvic = NVIC(obj);
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
- object_initialize(&nvic->systick, sizeof(nvic->systick), TYPE_SYSTICK);
- qdev_set_parent_bus(DEVICE(&nvic->systick), sysbus_get_default());
+ object_initialize(&nvic->systick[M_REG_NS],
+ sizeof(nvic->systick[M_REG_NS]), TYPE_SYSTICK);
+ qdev_set_parent_bus(DEVICE(&nvic->systick[M_REG_NS]), sysbus_get_default());
+ /* We can't initialize the secure systick here, as we don't know
+ * yet if we need it.
+ */
sysbus_init_irq(sbd, &nvic->excpout);
qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
- qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", 1);
+ qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
+ M_REG_NUM_BANKS);
}
static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
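Note: a hedged sketch of the banked-dispatch idea used for the SysTick window above (BarState and the bar_* names are illustrative): because the ops use the *_with_attrs callbacks, the handler sees the transaction's security attribute, picks the NonSecure or Secure backing region from attrs.secure, and forwards the access with memory_region_dispatch_read()/write(), just as nvic_systick_read()/write() do with the per-bank SysTick MMIO regions.

#include "qemu/osdep.h"
#include "exec/memory.h"

typedef struct BarState {
    MemoryRegion bank_mr[2];        /* [0] = NonSecure, [1] = Secure */
} BarState;

static MemTxResult bar_banked_read(void *opaque, hwaddr addr,
                                   uint64_t *data, unsigned size,
                                   MemTxAttrs attrs)
{
    BarState *s = opaque;

    /* Forward to whichever bank matches the access's security state */
    return memory_region_dispatch_read(&s->bank_mr[attrs.secure],
                                       addr, data, size, attrs);
}

static MemTxResult bar_banked_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    BarState *s = opaque;

    return memory_region_dispatch_write(&s->bank_mr[attrs.secure],
                                        addr, value, size, attrs);
}

static const MemoryRegionOps bar_banked_ops = {
    .read_with_attrs = bar_banked_read,
    .write_with_attrs = bar_banked_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};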
diff --git a/hw/intc/i8259.c b/hw/intc/i8259.c
index fe9ecd6bd4..1602255a87 100644
--- a/hw/intc/i8259.c
+++ b/hw/intc/i8259.c
@@ -25,24 +25,15 @@
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
-#include "monitor/monitor.h"
#include "qemu/timer.h"
#include "qemu/log.h"
#include "hw/isa/i8259_internal.h"
-#include "hw/intc/intc.h"
+#include "trace.h"
/* debug PIC */
//#define DEBUG_PIC
-#ifdef DEBUG_PIC
-#define DPRINTF(fmt, ...) \
- do { printf("pic: " fmt , ## __VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...)
-#endif
-
//#define DEBUG_IRQ_LATENCY
-//#define DEBUG_IRQ_COUNT
#define TYPE_I8259 "isa-i8259"
#define PIC_CLASS(class) OBJECT_CLASS_CHECK(PICClass, (class), TYPE_I8259)
@@ -58,12 +49,6 @@ typedef struct PICClass {
DeviceRealize parent_realize;
} PICClass;
-#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_COUNT)
-static int irq_level[16];
-#endif
-#ifdef DEBUG_IRQ_COUNT
-static uint64_t irq_count[16];
-#endif
#ifdef DEBUG_IRQ_LATENCY
static int64_t irq_time[16];
#endif
@@ -122,8 +107,7 @@ static void pic_update_irq(PICCommonState *s)
irq = pic_get_irq(s);
if (irq >= 0) {
- DPRINTF("pic%d: imr=%x irr=%x padd=%d\n",
- s->master ? 0 : 1, s->imr, s->irr, s->priority_add);
+ trace_pic_update_irq(s->master, s->imr, s->irr, s->priority_add);
qemu_irq_raise(s->int_out[0]);
} else {
qemu_irq_lower(s->int_out[0]);
@@ -135,22 +119,11 @@ static void pic_set_irq(void *opaque, int irq, int level)
{
PICCommonState *s = opaque;
int mask = 1 << irq;
-
-#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_COUNT) || \
- defined(DEBUG_IRQ_LATENCY)
int irq_index = s->master ? irq : irq + 8;
-#endif
-#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_COUNT)
- if (level != irq_level[irq_index]) {
- DPRINTF("pic_set_irq: irq=%d level=%d\n", irq_index, level);
- irq_level[irq_index] = level;
-#ifdef DEBUG_IRQ_COUNT
- if (level == 1) {
- irq_count[irq_index]++;
- }
-#endif
- }
-#endif
+
+ trace_pic_set_irq(s->master, irq, level);
+ pic_stat_update_irq(irq_index, level);
+
#ifdef DEBUG_IRQ_LATENCY
if (level) {
irq_time[irq_index] = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
@@ -223,18 +196,18 @@ int pic_read_irq(DeviceState *d)
intno = s->irq_base + irq;
}
-#if defined(DEBUG_PIC) || defined(DEBUG_IRQ_LATENCY)
if (irq == 2) {
irq = irq2 + 8;
}
-#endif
+
#ifdef DEBUG_IRQ_LATENCY
printf("IRQ%d latency=%0.3fus\n",
irq,
(double)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
irq_time[irq]) * 1000000.0 / NANOSECONDS_PER_SECOND);
#endif
- DPRINTF("pic_interrupt: irq=%d\n", irq);
+
+ trace_pic_interrupt(irq, intno);
return intno;
}
@@ -252,35 +225,6 @@ static void pic_reset(DeviceState *dev)
pic_init_reset(s);
}
-static bool pic_get_statistics(InterruptStatsProvider *obj,
- uint64_t **irq_counts, unsigned int *nb_irqs)
-{
- PICCommonState *s = PIC_COMMON(obj);
-
- if (s->master) {
-#ifdef DEBUG_IRQ_COUNT
- *irq_counts = irq_count;
- *nb_irqs = ARRAY_SIZE(irq_count);
-#else
- return false;
-#endif
- } else {
- *irq_counts = NULL;
- *nb_irqs = 0;
- }
- return true;
-}
-
-static void pic_print_info(InterruptStatsProvider *obj, Monitor *mon)
-{
- PICCommonState *s = PIC_COMMON(obj);
- monitor_printf(mon, "pic%d: irr=%02x imr=%02x isr=%02x hprio=%d "
- "irq_base=%02x rr_sel=%d elcr=%02x fnm=%d\n",
- s->master ? 0 : 1, s->irr, s->imr, s->isr, s->priority_add,
- s->irq_base, s->read_reg_select, s->elcr,
- s->special_fully_nested_mode);
-}
-
static void pic_ioport_write(void *opaque, hwaddr addr64,
uint64_t val64, unsigned size)
{
@@ -289,7 +233,8 @@ static void pic_ioport_write(void *opaque, hwaddr addr64,
uint32_t val = val64;
int priority, cmd, irq;
- DPRINTF("write: addr=0x%02x val=0x%02x\n", addr, val);
+ trace_pic_ioport_write(s->master, addr, val);
+
if (addr == 0) {
if (val & 0x10) {
pic_init_reset(s);
@@ -402,7 +347,7 @@ static uint64_t pic_ioport_read(void *opaque, hwaddr addr,
ret = s->imr;
}
}
- DPRINTF("read: addr=0x%02" HWADDR_PRIx " val=0x%02x\n", addr, ret);
+ trace_pic_ioport_read(s->master, addr, ret);
return ret;
}
@@ -497,13 +442,10 @@ static void i8259_class_init(ObjectClass *klass, void *data)
{
PICClass *k = PIC_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
- InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
k->parent_realize = dc->realize;
dc->realize = pic_realize;
dc->reset = pic_reset;
- ic->get_statistics = pic_get_statistics;
- ic->print_info = pic_print_info;
}
static const TypeInfo i8259_info = {
@@ -512,10 +454,6 @@ static const TypeInfo i8259_info = {
.parent = TYPE_PIC_COMMON,
.class_init = i8259_class_init,
.class_size = sizeof(PICClass),
- .interfaces = (InterfaceInfo[]) {
- { TYPE_INTERRUPT_STATS_PROVIDER },
- { }
- },
};
static void pic_register_types(void)
diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c
index 18427b459a..c75c880157 100644
--- a/hw/intc/i8259_common.c
+++ b/hw/intc/i8259_common.c
@@ -25,6 +25,10 @@
#include "qemu/osdep.h"
#include "hw/i386/pc.h"
#include "hw/isa/i8259_internal.h"
+#include "monitor/monitor.h"
+
+static int irq_level[16];
+static uint64_t irq_count[16];
void pic_reset_common(PICCommonState *s)
{
@@ -98,6 +102,44 @@ ISADevice *i8259_init_chip(const char *name, ISABus *bus, bool master)
return isadev;
}
+void pic_stat_update_irq(int irq, int level)
+{
+ if (level != irq_level[irq]) {
+ irq_level[irq] = level;
+ if (level == 1) {
+ irq_count[irq]++;
+ }
+ }
+}
+
+bool pic_get_statistics(InterruptStatsProvider *obj,
+ uint64_t **irq_counts, unsigned int *nb_irqs)
+{
+ PICCommonState *s = PIC_COMMON(obj);
+
+ if (s->master) {
+ *irq_counts = irq_count;
+ *nb_irqs = ARRAY_SIZE(irq_count);
+ } else {
+ *irq_counts = NULL;
+ *nb_irqs = 0;
+ }
+
+ return true;
+}
+
+void pic_print_info(InterruptStatsProvider *obj, Monitor *mon)
+{
+ PICCommonState *s = PIC_COMMON(obj);
+
+ pic_dispatch_pre_save(s);
+ monitor_printf(mon, "pic%d: irr=%02x imr=%02x isr=%02x hprio=%d "
+ "irq_base=%02x rr_sel=%d elcr=%02x fnm=%d\n",
+ s->master ? 0 : 1, s->irr, s->imr, s->isr, s->priority_add,
+ s->irq_base, s->read_reg_select, s->elcr,
+ s->special_fully_nested_mode);
+}
+
static const VMStateDescription vmstate_pic_common = {
.name = "i8259",
.version_id = 1,
@@ -136,6 +178,7 @@ static Property pic_properties_common[] = {
static void pic_common_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ InterruptStatsProviderClass *ic = INTERRUPT_STATS_PROVIDER_CLASS(klass);
dc->vmsd = &vmstate_pic_common;
dc->props = pic_properties_common;
@@ -147,6 +190,8 @@ static void pic_common_class_init(ObjectClass *klass, void *data)
* code.
*/
dc->user_creatable = false;
+ ic->get_statistics = pic_get_statistics;
+ ic->print_info = pic_print_info;
}
static const TypeInfo pic_common_type = {
@@ -156,6 +201,10 @@ static const TypeInfo pic_common_type = {
.class_size = sizeof(PICCommonClass),
.class_init = pic_common_class_init,
.abstract = true,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_INTERRUPT_STATS_PROVIDER },
+ { }
+ },
};
static void pic_common_register_types(void)
diff --git a/hw/intc/lm32_pic.c b/hw/intc/lm32_pic.c
index 09e15115fb..db6c7afc2f 100644
--- a/hw/intc/lm32_pic.c
+++ b/hw/intc/lm32_pic.c
@@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "monitor/monitor.h"
#include "hw/sysbus.h"
#include "trace.h"
diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c
index 10d6e871fb..9159a06f07 100644
--- a/hw/intc/openpic.c
+++ b/hw/intc/openpic.c
@@ -46,6 +46,7 @@
#include "qapi/qmp/qerror.h"
#include "qemu/log.h"
#include "qemu/timer.h"
+#include "qemu/error-report.h"
//#define DEBUG_OPENPIC
@@ -58,8 +59,7 @@ static const int debug_openpic = 0;
static int get_current_cpu(void);
#define DPRINTF(fmt, ...) do { \
if (debug_openpic) { \
- printf("Core%d: ", get_current_cpu()); \
- printf(fmt , ## __VA_ARGS__); \
+ info_report("Core%d: " fmt, get_current_cpu(), ## __VA_ARGS__); \
} \
} while (0)
@@ -173,7 +173,7 @@ static int inttgt_to_output(int inttgt)
}
}
- fprintf(stderr, "%s: unsupported inttgt %d\n", __func__, inttgt);
+ error_report("%s: unsupported inttgt %d", __func__, inttgt);
return OPENPIC_OUTPUT_INT;
}
@@ -372,7 +372,7 @@ static void IRQ_check(OpenPICState *opp, IRQQueue *q)
break;
}
- DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d\n",
+ DPRINTF("IRQ_check: irq %d set ivpr_pr=%d pr=%d",
irq, IVPR_PRIORITY(opp->src[irq].ivpr), priority);
if (IVPR_PRIORITY(opp->src[irq].ivpr) > priority) {
@@ -403,11 +403,11 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
dst = &opp->dst[n_CPU];
src = &opp->src[n_IRQ];
- DPRINTF("%s: IRQ %d active %d was %d\n",
+ DPRINTF("%s: IRQ %d active %d was %d",
__func__, n_IRQ, active, was_active);
if (src->output != OPENPIC_OUTPUT_INT) {
- DPRINTF("%s: output %d irq %d active %d was %d count %d\n",
+ DPRINTF("%s: output %d irq %d active %d was %d count %d",
__func__, src->output, n_IRQ, active, was_active,
dst->outputs_active[src->output]);
@@ -417,13 +417,13 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
*/
if (active) {
if (!was_active && dst->outputs_active[src->output]++ == 0) {
- DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d\n",
+ DPRINTF("%s: Raise OpenPIC output %d cpu %d irq %d",
__func__, src->output, n_CPU, n_IRQ);
qemu_irq_raise(dst->irqs[src->output]);
}
} else {
if (was_active && --dst->outputs_active[src->output] == 0) {
- DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d\n",
+ DPRINTF("%s: Lower OpenPIC output %d cpu %d irq %d",
__func__, src->output, n_CPU, n_IRQ);
qemu_irq_lower(dst->irqs[src->output]);
}
@@ -446,7 +446,7 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
IRQ_check(opp, &dst->raised);
if (active && priority <= dst->ctpr) {
- DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d\n",
+ DPRINTF("%s: IRQ %d priority %d too low for ctpr %d on CPU %d",
__func__, n_IRQ, priority, dst->ctpr, n_CPU);
active = 0;
}
@@ -454,10 +454,10 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
if (active) {
if (IRQ_get_next(opp, &dst->servicing) >= 0 &&
priority <= dst->servicing.priority) {
- DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d\n",
+ DPRINTF("%s: IRQ %d is hidden by servicing IRQ %d on CPU %d",
__func__, n_IRQ, dst->servicing.next, n_CPU);
} else {
- DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d\n",
+ DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d/%d",
__func__, n_CPU, n_IRQ, dst->raised.next);
qemu_irq_raise(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
}
@@ -465,12 +465,12 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ,
IRQ_get_next(opp, &dst->servicing);
if (dst->raised.priority > dst->ctpr &&
dst->raised.priority > dst->servicing.priority) {
- DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d\n",
+ DPRINTF("%s: IRQ %d inactive, IRQ %d prio %d above %d/%d, CPU %d",
__func__, n_IRQ, dst->raised.next, dst->raised.priority,
dst->ctpr, dst->servicing.priority, n_CPU);
/* IRQ line stays asserted */
} else {
- DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d\n",
+ DPRINTF("%s: IRQ %d inactive, current prio %d/%d, CPU %d",
__func__, n_IRQ, dst->ctpr, dst->servicing.priority, n_CPU);
qemu_irq_lower(opp->dst[n_CPU].irqs[OPENPIC_OUTPUT_INT]);
}
@@ -489,7 +489,7 @@ static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
if ((src->ivpr & IVPR_MASK_MASK) && !src->nomask) {
/* Interrupt source is disabled */
- DPRINTF("%s: IRQ %d is disabled\n", __func__, n_IRQ);
+ DPRINTF("%s: IRQ %d is disabled", __func__, n_IRQ);
active = false;
}
@@ -500,7 +500,7 @@ static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
* ctpr may have changed and we need to withdraw the interrupt.
*/
if (!active && !was_active) {
- DPRINTF("%s: IRQ %d is already inactive\n", __func__, n_IRQ);
+ DPRINTF("%s: IRQ %d is already inactive", __func__, n_IRQ);
return;
}
@@ -512,7 +512,7 @@ static void openpic_update_irq(OpenPICState *opp, int n_IRQ)
if (src->destmask == 0) {
/* No target */
- DPRINTF("%s: IRQ %d has no target\n", __func__, n_IRQ);
+ DPRINTF("%s: IRQ %d has no target", __func__, n_IRQ);
return;
}
@@ -547,12 +547,12 @@ static void openpic_set_irq(void *opaque, int n_IRQ, int level)
IRQSource *src;
if (n_IRQ >= OPENPIC_MAX_IRQ) {
- fprintf(stderr, "%s: IRQ %d out of range\n", __func__, n_IRQ);
+ error_report("%s: IRQ %d out of range", __func__, n_IRQ);
abort();
}
src = &opp->src[n_IRQ];
- DPRINTF("openpic: set irq %d = %d ivpr=0x%08x\n",
+ DPRINTF("openpic: set irq %d = %d ivpr=0x%08x",
n_IRQ, level, src->ivpr);
if (src->level) {
/* level-sensitive irq */
@@ -612,13 +612,13 @@ static inline void write_IRQreg_idr(OpenPICState *opp, int n_IRQ, uint32_t val)
}
src->idr = val & mask;
- DPRINTF("Set IDR %d to 0x%08x\n", n_IRQ, src->idr);
+ DPRINTF("Set IDR %d to 0x%08x", n_IRQ, src->idr);
if (opp->flags & OPENPIC_FLAG_IDR_CRIT) {
if (src->idr & crit_mask) {
if (src->idr & normal_mask) {
DPRINTF("%s: IRQ configured for multiple output types, using "
- "critical\n", __func__);
+ "critical", __func__);
}
src->output = OPENPIC_OUTPUT_CINT;
@@ -648,7 +648,7 @@ static inline void write_IRQreg_ilr(OpenPICState *opp, int n_IRQ, uint32_t val)
IRQSource *src = &opp->src[n_IRQ];
src->output = inttgt_to_output(val & ILR_INTTGT_MASK);
- DPRINTF("Set ILR %d to 0x%08x, output %d\n", n_IRQ, src->idr,
+ DPRINTF("Set ILR %d to 0x%08x, output %d", n_IRQ, src->idr,
src->output);
/* TODO: on MPIC v4.0 only, set nomask for non-INT */
@@ -688,7 +688,7 @@ static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val)
}
openpic_update_irq(opp, n_IRQ);
- DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x\n", n_IRQ, val,
+ DPRINTF("Set IVPR %d to 0x%08x -> 0x%08x", n_IRQ, val,
opp->src[n_IRQ].ivpr);
}
@@ -719,7 +719,7 @@ static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
IRQDest *dst;
int idx;
- DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n",
+ DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
__func__, addr, val);
if (addr & 0xF) {
return;
@@ -747,11 +747,11 @@ static void openpic_gbl_write(void *opaque, hwaddr addr, uint64_t val,
case 0x1090: /* PIR */
for (idx = 0; idx < opp->nb_cpus; idx++) {
if ((val & (1 << idx)) && !(opp->pir & (1 << idx))) {
- DPRINTF("Raise OpenPIC RESET output for CPU %d\n", idx);
+ DPRINTF("Raise OpenPIC RESET output for CPU %d", idx);
dst = &opp->dst[idx];
qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_RESET]);
} else if (!(val & (1 << idx)) && (opp->pir & (1 << idx))) {
- DPRINTF("Lower OpenPIC RESET output for CPU %d\n", idx);
+ DPRINTF("Lower OpenPIC RESET output for CPU %d", idx);
dst = &opp->dst[idx];
qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_RESET]);
}
@@ -781,7 +781,7 @@ static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
OpenPICState *opp = opaque;
uint32_t retval;
- DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr);
+ DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
retval = 0xFFFFFFFF;
if (addr & 0xF) {
return retval;
@@ -828,7 +828,7 @@ static uint64_t openpic_gbl_read(void *opaque, hwaddr addr, unsigned len)
default:
break;
}
- DPRINTF("%s: => 0x%08x\n", __func__, retval);
+ DPRINTF("%s: => 0x%08x", __func__, retval);
return retval;
}
@@ -843,7 +843,7 @@ static void qemu_timer_cb(void *opaque)
uint32_t val = tmr->tbcr & ~TBCR_CI;
uint32_t tog = ((tmr->tccr & TCCR_TOG) ^ TCCR_TOG); /* invert toggle. */
- DPRINTF("%s n_IRQ=%d\n", __func__, n_IRQ);
+ DPRINTF("%s n_IRQ=%d", __func__, n_IRQ);
/* Reload current count from base count and setup timer. */
tmr->tccr = val | tog;
openpic_tmr_set_tmr(tmr, val, /*enabled=*/true);
@@ -898,7 +898,7 @@ static void openpic_tmr_write(void *opaque, hwaddr addr, uint64_t val,
OpenPICState *opp = opaque;
int idx;
- DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n",
+ DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
__func__, (addr + 0x10f0), val);
if (addr & 0xF) {
return;
@@ -943,7 +943,7 @@ static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
uint32_t retval = -1;
int idx;
- DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr + 0x10f0);
+ DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr + 0x10f0);
if (addr & 0xF) {
goto out;
}
@@ -970,7 +970,7 @@ static uint64_t openpic_tmr_read(void *opaque, hwaddr addr, unsigned len)
}
out:
- DPRINTF("%s: => 0x%08x\n", __func__, retval);
+ DPRINTF("%s: => 0x%08x", __func__, retval);
return retval;
}
@@ -981,7 +981,7 @@ static void openpic_src_write(void *opaque, hwaddr addr, uint64_t val,
OpenPICState *opp = opaque;
int idx;
- DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64 "\n",
+ DPRINTF("%s: addr %#" HWADDR_PRIx " <= %08" PRIx64,
__func__, addr, val);
addr = addr & 0xffff;
@@ -1006,7 +1006,7 @@ static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
uint32_t retval;
int idx;
- DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr);
+ DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
retval = 0xFFFFFFFF;
addr = addr & 0xffff;
@@ -1024,7 +1024,7 @@ static uint64_t openpic_src_read(void *opaque, uint64_t addr, unsigned len)
break;
}
- DPRINTF("%s: => 0x%08x\n", __func__, retval);
+ DPRINTF("%s: => 0x%08x", __func__, retval);
return retval;
}
@@ -1035,7 +1035,7 @@ static void openpic_msi_write(void *opaque, hwaddr addr, uint64_t val,
int idx = opp->irq_msi;
int srs, ibs;
- DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64 "\n",
+ DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
__func__, addr, val);
if (addr & 0xF) {
return;
@@ -1061,7 +1061,7 @@ static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size)
uint64_t r = 0;
int i, srs;
- DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr);
+ DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
if (addr & 0xF) {
return -1;
}
@@ -1096,7 +1096,7 @@ static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size)
{
uint64_t r = 0;
- DPRINTF("%s: addr %#" HWADDR_PRIx "\n", __func__, addr);
+ DPRINTF("%s: addr %#" HWADDR_PRIx, __func__, addr);
/* TODO: EISR/EIMR */
@@ -1106,7 +1106,7 @@ static uint64_t openpic_summary_read(void *opaque, hwaddr addr, unsigned size)
static void openpic_summary_write(void *opaque, hwaddr addr, uint64_t val,
unsigned size)
{
- DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64 "\n",
+ DPRINTF("%s: addr %#" HWADDR_PRIx " <= 0x%08" PRIx64,
__func__, addr, val);
/* TODO: EISR/EIMR */
@@ -1120,7 +1120,7 @@ static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
IRQDest *dst;
int s_IRQ, n_IRQ;
- DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x\n", __func__, idx,
+ DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx " <= 0x%08x", __func__, idx,
addr, val);
if (idx < 0 || idx >= opp->nb_cpus) {
@@ -1146,16 +1146,16 @@ static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
case 0x80: /* CTPR */
dst->ctpr = val & 0x0000000F;
- DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d\n",
+ DPRINTF("%s: set CPU %d ctpr to %d, raised %d servicing %d",
__func__, idx, dst->ctpr, dst->raised.priority,
dst->servicing.priority);
if (dst->raised.priority <= dst->ctpr) {
- DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr\n",
+ DPRINTF("%s: Lower OpenPIC INT output cpu %d due to ctpr",
__func__, idx);
qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
} else if (dst->raised.priority > dst->servicing.priority) {
- DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d\n",
+ DPRINTF("%s: Raise OpenPIC INT output cpu %d irq %d",
__func__, idx, dst->raised.next);
qemu_irq_raise(dst->irqs[OPENPIC_OUTPUT_INT]);
}
@@ -1168,11 +1168,11 @@ static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
/* Read-only register */
break;
case 0xB0: /* EOI */
- DPRINTF("EOI\n");
+ DPRINTF("EOI");
s_IRQ = IRQ_get_next(opp, &dst->servicing);
if (s_IRQ < 0) {
- DPRINTF("%s: EOI with no interrupt in service\n", __func__);
+ DPRINTF("%s: EOI with no interrupt in service", __func__);
break;
}
@@ -1185,7 +1185,7 @@ static void openpic_cpu_write_internal(void *opaque, hwaddr addr,
if (n_IRQ != -1 &&
(s_IRQ == -1 ||
IVPR_PRIORITY(src->ivpr) > dst->servicing.priority)) {
- DPRINTF("Raise OpenPIC INT output cpu %d irq %d\n",
+ DPRINTF("Raise OpenPIC INT output cpu %d irq %d",
idx, n_IRQ);
qemu_irq_raise(opp->dst[idx].irqs[OPENPIC_OUTPUT_INT]);
}
@@ -1207,11 +1207,11 @@ static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
IRQSource *src;
int retval, irq;
- DPRINTF("Lower OpenPIC INT output\n");
+ DPRINTF("Lower OpenPIC INT output");
qemu_irq_lower(dst->irqs[OPENPIC_OUTPUT_INT]);
irq = IRQ_get_next(opp, &dst->raised);
- DPRINTF("IACK: irq=%d\n", irq);
+ DPRINTF("IACK: irq=%d", irq);
if (irq == -1) {
/* No more interrupt pending */
@@ -1221,7 +1221,7 @@ static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
src = &opp->src[irq];
if (!(src->ivpr & IVPR_ACTIVITY_MASK) ||
!(IVPR_PRIORITY(src->ivpr) > dst->ctpr)) {
- fprintf(stderr, "%s: bad raised IRQ %d ctpr %d ivpr 0x%08x\n",
+ error_report("%s: bad raised IRQ %d ctpr %d ivpr 0x%08x",
__func__, irq, dst->ctpr, src->ivpr);
openpic_update_irq(opp, irq);
retval = opp->spve;
@@ -1241,7 +1241,7 @@ static uint32_t openpic_iack(OpenPICState *opp, IRQDest *dst, int cpu)
/* Timers and IPIs support multicast. */
if (((irq >= opp->irq_ipi0) && (irq < (opp->irq_ipi0 + OPENPIC_MAX_IPI))) ||
((irq >= opp->irq_tim0) && (irq < (opp->irq_tim0 + OPENPIC_MAX_TMR)))) {
- DPRINTF("irq is IPI or TMR\n");
+ DPRINTF("irq is IPI or TMR");
src->destmask &= ~(1 << cpu);
if (src->destmask && !src->level) {
/* trigger on CPUs that didn't know about it yet */
@@ -1262,7 +1262,7 @@ static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
IRQDest *dst;
uint32_t retval;
- DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx "\n", __func__, idx, addr);
+ DPRINTF("%s: cpu %d addr %#" HWADDR_PRIx, __func__, idx, addr);
retval = 0xFFFFFFFF;
if (idx < 0 || idx >= opp->nb_cpus) {
@@ -1290,7 +1290,7 @@ static uint32_t openpic_cpu_read_internal(void *opaque, hwaddr addr,
default:
break;
}
- DPRINTF("%s: => 0x%08x\n", __func__, retval);
+ DPRINTF("%s: => 0x%08x", __func__, retval);
return retval;
}
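Note: the OpenPIC conversion above keeps DPRINTF but routes it through info_report(), which appends its own newline (hence every trailing "\n" in the format strings is dropped); the constant gate keeps the format strings compile-checked even when debugging is off. A minimal sketch of the same macro shape, with illustrative names:

#include "qemu/osdep.h"
#include "qemu/error-report.h"

static const int debug_foo = 0;     /* flip to 1 to enable debug output */

#define FOO_DPRINTF(fmt, ...) do {                        \
        if (debug_foo) {                                  \
            info_report("foo: " fmt, ## __VA_ARGS__);     \
        }                                                 \
    } while (0)

static void foo_example(int irq, int level)
{
    FOO_DPRINTF("set irq %d = %d", irq, level);   /* no trailing newline */
}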
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index b298fac7c6..be769186fc 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -1,5 +1,12 @@
# See docs/devel/tracing.txt for syntax documentation.
+# hw/intc/i8259.c
+pic_update_irq(bool master, uint8_t imr, uint8_t irr, uint8_t padd) "master %d imr %"PRIu8" irr %"PRIu8" padd %"PRIu8
+pic_set_irq(bool master, int irq, int level) "master %d irq %d level %d"
+pic_interrupt(int irq, int intno) "irq %d intno %d"
+pic_ioport_write(bool master, uint64_t addr, uint64_t val) "master %d addr 0x%"PRIx64" val 0x%"PRIx64
+pic_ioport_read(bool master, uint64_t addr, int val) "master %d addr 0x%"PRIx64" val 0x%x"
+
# hw/intc/apic_common.c
cpu_set_apic_base(uint64_t val) "0x%016"PRIx64
cpu_get_apic_base(uint64_t val) "0x%016"PRIx64
@@ -64,10 +71,6 @@ xics_ics_simple_set_irq_lsi(int srcno, int nr) "set_irq_lsi: srcno %d [irq 0x%x]
xics_ics_simple_write_xive(int nr, int srcno, int server, uint8_t priority) "ics_write_xive: irq 0x%x [src %d] server 0x%x prio 0x%x"
xics_ics_simple_reject(int nr, int srcno) "reject irq 0x%x [src %d]"
xics_ics_simple_eoi(int nr) "ics_eoi: irq 0x%x"
-xics_alloc(int irq) "irq %d"
-xics_alloc_block(int first, int num, bool lsi, int align) "first irq %d, %d irqs, lsi=%d, alignnum %d"
-xics_ics_free(int src, int irq, int num) "Source#%d, first irq %d, %d irqs"
-xics_ics_free_warn(int src, int irq) "Source#%d, irq %d is already free"
# hw/intc/s390_flic_kvm.c
flic_create_device(int err) "flic: create device failed %d"
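Note: for reference, how the new pic_* events above get consumed: the tracetool build step turns each declaration in this file into a trace_<name>() helper with a matching prototype, so i8259.c only needs to include "trace.h" and call it (see the trace_pic_set_irq() call earlier in this patch). A tiny sketch:

#include "qemu/osdep.h"
#include "trace.h"

static void pic_trace_example(bool master, int irq, int level)
{
    /* Expands to (nearly) nothing unless the event is enabled at
     * build/run time; arguments must match the trace-events line. */
    trace_pic_set_irq(master, irq, level);
}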
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
index a1cc0e420c..e73e623e3b 100644
--- a/hw/intc/xics.c
+++ b/hw/intc/xics.c
@@ -334,7 +334,6 @@ static void icp_realize(DeviceState *dev, Error **errp)
}
cpu = POWERPC_CPU(obj);
- cpu->intc = OBJECT(icp);
icp->cs = CPU(obj);
env = &cpu->env;
@@ -384,6 +383,27 @@ static const TypeInfo icp_info = {
.class_size = sizeof(ICPStateClass),
};
+Object *icp_create(Object *cpu, const char *type, XICSFabric *xi, Error **errp)
+{
+ Error *local_err = NULL;
+ Object *obj;
+
+ obj = object_new(type);
+ object_property_add_child(cpu, type, obj, &error_abort);
+ object_unref(obj);
+ object_property_add_const_link(obj, ICP_PROP_XICS, OBJECT(xi),
+ &error_abort);
+ object_property_add_const_link(obj, ICP_PROP_CPU, cpu, &error_abort);
+ object_property_set_bool(obj, true, "realized", &local_err);
+ if (local_err) {
+ object_unparent(obj);
+ error_propagate(errp, local_err);
+ obj = NULL;
+ }
+
+ return obj;
+}
+
/*
* ICS: Source layer
*/
@@ -693,18 +713,6 @@ static const TypeInfo xics_fabric_info = {
/*
* Exported functions
*/
-qemu_irq xics_get_qirq(XICSFabric *xi, int irq)
-{
- XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
- ICSState *ics = xic->ics_get(xi, irq);
-
- if (ics) {
- return ics->qirqs[irq - ics->offset];
- }
-
- return NULL;
-}
-
ICPState *xics_icp_get(XICSFabric *xi, int server)
{
XICSFabricClass *xic = XICS_FABRIC_GET_CLASS(xi);
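Note: the new icp_create() above bundles the usual QOM composition steps (object_new, parent the child under the CPU, wire the const links, realize, unparent on failure). A hypothetical caller, assuming the prototype is exported through hw/ppc/xics.h and using the base "icp" presenter type:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/ppc/xics.h"

static void hypothetical_intc_create(Object *cpu_obj, XICSFabric *xi)
{
    /* A machine would typically pass its preferred ICP subtype here;
     * &error_fatal turns a realize failure into a hard exit. */
    Object *icp = icp_create(cpu_obj, TYPE_ICP, xi, &error_fatal);

    (void)icp;  /* the presenter is now a QOM child of cpu_obj */
}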
diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c
index d98ea8b130..5a0967caf4 100644
--- a/hw/intc/xics_spapr.c
+++ b/hw/intc/xics_spapr.c
@@ -245,122 +245,6 @@ void xics_spapr_init(sPAPRMachineState *spapr)
spapr_register_hypercall(H_IPOLL, h_ipoll);
}
-#define ICS_IRQ_FREE(ics, srcno) \
- (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
-
-static int ics_find_free_block(ICSState *ics, int num, int alignnum)
-{
- int first, i;
-
- for (first = 0; first < ics->nr_irqs; first += alignnum) {
- if (num > (ics->nr_irqs - first)) {
- return -1;
- }
- for (i = first; i < first + num; ++i) {
- if (!ICS_IRQ_FREE(ics, i)) {
- break;
- }
- }
- if (i == (first + num)) {
- return first;
- }
- }
-
- return -1;
-}
-
-int spapr_ics_alloc(ICSState *ics, int irq_hint, bool lsi, Error **errp)
-{
- int irq;
-
- if (!ics) {
- return -1;
- }
- if (irq_hint) {
- if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
- error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
- return -1;
- }
- irq = irq_hint;
- } else {
- irq = ics_find_free_block(ics, 1, 1);
- if (irq < 0) {
- error_setg(errp, "can't allocate IRQ: no IRQ left");
- return -1;
- }
- irq += ics->offset;
- }
-
- ics_set_irq_type(ics, irq - ics->offset, lsi);
- trace_xics_alloc(irq);
-
- return irq;
-}
-
-/*
- * Allocate block of consecutive IRQs, and return the number of the first IRQ in
- * the block. If align==true, aligns the first IRQ number to num.
- */
-int spapr_ics_alloc_block(ICSState *ics, int num, bool lsi,
- bool align, Error **errp)
-{
- int i, first = -1;
-
- if (!ics) {
- return -1;
- }
-
- /*
- * MSIMesage::data is used for storing VIRQ so
- * it has to be aligned to num to support multiple
- * MSI vectors. MSI-X is not affected by this.
- * The hint is used for the first IRQ, the rest should
- * be allocated continuously.
- */
- if (align) {
- assert((num == 1) || (num == 2) || (num == 4) ||
- (num == 8) || (num == 16) || (num == 32));
- first = ics_find_free_block(ics, num, num);
- } else {
- first = ics_find_free_block(ics, num, 1);
- }
- if (first < 0) {
- error_setg(errp, "can't find a free %d-IRQ block", num);
- return -1;
- }
-
- if (first >= 0) {
- for (i = first; i < first + num; ++i) {
- ics_set_irq_type(ics, i, lsi);
- }
- }
- first += ics->offset;
-
- trace_xics_alloc_block(first, num, lsi, align);
-
- return first;
-}
-
-static void ics_free(ICSState *ics, int srcno, int num)
-{
- int i;
-
- for (i = srcno; i < srcno + num; ++i) {
- if (ICS_IRQ_FREE(ics, i)) {
- trace_xics_ics_free_warn(0, i + ics->offset);
- }
- memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
- }
-}
-
-void spapr_ics_free(ICSState *ics, int irq, int num)
-{
- if (ics_valid_irq(ics, irq)) {
- trace_xics_ics_free(0, irq, num);
- ics_free(ics, irq - ics->offset, num);
- }
-}
-
void spapr_dt_xics(int nr_servers, void *fdt, uint32_t phandle)
{
uint32_t interrupt_server_ranges_prop[] = {
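Note: the allocator removed from xics_spapr.c above is easy to misread, so here is a standalone restatement of its core search, ics_find_free_block(): candidate start positions advance in steps of alignnum and the first run of num consecutive free slots wins, which is what lets power-of-two MSI blocks be carved out aligned to their size. Plain C for illustration only, not QEMU code:

#include <stdbool.h>
#include <stdio.h>

static int find_free_block(const bool *free_map, int nr, int num, int alignnum)
{
    for (int first = 0; first < nr; first += alignnum) {
        if (num > nr - first) {
            return -1;              /* not enough room left */
        }
        int i;
        for (i = first; i < first + num; i++) {
            if (!free_map[i]) {
                break;              /* run interrupted, try next start */
            }
        }
        if (i == first + num) {
            return first;           /* found num consecutive free slots */
        }
    }
    return -1;
}

int main(void)
{
    bool map[16] = { [0] = true, [4] = true, [5] = true, [6] = true, [7] = true };

    /* A 4-IRQ MSI block must start on a multiple of 4: slots 4..7 fit. */
    printf("%d\n", find_free_block(map, 16, 4, 4));   /* prints 4 */
    return 0;
}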
diff --git a/hw/ipmi/isa_ipmi_bt.c b/hw/ipmi/isa_ipmi_bt.c
index 2fcc3d2e7c..e098fd5206 100644
--- a/hw/ipmi/isa_ipmi_bt.c
+++ b/hw/ipmi/isa_ipmi_bt.c
@@ -26,7 +26,6 @@
#include "hw/hw.h"
#include "hw/ipmi/ipmi.h"
#include "hw/isa/isa.h"
-#include "hw/i386/pc.h"
/* Control register */
#define IPMI_BT_CLR_WR_BIT 0
diff --git a/hw/ipmi/isa_ipmi_kcs.c b/hw/ipmi/isa_ipmi_kcs.c
index 80444977a0..689587b65d 100644
--- a/hw/ipmi/isa_ipmi_kcs.c
+++ b/hw/ipmi/isa_ipmi_kcs.c
@@ -26,7 +26,6 @@
#include "hw/hw.h"
#include "hw/ipmi/ipmi.h"
#include "hw/isa/isa.h"
-#include "hw/i386/pc.h"
#define IPMI_KCS_OBF_BIT 0
#define IPMI_KCS_IBF_BIT 1
diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c
index d20ea4c2ee..a5d67bc6d7 100644
--- a/hw/isa/i82378.c
+++ b/hw/isa/i82378.c
@@ -21,6 +21,7 @@
#include "hw/pci/pci.h"
#include "hw/i386/pc.h"
#include "hw/timer/i8254.h"
+#include "hw/timer/mc146818rtc.h"
#include "hw/audio/pcspk.h"
#define TYPE_I82378 "i82378"
@@ -97,7 +98,7 @@ static void i82378_realize(PCIDevice *pci, Error **errp)
isa_bus_irqs(isabus, s->i8259);
/* 1 82C54 (pit) */
- isa = pit_init(isabus, 0x40, 0, NULL);
+ isa = i8254_pit_init(isabus, 0x40, 0, NULL);
/* speaker */
pcspk_init(isabus, isa);
@@ -106,7 +107,7 @@ static void i82378_realize(PCIDevice *pci, Error **errp)
isa = isa_create_simple(isabus, "i82374");
/* timer */
- isa_create_simple(isabus, "mc146818rtc");
+ isa_create_simple(isabus, TYPE_MC146818_RTC);
}
static void i82378_init(Object *obj)
diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c
index c129985e2a..4084b32be9 100644
--- a/hw/isa/vt82c686.c
+++ b/hw/isa/vt82c686.c
@@ -12,7 +12,6 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/isa/vt82c686.h"
#include "hw/i2c/i2c.h"
#include "hw/i2c/smbus.h"
diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c
index 66eace5a5c..6e74b61cb6 100644
--- a/hw/mem/pc-dimm.c
+++ b/hw/mem/pc-dimm.c
@@ -109,7 +109,6 @@ void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms,
memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr);
vmstate_register_ram(vmstate_mr, dev);
- numa_set_mem_node_id(addr, memory_region_size(mr), dimm->node);
out:
error_propagate(errp, local_err);
@@ -122,7 +121,6 @@ void pc_dimm_memory_unplug(DeviceState *dev, MemoryHotplugState *hpms,
PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
MemoryRegion *vmstate_mr = ddc->get_vmstate_memory_region(dimm);
- numa_unset_mem_node_id(dimm->addr, memory_region_size(mr), dimm->node);
memory_region_del_subregion(&hpms->mr, mr);
vmstate_unregister_ram(vmstate_mr, dev);
}
diff --git a/hw/mips/boston.c b/hw/mips/boston.c
index 1cb4b6aca2..fb23161b33 100644
--- a/hw/mips/boston.c
+++ b/hw/mips/boston.c
@@ -248,16 +248,6 @@ static const MemoryRegionOps boston_platreg_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static void boston_flash_write(void *opaque, hwaddr addr,
- uint64_t val, unsigned size)
-{
-}
-
-static const MemoryRegionOps boston_flash_ops = {
- .write = boston_flash_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
-
static const TypeInfo boston_device = {
.name = TYPE_MIPS_BOSTON,
.parent = TYPE_SYS_BUS_DEVICE,
@@ -481,8 +471,8 @@ static void boston_mach_init(MachineState *machine)
sysbus_mmio_map_overlap(SYS_BUS_DEVICE(s->cps), 0, 0, 1);
flash = g_new(MemoryRegion, 1);
- memory_region_init_rom_device_nomigrate(flash, NULL, &boston_flash_ops, s,
- "boston.flash", 128 * M_BYTE, &err);
+ memory_region_init_rom_nomigrate(flash, NULL,
+ "boston.flash", 128 * M_BYTE, &err);
memory_region_add_subregion_overlap(sys_mem, 0x18000000, flash, 0);
ddr = g_new(MemoryRegion, 1);
diff --git a/hw/mips/mips_fulong2e.c b/hw/mips/mips_fulong2e.c
index 146cf0fccd..725e25a134 100644
--- a/hw/mips/mips_fulong2e.c
+++ b/hw/mips/mips_fulong2e.c
@@ -359,13 +359,13 @@ static void mips_fulong2e_init(MachineState *machine)
smbus_eeprom_init(smbus, 1, eeprom_spd, sizeof(eeprom_spd));
/* init other devices */
- pit = pit_init(isa_bus, 0x40, 0, NULL);
+ pit = i8254_pit_init(isa_bus, 0x40, 0, NULL);
DMA_init(isa_bus, 0);
/* Super I/O */
isa_create_simple(isa_bus, "i8042");
- rtc_init(isa_bus, 2000, NULL);
+ mc146818_rtc_init(isa_bus, 2000, NULL);
serial_hds_isa_init(isa_bus, 0, MAX_SERIAL_PORTS);
parallel_hds_isa_init(isa_bus, 1);
diff --git a/hw/mips/mips_jazz.c b/hw/mips/mips_jazz.c
index fe4f17389f..0d2c0683ba 100644
--- a/hw/mips/mips_jazz.c
+++ b/hw/mips/mips_jazz.c
@@ -39,6 +39,7 @@
#include "hw/loader.h"
#include "hw/timer/mc146818rtc.h"
#include "hw/timer/i8254.h"
+#include "hw/display/vga.h"
#include "hw/audio/pcspk.h"
#include "sysemu/block-backend.h"
#include "hw/sysbus.h"
@@ -218,7 +219,7 @@ static void mips_jazz_init(MachineState *machine,
i8259 = i8259_init(isa_bus, env->irq[4]);
isa_bus_irqs(isa_bus, i8259);
DMA_init(isa_bus, 0);
- pit = pit_init(isa_bus, 0x40, 0, NULL);
+ pit = i8254_pit_init(isa_bus, 0x40, 0, NULL);
pcspk_init(isa_bus, pit);
/* Video card */
@@ -288,7 +289,7 @@ static void mips_jazz_init(MachineState *machine,
fdctrl_init_sysbus(qdev_get_gpio_in(rc4030, 1), -1, 0x80003000, fds);
/* Real time clock */
- rtc_init(isa_bus, 1980, NULL);
+ mc146818_rtc_init(isa_bus, 1980, NULL);
memory_region_init_io(rtc, NULL, &rtc_ops, NULL, "rtc", 0x1000);
memory_region_add_subregion(address_space, 0x80004000, rtc);
diff --git a/hw/mips/mips_malta.c b/hw/mips/mips_malta.c
index ec6af4a277..37f19428d6 100644
--- a/hw/mips/mips_malta.c
+++ b/hw/mips/mips_malta.c
@@ -1208,13 +1208,13 @@ void mips_malta_init(MachineState *machine)
isa_get_irq(NULL, 9), NULL, 0, NULL);
smbus_eeprom_init(smbus, 8, smbus_eeprom_buf, smbus_eeprom_size);
g_free(smbus_eeprom_buf);
- pit = pit_init(isa_bus, 0x40, 0, NULL);
+ pit = i8254_pit_init(isa_bus, 0x40, 0, NULL);
DMA_init(isa_bus, 0);
/* Super I/O */
isa_create_simple(isa_bus, "i8042");
- rtc_init(isa_bus, 2000, NULL);
+ mc146818_rtc_init(isa_bus, 2000, NULL);
serial_hds_isa_init(isa_bus, 0, 2);
parallel_hds_isa_init(isa_bus, 1);
diff --git a/hw/mips/mips_r4k.c b/hw/mips/mips_r4k.c
index 3bbb1827e1..244bd41813 100644
--- a/hw/mips/mips_r4k.c
+++ b/hw/mips/mips_r4k.c
@@ -18,6 +18,7 @@
#include "hw/char/serial.h"
#include "hw/isa/isa.h"
#include "net/net.h"
+#include "hw/net/ne2000-isa.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "hw/block/flash.h"
@@ -270,9 +271,9 @@ void mips_r4k_init(MachineState *machine)
i8259 = i8259_init(isa_bus, env->irq[2]);
isa_bus_irqs(isa_bus, i8259);
- rtc_init(isa_bus, 2000, NULL);
+ mc146818_rtc_init(isa_bus, 2000, NULL);
- pit = pit_init(isa_bus, 0x40, 0, NULL);
+ pit = i8254_pit_init(isa_bus, 0x40, 0, NULL);
serial_hds_isa_init(isa_bus, 0, MAX_SERIAL_PORTS);
diff --git a/hw/misc/Makefile.objs b/hw/misc/Makefile.objs
index 10c88a84b4..d517f83e81 100644
--- a/hw/misc/Makefile.objs
+++ b/hw/misc/Makefile.objs
@@ -11,8 +11,6 @@ common-obj-$(CONFIG_EDU) += edu.o
common-obj-y += unimp.o
common-obj-$(CONFIG_FW_CFG_DMA) += vmcoreinfo.o
-obj-$(CONFIG_VMPORT) += vmport.o
-
# ARM devices
common-obj-$(CONFIG_PL310) += arm_l2x0.o
common-obj-$(CONFIG_INTEGRATOR_DEBUG) += arm_integrator_debug.o
diff --git a/hw/misc/imx6_ccm.c b/hw/misc/imx6_ccm.c
index 1b421013a3..4fa94835fe 100644
--- a/hw/misc/imx6_ccm.c
+++ b/hw/misc/imx6_ccm.c
@@ -335,7 +335,7 @@ static uint64_t imx6_ccm_get_ipg_clk(IMX6CCMState *dev)
uint64_t freq = 0;
freq = imx6_ccm_get_ahb_clk(dev)
- / (1 + EXTRACT(dev->ccm[CCM_CBCDR], IPG_PODF));;
+ / (1 + EXTRACT(dev->ccm[CCM_CBCDR], IPG_PODF));
DPRINTF("freq = %d\n", (uint32_t)freq);
diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c
index a5a46827fe..4919011f38 100644
--- a/hw/misc/ivshmem.c
+++ b/hw/misc/ivshmem.c
@@ -20,7 +20,6 @@
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
diff --git a/hw/misc/pvpanic.c b/hw/misc/pvpanic.c
index 2b1e9a6450..b26250dec9 100644
--- a/hw/misc/pvpanic.c
+++ b/hw/misc/pvpanic.c
@@ -13,14 +13,11 @@
*/
#include "qemu/osdep.h"
-#include "qapi/qmp/qobject.h"
-#include "qapi/qmp/qjson.h"
#include "sysemu/sysemu.h"
#include "qemu/log.h"
#include "hw/nvram/fw_cfg.h"
-#include "hw/i386/pc.h"
-#include "qapi-event.h"
+#include "hw/misc/pvpanic.h"
/* The bit of supported pv event */
#define PVPANIC_F_PANICKED 0
@@ -28,9 +25,8 @@
/* The pv event value */
#define PVPANIC_PANICKED (1 << PVPANIC_F_PANICKED)
-#define TYPE_ISA_PVPANIC_DEVICE "pvpanic"
#define ISA_PVPANIC_DEVICE(obj) \
- OBJECT_CHECK(PVPanicState, (obj), TYPE_ISA_PVPANIC_DEVICE)
+ OBJECT_CHECK(PVPanicState, (obj), TYPE_PVPANIC)
static void handle_event(int event)
{
@@ -107,7 +103,7 @@ static void pvpanic_isa_realizefn(DeviceState *dev, Error **errp)
uint16_t pvpanic_port(void)
{
- Object *o = object_resolve_path_type("", TYPE_ISA_PVPANIC_DEVICE, NULL);
+ Object *o = object_resolve_path_type("", TYPE_PVPANIC, NULL);
if (!o) {
return 0;
}
@@ -129,7 +125,7 @@ static void pvpanic_isa_class_init(ObjectClass *klass, void *data)
}
static TypeInfo pvpanic_isa_info = {
- .name = TYPE_ISA_PVPANIC_DEVICE,
+ .name = TYPE_PVPANIC,
.parent = TYPE_ISA_DEVICE,
.instance_size = sizeof(PVPanicState),
.instance_init = pvpanic_isa_initfn,
diff --git a/hw/misc/sga.c b/hw/misc/sga.c
index 03b006d6f0..97fd63f176 100644
--- a/hw/misc/sga.c
+++ b/hw/misc/sga.c
@@ -26,7 +26,6 @@
*/
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
-#include "hw/i386/pc.h"
#include "hw/loader.h"
#include "sysemu/sysemu.h"
diff --git a/hw/misc/vmcoreinfo.c b/hw/misc/vmcoreinfo.c
index 31db57ab44..a2805527cb 100644
--- a/hw/misc/vmcoreinfo.c
+++ b/hw/misc/vmcoreinfo.c
@@ -35,6 +35,8 @@ static void vmcoreinfo_realize(DeviceState *dev, Error **errp)
{
VMCoreInfoState *s = VMCOREINFO(dev);
FWCfgState *fw_cfg = fw_cfg_find();
+ /* for gdb script dump-guest-memory.py */
+ static VMCoreInfoState * volatile vmcoreinfo_state G_GNUC_UNUSED;
/* Given that this function is executing, there is at least one VMCOREINFO
* device. Check if there are several.
@@ -56,6 +58,7 @@ static void vmcoreinfo_realize(DeviceState *dev, Error **errp)
&s->vmcoreinfo, sizeof(s->vmcoreinfo), false);
qemu_register_reset(vmcoreinfo_reset, dev);
+ vmcoreinfo_state = s;
}
static const VMStateDescription vmstate_vmcoreinfo = {
diff --git a/hw/moxie/moxiesim.c b/hw/moxie/moxiesim.c
index 3ba58481d0..6c200becab 100644
--- a/hw/moxie/moxiesim.c
+++ b/hw/moxie/moxiesim.c
@@ -25,12 +25,12 @@
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/sysbus.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "net/net.h"
#include "sysemu/sysemu.h"
@@ -41,6 +41,8 @@
#include "elf.h"
#define PHYS_MEM_BASE 0x80000000
+#define FIRMWARE_BASE 0x1000
+#define FIRMWARE_SIZE (128 * 0x1000)
typedef struct {
uint64_t ram_size;
@@ -123,8 +125,8 @@ static void moxiesim_init(MachineState *machine)
memory_region_init_ram(ram, NULL, "moxiesim.ram", ram_size, &error_fatal);
memory_region_add_subregion(address_space_mem, ram_base, ram);
- memory_region_init_ram(rom, NULL, "moxie.rom", 128 * 0x1000, &error_fatal);
- memory_region_add_subregion(get_system_memory(), 0x1000, rom);
+ memory_region_init_ram(rom, NULL, "moxie.rom", FIRMWARE_SIZE, &error_fatal);
+ memory_region_add_subregion(get_system_memory(), FIRMWARE_BASE, rom);
if (kernel_filename) {
loader_params.ram_size = ram_size;
@@ -133,6 +135,11 @@ static void moxiesim_init(MachineState *machine)
loader_params.initrd_filename = initrd_filename;
load_kernel(cpu, &loader_params);
}
+ if (bios_name) {
+ if (load_image_targphys(bios_name, FIRMWARE_BASE, FIRMWARE_SIZE) < 0) {
+ error_report("Failed to load firmware '%s'", bios_name);
+ }
+ }
/* A single 16450 sits at offset 0x3f8. */
if (serial_hds[0]) {
diff --git a/hw/net/e1000.c b/hw/net/e1000.c
index 05a00cba31..804ec08721 100644
--- a/hw/net/e1000.c
+++ b/hw/net/e1000.c
@@ -98,7 +98,10 @@ typedef struct E1000State_st {
unsigned char data[0x10000];
uint16_t size;
unsigned char vlan_needed;
+ unsigned char sum_needed;
+ bool cptse;
e1000x_txd_props props;
+ e1000x_txd_props tso_props;
uint16_t tso_frames;
} tx;
@@ -539,35 +542,37 @@ xmit_seg(E1000State *s)
uint16_t len;
unsigned int frames = s->tx.tso_frames, css, sofar;
struct e1000_tx *tp = &s->tx;
+ struct e1000x_txd_props *props = tp->cptse ? &tp->tso_props : &tp->props;
- if (tp->props.tse && tp->props.cptse) {
- css = tp->props.ipcss;
+ if (tp->cptse) {
+ css = props->ipcss;
DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
frames, tp->size, css);
- if (tp->props.ip) { /* IPv4 */
+ if (props->ip) { /* IPv4 */
stw_be_p(tp->data+css+2, tp->size - css);
stw_be_p(tp->data+css+4,
lduw_be_p(tp->data + css + 4) + frames);
} else { /* IPv6 */
stw_be_p(tp->data+css+4, tp->size - css);
}
- css = tp->props.tucss;
+ css = props->tucss;
len = tp->size - css;
- DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->props.tcp, css, len);
- if (tp->props.tcp) {
- sofar = frames * tp->props.mss;
+ DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", props->tcp, css, len);
+ if (props->tcp) {
+ sofar = frames * props->mss;
stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
- if (tp->props.paylen - sofar > tp->props.mss) {
+ if (props->paylen - sofar > props->mss) {
tp->data[css + 13] &= ~9; /* PSH, FIN */
} else if (frames) {
e1000x_inc_reg_if_not_full(s->mac_reg, TSCTC);
}
- } else /* UDP */
+ } else { /* UDP */
stw_be_p(tp->data+css+4, len);
- if (tp->props.sum_needed & E1000_TXD_POPTS_TXSM) {
+ }
+ if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
unsigned int phsum;
// add pseudo-header length before checksum calculation
- void *sp = tp->data + tp->props.tucso;
+ void *sp = tp->data + props->tucso;
phsum = lduw_be_p(sp) + len;
phsum = (phsum >> 16) + (phsum & 0xffff);
@@ -576,13 +581,11 @@ xmit_seg(E1000State *s)
tp->tso_frames++;
}
- if (tp->props.sum_needed & E1000_TXD_POPTS_TXSM) {
- putsum(tp->data, tp->size, tp->props.tucso,
- tp->props.tucss, tp->props.tucse);
+ if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
+ putsum(tp->data, tp->size, props->tucso, props->tucss, props->tucse);
}
- if (tp->props.sum_needed & E1000_TXD_POPTS_IXSM) {
- putsum(tp->data, tp->size, tp->props.ipcso,
- tp->props.ipcss, tp->props.ipcse);
+ if (tp->sum_needed & E1000_TXD_POPTS_IXSM) {
+ putsum(tp->data, tp->size, props->ipcso, props->ipcss, props->ipcse);
}
if (tp->vlan_needed) {
memmove(tp->vlan, tp->data, 4);
@@ -614,27 +617,27 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */
- e1000x_read_tx_ctx_descr(xp, &tp->props);
- tp->tso_frames = 0;
- if (tp->props.tucso == 0) { /* this is probably wrong */
- DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
- tp->props.tucso = tp->props.tucss + (tp->props.tcp ? 16 : 6);
+ if (le32_to_cpu(xp->cmd_and_length) & E1000_TXD_CMD_TSE) {
+ e1000x_read_tx_ctx_descr(xp, &tp->tso_props);
+ tp->tso_frames = 0;
+ } else {
+ e1000x_read_tx_ctx_descr(xp, &tp->props);
}
return;
} else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
// data descriptor
if (tp->size == 0) {
- tp->props.sum_needed = le32_to_cpu(dp->upper.data) >> 8;
+ tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
}
- tp->props.cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
+ tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
} else {
// legacy descriptor
- tp->props.cptse = 0;
+ tp->cptse = 0;
}
if (e1000x_vlan_enabled(s->mac_reg) &&
e1000x_is_vlan_txd(txd_lower) &&
- (tp->props.cptse || txd_lower & E1000_TXD_CMD_EOP)) {
+ (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
tp->vlan_needed = 1;
stw_be_p(tp->vlan_header,
le16_to_cpu(s->mac_reg[VET]));
@@ -643,8 +646,8 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
}
addr = le64_to_cpu(dp->buffer_addr);
- if (tp->props.tse && tp->props.cptse) {
- msh = tp->props.hdr_len + tp->props.mss;
+ if (tp->cptse) {
+ msh = tp->tso_props.hdr_len + tp->tso_props.mss;
do {
bytes = split_size;
if (tp->size + bytes > msh)
@@ -653,21 +656,19 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
bytes = MIN(sizeof(tp->data) - tp->size, bytes);
pci_dma_read(d, addr, tp->data + tp->size, bytes);
sz = tp->size + bytes;
- if (sz >= tp->props.hdr_len && tp->size < tp->props.hdr_len) {
- memmove(tp->header, tp->data, tp->props.hdr_len);
+ if (sz >= tp->tso_props.hdr_len
+ && tp->size < tp->tso_props.hdr_len) {
+ memmove(tp->header, tp->data, tp->tso_props.hdr_len);
}
tp->size = sz;
addr += bytes;
if (sz == msh) {
xmit_seg(s);
- memmove(tp->data, tp->header, tp->props.hdr_len);
- tp->size = tp->props.hdr_len;
+ memmove(tp->data, tp->header, tp->tso_props.hdr_len);
+ tp->size = tp->tso_props.hdr_len;
}
split_size -= bytes;
} while (bytes && split_size);
- } else if (!tp->props.tse && tp->props.cptse) {
- // context descriptor TSE is not set, while data descriptor TSE is set
- DBGOUT(TXERR, "TCP segmentation error\n");
} else {
split_size = MIN(sizeof(tp->data) - tp->size, split_size);
pci_dma_read(d, addr, tp->data + tp->size, split_size);
@@ -676,14 +677,14 @@ process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
if (!(txd_lower & E1000_TXD_CMD_EOP))
return;
- if (!(tp->props.tse && tp->props.cptse && tp->size < tp->props.hdr_len)) {
+ if (!(tp->cptse && tp->size < tp->tso_props.hdr_len)) {
xmit_seg(s);
}
tp->tso_frames = 0;
- tp->props.sum_needed = 0;
+ tp->sum_needed = 0;
tp->vlan_needed = 0;
tp->size = 0;
- tp->props.cptse = 0;
+ tp->cptse = 0;
}
static uint32_t
@@ -1435,7 +1436,7 @@ static const VMStateDescription vmstate_e1000_full_mac_state = {
static const VMStateDescription vmstate_e1000 = {
.name = "e1000",
- .version_id = 2,
+ .version_id = 3,
.minimum_version_id = 1,
.pre_save = e1000_pre_save,
.post_load = e1000_post_load,
@@ -1461,7 +1462,7 @@ static const VMStateDescription vmstate_e1000 = {
VMSTATE_UINT16(tx.props.mss, E1000State),
VMSTATE_UINT16(tx.size, E1000State),
VMSTATE_UINT16(tx.tso_frames, E1000State),
- VMSTATE_UINT8(tx.props.sum_needed, E1000State),
+ VMSTATE_UINT8(tx.sum_needed, E1000State),
VMSTATE_INT8(tx.props.ip, E1000State),
VMSTATE_INT8(tx.props.tcp, E1000State),
VMSTATE_BUFFER(tx.header, E1000State),
@@ -1508,6 +1509,17 @@ static const VMStateDescription vmstate_e1000 = {
VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
+ VMSTATE_UINT8_V(tx.tso_props.ipcss, E1000State, 3),
+ VMSTATE_UINT8_V(tx.tso_props.ipcso, E1000State, 3),
+ VMSTATE_UINT16_V(tx.tso_props.ipcse, E1000State, 3),
+ VMSTATE_UINT8_V(tx.tso_props.tucss, E1000State, 3),
+ VMSTATE_UINT8_V(tx.tso_props.tucso, E1000State, 3),
+ VMSTATE_UINT16_V(tx.tso_props.tucse, E1000State, 3),
+ VMSTATE_UINT32_V(tx.tso_props.paylen, E1000State, 3),
+ VMSTATE_UINT8_V(tx.tso_props.hdr_len, E1000State, 3),
+ VMSTATE_UINT16_V(tx.tso_props.mss, E1000State, 3),
+ VMSTATE_INT8_V(tx.tso_props.ip, E1000State, 3),
+ VMSTATE_INT8_V(tx.tso_props.tcp, E1000State, 3),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription*[]) {
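Note: on the migration side, the e1000 change above bumps .version_id to 3 and declares the new tso_props fields with the *_V macros, so they are only sent (and expected) for streams of at least that version, while .minimum_version_id staying at 1 keeps older streams loadable. A hedged sketch of the shape, with an illustrative BazState:

#include "qemu/osdep.h"
#include "migration/vmstate.h"

typedef struct BazState {
    uint8_t old_field;
    uint8_t new_field;      /* added later, migrated from v3 on */
} BazState;

static const VMStateDescription vmstate_baz = {
    .name = "baz",
    .version_id = 3,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(old_field, BazState),        /* present since v1 */
        VMSTATE_UINT8_V(new_field, BazState, 3),   /* only for v3+ streams */
        VMSTATE_END_OF_LIST()
    }
};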
diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c
index f1af279e8d..191398a3d5 100644
--- a/hw/net/e1000e.c
+++ b/hw/net/e1000e.c
@@ -556,7 +556,7 @@ static const VMStateDescription e1000e_vmstate_tx = {
.version_id = 1,
.minimum_version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_UINT8(props.sum_needed, struct e1000e_tx),
+ VMSTATE_UINT8(sum_needed, struct e1000e_tx),
VMSTATE_UINT8(props.ipcss, struct e1000e_tx),
VMSTATE_UINT8(props.ipcso, struct e1000e_tx),
VMSTATE_UINT16(props.ipcse, struct e1000e_tx),
@@ -569,7 +569,7 @@ static const VMStateDescription e1000e_vmstate_tx = {
VMSTATE_INT8(props.ip, struct e1000e_tx),
VMSTATE_INT8(props.tcp, struct e1000e_tx),
VMSTATE_BOOL(props.tse, struct e1000e_tx),
- VMSTATE_BOOL(props.cptse, struct e1000e_tx),
+ VMSTATE_BOOL(cptse, struct e1000e_tx),
VMSTATE_BOOL(skip_cp, struct e1000e_tx),
VMSTATE_END_OF_LIST()
}
diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c
index 43a8d89955..c93c4661ed 100644
--- a/hw/net/e1000e_core.c
+++ b/hw/net/e1000e_core.c
@@ -632,18 +632,18 @@ e1000e_rss_parse_packet(E1000ECore *core,
static void
e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx)
{
- if (tx->props.tse && tx->props.cptse) {
+ if (tx->props.tse && tx->cptse) {
net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss);
net_tx_pkt_update_ip_checksums(tx->tx_pkt);
e1000x_inc_reg_if_not_full(core->mac, TSCTC);
return;
}
- if (tx->props.sum_needed & E1000_TXD_POPTS_TXSM) {
+ if (tx->sum_needed & E1000_TXD_POPTS_TXSM) {
net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0);
}
- if (tx->props.sum_needed & E1000_TXD_POPTS_IXSM) {
+ if (tx->sum_needed & E1000_TXD_POPTS_IXSM) {
net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
}
}
@@ -715,13 +715,13 @@ e1000e_process_tx_desc(E1000ECore *core,
return;
} else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
/* data descriptor */
- tx->props.sum_needed = le32_to_cpu(dp->upper.data) >> 8;
- tx->props.cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
+ tx->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
+ tx->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
e1000e_process_ts_option(core, dp);
} else {
/* legacy descriptor */
e1000e_process_ts_option(core, dp);
- tx->props.cptse = 0;
+ tx->cptse = 0;
}
addr = le64_to_cpu(dp->buffer_addr);
@@ -747,8 +747,8 @@ e1000e_process_tx_desc(E1000ECore *core,
tx->skip_cp = false;
net_tx_pkt_reset(tx->tx_pkt);
- tx->props.sum_needed = 0;
- tx->props.cptse = 0;
+ tx->sum_needed = 0;
+ tx->cptse = 0;
}
}
diff --git a/hw/net/e1000e_core.h b/hw/net/e1000e_core.h
index 1ff6978ca1..7d8ff41890 100644
--- a/hw/net/e1000e_core.h
+++ b/hw/net/e1000e_core.h
@@ -71,6 +71,8 @@ struct E1000Core {
e1000x_txd_props props;
bool skip_cp;
+ unsigned char sum_needed;
+ bool cptse;
struct NetTxPkt *tx_pkt;
} tx[E1000E_NUM_QUEUES];
diff --git a/hw/net/e1000x_common.h b/hw/net/e1000x_common.h
index 3072ce9d50..0268884e72 100644
--- a/hw/net/e1000x_common.h
+++ b/hw/net/e1000x_common.h
@@ -193,7 +193,6 @@ void e1000x_update_regs_on_autoneg_done(uint32_t *mac, uint16_t *phy);
void e1000x_increase_size_stats(uint32_t *mac, const int *size_regs, int size);
typedef struct e1000x_txd_props {
- unsigned char sum_needed;
uint8_t ipcss;
uint8_t ipcso;
uint16_t ipcse;
@@ -206,7 +205,6 @@ typedef struct e1000x_txd_props {
int8_t ip;
int8_t tcp;
bool tse;
- bool cptse;
} e1000x_txd_props;
void e1000x_read_tx_ctx_descr(struct e1000_context_desc *d,
diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c
index 1c0def555b..a07a63247e 100644
--- a/hw/net/eepro100.c
+++ b/hw/net/eepro100.c
@@ -44,6 +44,7 @@
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "net/net.h"
+#include "net/eth.h"
#include "hw/nvram/eeprom93xx.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"
@@ -323,32 +324,8 @@ static const uint16_t eepro100_mdi_mask[] = {
0xffff, 0xffff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
};
-#define POLYNOMIAL 0x04c11db6
-
static E100PCIDeviceInfo *eepro100_get_class(EEPRO100State *s);
-/* From FreeBSD (locally modified). */
-static unsigned e100_compute_mcast_idx(const uint8_t *ep)
-{
- uint32_t crc;
- int carry, i, j;
- uint8_t b;
-
- crc = 0xffffffff;
- for (i = 0; i < 6; i++) {
- b = *ep++;
- for (j = 0; j < 8; j++) {
- carry = ((crc & 0x80000000L) ? 1 : 0) ^ (b & 0x01);
- crc <<= 1;
- b >>= 1;
- if (carry) {
- crc = ((crc ^ POLYNOMIAL) | carry);
- }
- }
- }
- return (crc & BITS(7, 2)) >> 2;
-}
-
/* Read a 16 bit control/status (CSR) register. */
static uint16_t e100_read_reg2(EEPRO100State *s, E100RegisterOffset addr)
{
@@ -845,7 +822,8 @@ static void set_multicast_list(EEPRO100State *s)
uint8_t multicast_addr[6];
pci_dma_read(&s->dev, s->cb_address + 10 + i, multicast_addr, 6);
TRACE(OTHER, logout("multicast entry %s\n", nic_dump(multicast_addr, 6)));
- unsigned mcast_idx = e100_compute_mcast_idx(multicast_addr);
+ unsigned mcast_idx = (net_crc32(multicast_addr, ETH_ALEN) &
+ BITS(7, 2)) >> 2;
assert(mcast_idx < 64);
s->mult[mcast_idx >> 3] |= (1 << (mcast_idx & 7));
}
@@ -1681,7 +1659,7 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
if (s->configuration[21] & BIT(3)) {
/* Multicast all bit is set, receive all multicast frames. */
} else {
- unsigned mcast_idx = e100_compute_mcast_idx(buf);
+ unsigned mcast_idx = (net_crc32(buf, ETH_ALEN) & BITS(7, 2)) >> 2;
assert(mcast_idx < 64);
if (s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))) {
/* Multicast frame is allowed in hash table. */
@@ -1701,7 +1679,7 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
rfd_status |= 0x0004;
} else if (s->configuration[20] & BIT(6)) {
/* Multiple IA bit set. */
- unsigned mcast_idx = compute_mcast_idx(buf);
+ unsigned mcast_idx = net_crc32(buf, ETH_ALEN) >> 26;
assert(mcast_idx < 64);
if (s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))) {
TRACE(RXTX, logout("%p accepted, multiple IA bit set\n", s));
diff --git a/hw/net/ftgmac100.c b/hw/net/ftgmac100.c
index 3c36ab9cec..704f452067 100644
--- a/hw/net/ftgmac100.c
+++ b/hw/net/ftgmac100.c
@@ -762,7 +762,7 @@ static int ftgmac100_filter(FTGMAC100State *s, const uint8_t *buf, size_t len)
}
/* TODO: this does not seem to work for ftgmac100 */
- mcast_idx = compute_mcast_idx(buf);
+ mcast_idx = net_crc32(buf, ETH_ALEN) >> 26;
if (!(s->math[mcast_idx / 32] & (1 << (mcast_idx % 32)))) {
return 0;
}
diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c
index 3db8937cac..b9032dac59 100644
--- a/hw/net/lan9118.c
+++ b/hw/net/lan9118.c
@@ -13,6 +13,7 @@
#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "net/net.h"
+#include "net/eth.h"
#include "hw/devices.h"
#include "sysemu/sysemu.h"
#include "hw/ptimer.h"
@@ -504,7 +505,7 @@ static int lan9118_filter(lan9118_state *s, const uint8_t *addr)
}
} else {
/* Hash matching */
- hash = compute_mcast_idx(addr);
+ hash = net_crc32(addr, ETH_ALEN) >> 26;
if (hash & 0x20) {
return (s->mac_hashh >> (hash & 0x1f)) & 1;
} else {
diff --git a/hw/net/ne2000-isa.c b/hw/net/ne2000-isa.c
index f3455339ee..70e5c1d3d4 100644
--- a/hw/net/ne2000-isa.c
+++ b/hw/net/ne2000-isa.c
@@ -22,17 +22,15 @@
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
-#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
+#include "hw/net/ne2000-isa.h"
#include "hw/qdev.h"
-#include "net/net.h"
#include "ne2000.h"
+#include "sysemu/sysemu.h"
#include "exec/address-spaces.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
-#define TYPE_ISA_NE2000 "ne2k_isa"
#define ISA_NE2000(obj) OBJECT_CHECK(ISANE2000State, (obj), TYPE_ISA_NE2000)
typedef struct ISANE2000State {
diff --git a/hw/net/ne2000.c b/hw/net/ne2000.c
index 3938e6ddd8..687ef84aac 100644
--- a/hw/net/ne2000.c
+++ b/hw/net/ne2000.c
@@ -22,9 +22,9 @@
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
-#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "net/net.h"
+#include "net/eth.h"
#include "ne2000.h"
#include "hw/loader.h"
#include "sysemu/sysemu.h"
@@ -201,7 +201,7 @@ ssize_t ne2000_receive(NetClientState *nc, const uint8_t *buf, size_t size_)
/* multicast */
if (!(s->rxcr & 0x08))
return size;
- mcast_idx = compute_mcast_idx(buf);
+ mcast_idx = net_crc32(buf, ETH_ALEN) >> 26;
if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))))
return size;
} else if (s->mem[0] == buf[0] &&
diff --git a/hw/net/ne2000.h b/hw/net/ne2000.h
index d213dccae3..adb8021bd1 100644
--- a/hw/net/ne2000.h
+++ b/hw/net/ne2000.h
@@ -1,6 +1,9 @@
#ifndef HW_NE2000_H
#define HW_NE2000_H
+#include "hw/hw.h"
+#include "net/net.h"
+
#define NE2000_PMEM_SIZE (32*1024)
#define NE2000_PMEM_START (16*1024)
#define NE2000_PMEM_END (NE2000_PMEM_SIZE+NE2000_PMEM_START)
diff --git a/hw/net/opencores_eth.c b/hw/net/opencores_eth.c
index 268d6a7892..d42b79c08c 100644
--- a/hw/net/opencores_eth.c
+++ b/hw/net/opencores_eth.c
@@ -36,6 +36,7 @@
#include "hw/net/mii.h"
#include "hw/sysbus.h"
#include "net/net.h"
+#include "net/eth.h"
#include "sysemu/sysemu.h"
#include "trace.h"
@@ -373,7 +374,7 @@ static ssize_t open_eth_receive(NetClientState *nc,
if (memcmp(buf, bcast_addr, sizeof(bcast_addr)) == 0) {
miss = GET_REGBIT(s, MODER, BRO);
} else if ((buf[0] & 0x1) || GET_REGBIT(s, MODER, IAM)) {
- unsigned mcast_idx = compute_mcast_idx(buf);
+ unsigned mcast_idx = net_crc32(buf, ETH_ALEN) >> 26;
miss = !(s->regs[HASH0 + mcast_idx / 32] &
(1 << (mcast_idx % 32)));
trace_open_eth_receive_mcast(
diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c
index 654455355f..39d5d93525 100644
--- a/hw/net/pcnet.c
+++ b/hw/net/pcnet.c
@@ -38,6 +38,7 @@
#include "qemu/osdep.h"
#include "hw/qdev.h"
#include "net/net.h"
+#include "net/eth.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"
#include "sysemu/sysemu.h"
@@ -522,25 +523,6 @@ static inline void pcnet_rmd_store(PCNetState *s, struct pcnet_RMD *rmd,
be16_to_cpu(hdr->ether_type)); \
} while (0)
-#define MULTICAST_FILTER_LEN 8
-
-static inline uint32_t lnc_mchash(const uint8_t *ether_addr)
-{
-#define LNC_POLYNOMIAL 0xEDB88320UL
- uint32_t crc = 0xFFFFFFFF;
- int idx, bit;
- uint8_t data;
-
- for (idx = 0; idx < 6; idx++) {
- for (data = *ether_addr++, bit = 0; bit < MULTICAST_FILTER_LEN; bit++) {
- crc = (crc >> 1) ^ (((crc ^ data) & 1) ? LNC_POLYNOMIAL : 0);
- data >>= 1;
- }
- }
- return crc;
-#undef LNC_POLYNOMIAL
-}
-
#define CRC(crc, ch) (crc = (crc >> 8) ^ crctab[(crc ^ (ch)) & 0xff])
/* generated using the AUTODIN II polynomial
@@ -656,7 +638,7 @@ static inline int ladr_match(PCNetState *s, const uint8_t *buf, int size)
s->csr[10] & 0xff, s->csr[10] >> 8,
s->csr[11] & 0xff, s->csr[11] >> 8
};
- int index = lnc_mchash(hdr->ether_dhost) >> 26;
+ int index = net_crc32_le(hdr->ether_dhost, ETH_ALEN) >> 26;
return !!(ladr[index >> 3] & (1 << (index & 7)));
}
return 0;
diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c
index a6b2a9f7a4..1cc95b8cba 100644
--- a/hw/net/rtl8139.c
+++ b/hw/net/rtl8139.c
@@ -882,7 +882,7 @@ static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t
return size;
}
- int mcast_idx = compute_mcast_idx(buf);
+ int mcast_idx = net_crc32(buf, ETH_ALEN) >> 26;
if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))))
{
diff --git a/hw/net/sungem.c b/hw/net/sungem.c
index 6aa8d1117b..60f1e479f3 100644
--- a/hw/net/sungem.c
+++ b/hw/net/sungem.c
@@ -11,12 +11,11 @@
#include "hw/pci/pci.h"
#include "qemu/log.h"
#include "net/net.h"
+#include "net/eth.h"
#include "net/checksum.h"
#include "hw/net/mii.h"
#include "sysemu/sysemu.h"
#include "trace.h"
-/* For crc32 */
-#include <zlib.h>
#define TYPE_SUNGEM "sungem"
@@ -595,7 +594,7 @@ static ssize_t sungem_receive(NetClientState *nc, const uint8_t *buf,
}
/* Get MAC crc */
- mac_crc = crc32(~0, buf, 6);
+ mac_crc = net_crc32_le(buf, ETH_ALEN);
/* Packet isn't for me ? */
rx_cond = sungem_check_rx_mac(s, buf, mac_crc);
diff --git a/hw/net/sunhme.c b/hw/net/sunhme.c
index b1efa1b88d..7558fca8f9 100644
--- a/hw/net/sunhme.c
+++ b/hw/net/sunhme.c
@@ -698,29 +698,6 @@ static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
s->erxregs[HME_ERXI_RING >> 2] = ring;
}
-#define POLYNOMIAL_LE 0xedb88320
-static uint32_t sunhme_crc32_le(const uint8_t *p, int len)
-{
- uint32_t crc;
- int carry, i, j;
- uint8_t b;
-
- crc = 0xffffffff;
- for (i = 0; i < len; i++) {
- b = *p++;
- for (j = 0; j < 8; j++) {
- carry = (crc & 0x1) ^ (b & 0x01);
- crc >>= 1;
- b >>= 1;
- if (carry) {
- crc = crc ^ POLYNOMIAL_LE;
- }
- }
- }
-
- return crc;
-}
-
#define MIN_BUF_SIZE 60
static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
@@ -761,7 +738,7 @@ static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
trace_sunhme_rx_filter_bcast_match();
} else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
/* Didn't match local address, check hash filter */
- int mcast_idx = sunhme_crc32_le(buf, 6) >> 26;
+ int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
(1 << (mcast_idx & 0xf)))) {
/* Didn't match hash filter */
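The hunks above replace the per-NIC CRC-32 helpers (e100_compute_mcast_idx, lnc_mchash, sunhme_crc32_le) with the common net_crc32()/net_crc32_le() routines. As a minimal standalone sketch, assuming net_crc32() keeps the bit-serial big-endian CRC of the removed eepro100 helper (note that helper's 0x04c11db6 constant differs from the standard 0x04c11db7), the 6-bit multicast filter index is derived as follows; the function names below are illustrative and not part of the patch:

#include <stdint.h>
#include <stddef.h>

/* Bit-serial big-endian CRC-32 over a buffer, mirroring the helper removed
 * from eepro100.c above. */
static uint32_t crc32_be(const uint8_t *p, size_t len)
{
    uint32_t crc = 0xffffffff;

    while (len--) {
        uint8_t b = *p++;
        int j;

        for (j = 0; j < 8; j++) {
            int carry = ((crc >> 31) & 1) ^ (b & 1);

            crc <<= 1;
            b >>= 1;
            if (carry) {
                crc = (crc ^ 0x04c11db6) | carry;
            }
        }
    }
    return crc;
}

/* eepro100: bits 7..2 of the CRC select one of 64 hash-table bits */
static unsigned mcast_idx_eepro100(const uint8_t mac[6])
{
    return (crc32_be(mac, 6) >> 2) & 0x3f;
}

/* ne2000, rtl8139, lan9118, opencores_eth, ftgmac100: top six CRC bits */
static unsigned mcast_idx_top6(const uint8_t mac[6])
{
    return crc32_be(mac, 6) >> 26;
}

The little-endian variant used by pcnet, sungem and sunhme works the same way with the reflected polynomial 0xedb88320, as in the helpers those files drop above.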
diff --git a/hw/nios2/boot.c b/hw/nios2/boot.c
index 2b31f5b844..94f436e7fb 100644
--- a/hw/nios2/boot.c
+++ b/hw/nios2/boot.c
@@ -34,7 +34,6 @@
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
-#include "qemu-common.h"
#include "sysemu/device_tree.h"
#include "sysemu/sysemu.h"
#include "hw/loader.h"
diff --git a/hw/nvram/Makefile.objs b/hw/nvram/Makefile.objs
index c018f6b2ff..0f4ee71dcb 100644
--- a/hw/nvram/Makefile.objs
+++ b/hw/nvram/Makefile.objs
@@ -1,5 +1,6 @@
common-obj-$(CONFIG_DS1225Y) += ds1225y.o
common-obj-y += eeprom93xx.o
+common-obj-y += eeprom_at24c.o
common-obj-y += fw_cfg.o
common-obj-y += chrp_nvram.o
common-obj-$(CONFIG_MAC_NVRAM) += mac_nvram.o
diff --git a/hw/nvram/eeprom_at24c.c b/hw/nvram/eeprom_at24c.c
new file mode 100644
index 0000000000..efa3621ac6
--- /dev/null
+++ b/hw/nvram/eeprom_at24c.c
@@ -0,0 +1,205 @@
+/*
+ * *AT24C* series I2C EEPROM
+ *
+ * Copyright (c) 2015 Michael Davidsaver
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the LICENSE file in the top-level directory.
+ */
+
+#include <string.h>
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/hw.h"
+#include "hw/i2c/i2c.h"
+#include "sysemu/block-backend.h"
+
+/* #define DEBUG_AT24C */
+
+#ifdef DEBUG_AT24C
+#define DPRINTK(FMT, ...) printf(TYPE_AT24C_EE " : " FMT, ## __VA_ARGS__)
+#else
+#define DPRINTK(FMT, ...) do {} while (0)
+#endif
+
+#define ERR(FMT, ...) fprintf(stderr, TYPE_AT24C_EE " : " FMT, \
+ ## __VA_ARGS__)
+
+#define TYPE_AT24C_EE "at24c-eeprom"
+#define AT24C_EE(obj) OBJECT_CHECK(EEPROMState, (obj), TYPE_AT24C_EE)
+
+typedef struct EEPROMState {
+ I2CSlave parent_obj;
+
+ /* address counter */
+ uint16_t cur;
+ /* total size in bytes */
+ uint32_t rsize;
+ bool writable;
+ /* cells changed since last START? */
+ bool changed;
+ /* during WRITE, # of address bytes transferred */
+ uint8_t haveaddr;
+
+ uint8_t *mem;
+
+ BlockBackend *blk;
+} EEPROMState;
+
+static
+int at24c_eeprom_event(I2CSlave *s, enum i2c_event event)
+{
+ EEPROMState *ee = container_of(s, EEPROMState, parent_obj);
+
+ switch (event) {
+ case I2C_START_SEND:
+ case I2C_START_RECV:
+ case I2C_FINISH:
+ ee->haveaddr = 0;
+ DPRINTK("clear\n");
+ if (ee->blk && ee->changed) {
+ int len = blk_pwrite(ee->blk, 0, ee->mem, ee->rsize, 0);
+ if (len != ee->rsize) {
+ ERR("failed to write backing file\n");
+ }
+ DPRINTK("Wrote to backing file\n");
+ }
+ ee->changed = false;
+ break;
+ case I2C_NACK:
+ break;
+ }
+ return 0;
+}
+
+static
+int at24c_eeprom_recv(I2CSlave *s)
+{
+ EEPROMState *ee = AT24C_EE(s);
+ int ret;
+
+ ret = ee->mem[ee->cur];
+
+ ee->cur = (ee->cur + 1u) % ee->rsize;
+ DPRINTK("Recv %02x %c\n", ret, ret);
+
+ return ret;
+}
+
+static
+int at24c_eeprom_send(I2CSlave *s, uint8_t data)
+{
+ EEPROMState *ee = AT24C_EE(s);
+
+ if (ee->haveaddr < 2) {
+ ee->cur <<= 8;
+ ee->cur |= data;
+ ee->haveaddr++;
+ if (ee->haveaddr == 2) {
+ ee->cur %= ee->rsize;
+ DPRINTK("Set pointer %04x\n", ee->cur);
+ }
+
+ } else {
+ if (ee->writable) {
+ DPRINTK("Send %02x\n", data);
+ ee->mem[ee->cur] = data;
+ ee->changed = true;
+ } else {
+ DPRINTK("Send error %02x read-only\n", data);
+ }
+ ee->cur = (ee->cur + 1u) % ee->rsize;
+
+ }
+
+ return 0;
+}
+
+static
+int at24c_eeprom_init(I2CSlave *i2c)
+{
+ EEPROMState *ee = AT24C_EE(i2c);
+
+ ee->mem = g_malloc0(ee->rsize);
+
+ if (ee->blk) {
+ int64_t len = blk_getlength(ee->blk);
+
+ if (len != ee->rsize) {
+ ERR(TYPE_AT24C_EE " : Backing file size %lu != %u\n",
+ (unsigned long)len, (unsigned)ee->rsize);
+ exit(1);
+ }
+
+ if (blk_set_perm(ee->blk, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
+ BLK_PERM_ALL, &error_fatal) < 0)
+ {
+ ERR("Backing file has incorrect permissions\n");
+ exit(1);
+ }
+ }
+ return 0;
+}
+
+static
+void at24c_eeprom_reset(DeviceState *state)
+{
+ EEPROMState *ee = AT24C_EE(state);
+
+ ee->changed = false;
+ ee->cur = 0;
+ ee->haveaddr = 0;
+
+ memset(ee->mem, 0, ee->rsize);
+
+ if (ee->blk) {
+ int len = blk_pread(ee->blk, 0, ee->mem, ee->rsize);
+
+ if (len != ee->rsize) {
+ ERR("Failed initial sync with backing file\n");
+ }
+ DPRINTK("Reset read backing file\n");
+ }
+}
+
+static Property at24c_eeprom_props[] = {
+ DEFINE_PROP_UINT32("rom-size", EEPROMState, rsize, 0),
+ DEFINE_PROP_BOOL("writable", EEPROMState, writable, true),
+ DEFINE_PROP_DRIVE("drive", EEPROMState, blk),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static
+void at24c_eeprom_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ I2CSlaveClass *k = I2C_SLAVE_CLASS(klass);
+
+ k->init = &at24c_eeprom_init;
+ k->event = &at24c_eeprom_event;
+ k->recv = &at24c_eeprom_recv;
+ k->send = &at24c_eeprom_send;
+
+ dc->props = at24c_eeprom_props;
+ dc->reset = at24c_eeprom_reset;
+}
+
+static
+const TypeInfo at24c_eeprom_type = {
+ .name = TYPE_AT24C_EE,
+ .parent = TYPE_I2C_SLAVE,
+ .instance_size = sizeof(EEPROMState),
+ .class_size = sizeof(I2CSlaveClass),
+ .class_init = at24c_eeprom_class_init,
+};
+
+static void at24c_eeprom_register(void)
+{
+ type_register_static(&at24c_eeprom_type);
+}
+
+type_init(at24c_eeprom_register)
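The new at24c model latches a two-byte address during a write transfer and then serves reads sequentially from that internal pointer. A minimal sketch of how an I2C master on the same bus could perform a random read, assuming QEMU's generic i2c_start_transfer()/i2c_send()/i2c_recv()/i2c_end_transfer() helpers and an already-created bus; the function name is illustrative only:

#include "qemu/osdep.h"
#include "hw/i2c/i2c.h"

/* Illustrative only: read one byte at rom_addr from the EEPROM at 7-bit
 * address dev_addr.  The write phase loads the internal address pointer
 * (high byte first), the read phase returns mem[pointer] and advances it. */
static uint8_t at24c_random_read(I2CBus *bus, uint8_t dev_addr,
                                 uint16_t rom_addr)
{
    uint8_t val;

    i2c_start_transfer(bus, dev_addr, 0);   /* START, write direction */
    i2c_send(bus, rom_addr >> 8);           /* high address byte */
    i2c_send(bus, rom_addr & 0xff);         /* low address byte */
    i2c_end_transfer(bus);                  /* STOP */

    i2c_start_transfer(bus, dev_addr, 1);   /* START, read direction */
    val = i2c_recv(bus);                    /* current-address read */
    i2c_end_transfer(bus);

    return val;
}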
diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c
index 8c8ac737ad..9e799dc10f 100644
--- a/hw/pci-bridge/pci_expander_bridge.c
+++ b/hw/pci-bridge/pci_expander_bridge.c
@@ -16,7 +16,6 @@
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"
#include "hw/pci/pci_bridge.h"
-#include "hw/i386/pc.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "sysemu/numa.h"
diff --git a/hw/pci-host/ppce500.c b/hw/pci-host/ppce500.c
index 39cd24464d..279badc894 100644
--- a/hw/pci-host/ppce500.c
+++ b/hw/pci-host/ppce500.c
@@ -423,11 +423,6 @@ static void e500_pcihost_bridge_realize(PCIDevice *d, Error **errp)
PPCE500CCSRState *ccsr = CCSR(container_get(qdev_get_machine(),
"/e500-ccsr"));
- pci_config_set_class(d->config, PCI_CLASS_BRIDGE_PCI);
- d->config[PCI_HEADER_TYPE] =
- (d->config[PCI_HEADER_TYPE] & PCI_HEADER_TYPE_MULTI_FUNCTION) |
- PCI_HEADER_TYPE_BRIDGE;
-
memory_region_init_alias(&b->bar0, OBJECT(ccsr), "e500-pci-bar0", &ccsr->ccsr_space,
0, int128_get64(ccsr->ccsr_space.size));
pci_register_bar(d, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &b->bar0);
diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index 5cf0dabef3..c4fe06ea2a 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -685,6 +685,8 @@ static DeviceState *ppce500_init_mpic_qemu(PPCE500Params *params,
int i, j, k;
dev = qdev_create(NULL, TYPE_OPENPIC);
+ object_property_add_child(qdev_get_machine(), "pic", OBJECT(dev),
+ &error_fatal);
qdev_prop_set_uint32(dev, "model", params->mpic_version);
qdev_prop_set_uint32(dev, "nb_cpus", smp_cpus);
@@ -884,6 +886,8 @@ void ppce500_init(MachineState *machine, PPCE500Params *params)
/* PCI */
dev = qdev_create(NULL, "e500-pcihost");
+ object_property_add_child(qdev_get_machine(), "pci-host", OBJECT(dev),
+ &error_abort);
qdev_prop_set_uint32(dev, "first_slot", params->pci_first_slot);
qdev_prop_set_uint32(dev, "first_pin_irq", pci_irq_nrs[0]);
qdev_init_nofail(dev);
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
index c35c439d81..94ffc8e137 100644
--- a/hw/ppc/pnv.c
+++ b/hw/ppc/pnv.c
@@ -655,7 +655,7 @@ static void ppc_powernv_init(MachineState *machine)
serial_hds_isa_init(pnv->isa_bus, 0, MAX_SERIAL_PORTS);
/* Create an RTC ISA device too */
- rtc_init(pnv->isa_bus, 2000, NULL);
+ mc146818_rtc_init(pnv->isa_bus, 2000, NULL);
/* OpenPOWER systems use a IPMI SEL Event message to notify the
* host to powerdown */
diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c
index 82ff440b33..03317db853 100644
--- a/hw/ppc/pnv_core.c
+++ b/hw/ppc/pnv_core.c
@@ -126,7 +126,6 @@ static void pnv_core_realize_child(Object *child, XICSFabric *xi, Error **errp)
Error *local_err = NULL;
CPUState *cs = CPU(child);
PowerPCCPU *cpu = POWERPC_CPU(cs);
- Object *obj;
object_property_set_bool(child, true, "realized", &local_err);
if (local_err) {
@@ -134,13 +133,7 @@ static void pnv_core_realize_child(Object *child, XICSFabric *xi, Error **errp)
return;
}
- obj = object_new(TYPE_PNV_ICP);
- object_property_add_child(child, "icp", obj, NULL);
- object_unref(obj);
- object_property_add_const_link(obj, ICP_PROP_XICS, OBJECT(xi),
- &error_abort);
- object_property_add_const_link(obj, ICP_PROP_CPU, child, &error_abort);
- object_property_set_bool(obj, true, "realized", &local_err);
+ cpu->intc = icp_create(child, TYPE_PNV_ICP, xi, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
@@ -148,7 +141,6 @@ static void pnv_core_realize_child(Object *child, XICSFabric *xi, Error **errp)
powernv_cpu_init(cpu, &local_err);
if (local_err) {
- object_unparent(obj);
error_propagate(errp, local_err);
return;
}
diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c
index 6f8accc397..af08ac319a 100644
--- a/hw/ppc/prep.c
+++ b/hw/ppc/prep.c
@@ -42,6 +42,7 @@
#include "hw/loader.h"
#include "hw/timer/mc146818rtc.h"
#include "hw/isa/pc87312.h"
+#include "hw/net/ne2000-isa.h"
#include "sysemu/block-backend.h"
#include "sysemu/arch_init.h"
#include "sysemu/kvm.h"
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 1ac7eb0f8c..6785a90c60 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -641,6 +641,26 @@ static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr)
}
+static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
+{
+ MemoryDeviceInfoList *info;
+
+ for (info = list; info; info = info->next) {
+ MemoryDeviceInfo *value = info->value;
+
+ if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
+ PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;
+
+ if (addr >= pcdimm_info->addr &&
+ addr < (pcdimm_info->addr + pcdimm_info->size)) {
+ return pcdimm_info->node;
+ }
+ }
+ }
+
+ return -1;
+}
+
/*
* Adds ibm,dynamic-reconfiguration-memory node.
* Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
@@ -658,6 +678,7 @@ static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
lmb_size;
uint32_t *int_buf, *cur_index, buf_len;
int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
+ MemoryDeviceInfoList *dimms = NULL;
/*
* Don't create the node if there is no hotpluggable memory
@@ -692,6 +713,11 @@ static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
goto out;
}
+ if (hotplug_lmb_start) {
+ MemoryDeviceInfoList **prev = &dimms;
+ qmp_pc_dimm_device_list(qdev_get_machine(), &prev);
+ }
+
/* ibm,dynamic-memory */
int_buf[0] = cpu_to_be32(nr_lmbs);
cur_index++;
@@ -709,7 +735,7 @@ static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
dynamic_memory[3] = cpu_to_be32(0); /* reserved */
- dynamic_memory[4] = cpu_to_be32(numa_get_node(addr, NULL));
+ dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
if (memory_region_present(get_system_memory(), addr)) {
dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
} else {
@@ -732,6 +758,7 @@ static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
}
+ qapi_free_MemoryDeviceInfoList(dimms);
ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
if (ret < 0) {
goto out;
@@ -916,9 +943,8 @@ static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
_FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
RTAS_EVENT_SCAN_RATE));
- if (msi_nonbroken) {
- _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
- }
+ g_assert(msi_nonbroken);
+ _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
/*
* According to PAPR, rtas ibm,os-term does not guarantee a return
@@ -1427,7 +1453,7 @@ static int spapr_reset_drcs(Object *child, void *opaque)
return 0;
}
-static void ppc_spapr_reset(void)
+static void spapr_machine_reset(void)
{
MachineState *machine = MACHINE(qdev_get_machine());
sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
@@ -1440,7 +1466,10 @@ static void ppc_spapr_reset(void)
/* Check for unknown sysbus devices */
foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);
- if (kvm_enabled() && kvmppc_has_cap_mmu_radix()) {
+ first_ppc_cpu = POWERPC_CPU(first_cpu);
+ if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
+ ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
+ spapr->max_compat_pvr)) {
/* If using KVM with radix mode available, VCPUs can be started
* without a HPT because KVM will start them in radix mode.
* Set the GR bit in PATB so that we know there is no HPT. */
@@ -1499,7 +1528,6 @@ static void ppc_spapr_reset(void)
g_free(fdt);
/* Set up the entry state */
- first_ppc_cpu = POWERPC_CPU(first_cpu);
first_ppc_cpu->env.gpr[3] = fdt_addr;
first_ppc_cpu->env.gpr[5] = 0;
first_cpu->halted = 0;
@@ -2265,7 +2293,7 @@ out:
}
/* pSeries LPAR / sPAPR hardware init */
-static void ppc_spapr_init(MachineState *machine)
+static void spapr_machine_init(MachineState *machine)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
@@ -2793,7 +2821,7 @@ static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name,
visit_type_uint32(v, name, (uint32_t *)opaque, errp);
}
-static void spapr_machine_initfn(Object *obj)
+static void spapr_instance_init(Object *obj)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
@@ -3180,12 +3208,10 @@ void spapr_core_release(DeviceState *dev)
if (smc->pre_2_10_has_unused_icps) {
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
- sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
- size_t size = object_type_get_instance_size(scc->cpu_type);
int i;
for (i = 0; i < cc->nr_threads; i++) {
- CPUState *cs = CPU(sc->threads + i * size);
+ CPUState *cs = CPU(sc->threads[i]);
pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
}
@@ -3231,7 +3257,7 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
CPUCore *cc = CPU_CORE(dev);
- CPUState *cs = CPU(core->threads);
+ CPUState *cs = CPU(core->threads[0]);
sPAPRDRConnector *drc;
Error *local_err = NULL;
int smt = kvmppc_smt_threads();
@@ -3276,15 +3302,12 @@ static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
core_slot->cpu = OBJECT(dev);
if (smc->pre_2_10_has_unused_icps) {
- sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
- size_t size = object_type_get_instance_size(scc->cpu_type);
int i;
for (i = 0; i < cc->nr_threads; i++) {
sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev);
- void *obj = sc->threads + i * size;
- cs = CPU(obj);
+ cs = CPU(sc->threads[i]);
pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
}
}
@@ -3563,6 +3586,139 @@ static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
return cpu ? ICP(cpu->intc) : NULL;
}
+#define ICS_IRQ_FREE(ics, srcno) \
+ (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))
+
+static int ics_find_free_block(ICSState *ics, int num, int alignnum)
+{
+ int first, i;
+
+ for (first = 0; first < ics->nr_irqs; first += alignnum) {
+ if (num > (ics->nr_irqs - first)) {
+ return -1;
+ }
+ for (i = first; i < first + num; ++i) {
+ if (!ICS_IRQ_FREE(ics, i)) {
+ break;
+ }
+ }
+ if (i == (first + num)) {
+ return first;
+ }
+ }
+
+ return -1;
+}
+
+/*
+ * Allocate the IRQ number and set the IRQ type, LSI or MSI
+ */
+static void spapr_irq_set_lsi(sPAPRMachineState *spapr, int irq, bool lsi)
+{
+ ics_set_irq_type(spapr->ics, irq - spapr->ics->offset, lsi);
+}
+
+int spapr_irq_alloc(sPAPRMachineState *spapr, int irq_hint, bool lsi,
+ Error **errp)
+{
+ ICSState *ics = spapr->ics;
+ int irq;
+
+ if (!ics) {
+ return -1;
+ }
+ if (irq_hint) {
+ if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
+ error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
+ return -1;
+ }
+ irq = irq_hint;
+ } else {
+ irq = ics_find_free_block(ics, 1, 1);
+ if (irq < 0) {
+ error_setg(errp, "can't allocate IRQ: no IRQ left");
+ return -1;
+ }
+ irq += ics->offset;
+ }
+
+ spapr_irq_set_lsi(spapr, irq, lsi);
+ trace_spapr_irq_alloc(irq);
+
+ return irq;
+}
+
+/*
+ * Allocate a block of consecutive IRQs, and return the number of the first IRQ in
+ * the block. If align==true, aligns the first IRQ number to num.
+ */
+int spapr_irq_alloc_block(sPAPRMachineState *spapr, int num, bool lsi,
+ bool align, Error **errp)
+{
+ ICSState *ics = spapr->ics;
+ int i, first = -1;
+
+ if (!ics) {
+ return -1;
+ }
+
+ /*
+ * MSIMessage::data is used for storing VIRQ so
+ * it has to be aligned to num to support multiple
+ * MSI vectors. MSI-X is not affected by this.
+ * The hint is used for the first IRQ, the rest should
+ * be allocated continuously.
+ */
+ if (align) {
+ assert((num == 1) || (num == 2) || (num == 4) ||
+ (num == 8) || (num == 16) || (num == 32));
+ first = ics_find_free_block(ics, num, num);
+ } else {
+ first = ics_find_free_block(ics, num, 1);
+ }
+ if (first < 0) {
+ error_setg(errp, "can't find a free %d-IRQ block", num);
+ return -1;
+ }
+
+ first += ics->offset;
+ for (i = first; i < first + num; ++i) {
+ spapr_irq_set_lsi(spapr, i, lsi);
+ }
+
+ trace_spapr_irq_alloc_block(first, num, lsi, align);
+
+ return first;
+}
+
+void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num)
+{
+ ICSState *ics = spapr->ics;
+ int srcno = irq - ics->offset;
+ int i;
+
+ if (ics_valid_irq(ics, irq)) {
+ trace_spapr_irq_free(0, irq, num);
+ for (i = srcno; i < srcno + num; ++i) {
+ if (ICS_IRQ_FREE(ics, i)) {
+ trace_spapr_irq_free_warn(0, i + ics->offset);
+ }
+ memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
+ }
+ }
+}
+
+qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq)
+{
+ ICSState *ics = spapr->ics;
+
+ if (ics_valid_irq(ics, irq)) {
+ return ics->qirqs[irq - ics->offset];
+ }
+
+ return NULL;
+}
+
static void spapr_pic_print_info(InterruptStatsProvider *obj,
Monitor *mon)
{
@@ -3622,8 +3778,8 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
* functions for the specific versioned machine types can override
* these details for backwards compatibility
*/
- mc->init = ppc_spapr_init;
- mc->reset = ppc_spapr_reset;
+ mc->init = spapr_machine_init;
+ mc->reset = spapr_machine_reset;
mc->block_default_type = IF_SCSI;
mc->max_cpus = 1024;
mc->no_parallel = 1;
@@ -3670,7 +3826,7 @@ static const TypeInfo spapr_machine_info = {
.parent = TYPE_MACHINE,
.abstract = true,
.instance_size = sizeof(sPAPRMachineState),
- .instance_init = spapr_machine_initfn,
+ .instance_init = spapr_instance_init,
.instance_finalize = spapr_machine_finalizefn,
.class_size = sizeof(sPAPRMachineClass),
.class_init = spapr_machine_class_init,
@@ -3714,27 +3870,47 @@ static const TypeInfo spapr_machine_info = {
type_init(spapr_machine_register_##suffix)
/*
+ * pseries-2.12
+ */
+static void spapr_machine_2_12_instance_options(MachineState *machine)
+{
+}
+
+static void spapr_machine_2_12_class_options(MachineClass *mc)
+{
+ /* Defaults for the latest behaviour inherited from the base class */
+}
+
+DEFINE_SPAPR_MACHINE(2_12, "2.12", true);
+
+/*
* pseries-2.11
*/
+#define SPAPR_COMPAT_2_11 \
+ HW_COMPAT_2_11
+
static void spapr_machine_2_11_instance_options(MachineState *machine)
{
+ spapr_machine_2_12_instance_options(machine);
}
static void spapr_machine_2_11_class_options(MachineClass *mc)
{
- /* Defaults for the latest behaviour inherited from the base class */
+ spapr_machine_2_12_class_options(mc);
+ SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_11);
}
-DEFINE_SPAPR_MACHINE(2_11, "2.11", true);
+DEFINE_SPAPR_MACHINE(2_11, "2.11", false);
/*
* pseries-2.10
*/
#define SPAPR_COMPAT_2_10 \
- HW_COMPAT_2_10 \
+ HW_COMPAT_2_10
static void spapr_machine_2_10_instance_options(MachineState *machine)
{
+ spapr_machine_2_11_instance_options(machine);
}
static void spapr_machine_2_10_class_options(MachineClass *mc)
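The alignment enforced by spapr_irq_alloc_block() above follows from how Multiple Message MSI encodes its vector: the device substitutes the vector number into the low-order bits of the MSI data word, which here carries the allocated VIRQ, so the first IRQ of the block must be a multiple of the block size. A minimal sketch of that assumption (the helper below is illustrative and not part of the patch):

#include <assert.h>

/* Illustrative only: with num vectors allocated as an aligned block starting
 * at first_virq, vector v is signalled as first_virq | v, which is only a
 * valid VIRQ because first_virq is a multiple of num. */
static unsigned msi_virq_for_vector(unsigned first_virq, unsigned num,
                                    unsigned vector)
{
    assert(first_virq % num == 0);   /* guaranteed by the aligned allocation */
    assert(vector < num);
    return first_virq | vector;
}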
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index 3a4c174012..ac19b2e0b7 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -6,6 +6,7 @@
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
+#include "qemu/osdep.h"
#include "hw/cpu/core.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "target/ppc/cpu.h"
@@ -26,6 +27,7 @@ static void spapr_cpu_reset(void *opaque)
PowerPCCPU *cpu = opaque;
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
cpu_reset(cs);
@@ -35,6 +37,13 @@ static void spapr_cpu_reset(void *opaque)
cs->halted = 1;
env->spr[SPR_HIOR] = 0;
+
+ /* Disable Power-saving mode Exit Cause exceptions for the CPU.
+ * This can cause issues when rebooting the guest if a secondary
+ * is awakened */
+ if (cs != first_cpu) {
+ env->spr[SPR_LPCR] &= ~pcc->lpcr_pm;
+ }
}
static void spapr_cpu_destroy(PowerPCCPU *cpu)
@@ -79,13 +88,11 @@ const char *spapr_get_cpu_core_type(const char *cpu_type)
static void spapr_cpu_core_unrealizefn(DeviceState *dev, Error **errp)
{
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
- sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(dev));
- size_t size = object_type_get_instance_size(scc->cpu_type);
CPUCore *cc = CPU_CORE(dev);
int i;
for (i = 0; i < cc->nr_threads; i++) {
- void *obj = sc->threads + i * size;
+ Object *obj = OBJECT(sc->threads[i]);
DeviceState *dev = DEVICE(obj);
CPUState *cs = CPU(dev);
PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -104,7 +111,6 @@ static void spapr_cpu_core_realize_child(Object *child,
Error *local_err = NULL;
CPUState *cs = CPU(child);
PowerPCCPU *cpu = POWERPC_CPU(cs);
- Object *obj;
object_property_set_bool(child, true, "realized", &local_err);
if (local_err) {
@@ -116,21 +122,14 @@ static void spapr_cpu_core_realize_child(Object *child,
goto error;
}
- obj = object_new(spapr->icp_type);
- object_property_add_child(child, "icp", obj, &error_abort);
- object_unref(obj);
- object_property_add_const_link(obj, ICP_PROP_XICS, OBJECT(spapr),
- &error_abort);
- object_property_add_const_link(obj, ICP_PROP_CPU, child, &error_abort);
- object_property_set_bool(obj, true, "realized", &local_err);
+ cpu->intc = icp_create(child, spapr->icp_type, XICS_FABRIC(spapr),
+ &local_err);
if (local_err) {
- goto free_icp;
+ goto error;
}
return;
-free_icp:
- object_unparent(obj);
error:
error_propagate(errp, local_err);
}
@@ -146,9 +145,8 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(dev));
CPUCore *cc = CPU_CORE(OBJECT(dev));
- size_t size;
Error *local_err = NULL;
- void *obj;
+ Object *obj;
int i, j;
if (!spapr) {
@@ -156,18 +154,16 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
return;
}
- size = object_type_get_instance_size(scc->cpu_type);
- sc->threads = g_malloc0(size * cc->nr_threads);
+ sc->threads = g_new(PowerPCCPU *, cc->nr_threads);
for (i = 0; i < cc->nr_threads; i++) {
char id[32];
CPUState *cs;
PowerPCCPU *cpu;
- obj = sc->threads + i * size;
+ obj = object_new(scc->cpu_type);
- object_initialize(obj, size, scc->cpu_type);
cs = CPU(obj);
- cpu = POWERPC_CPU(cs);
+ cpu = sc->threads[i] = POWERPC_CPU(obj);
cs->cpu_index = cc->core_id + i;
cpu->vcpu_id = (cc->core_id * spapr->vsmt / smp_threads) + i;
if (kvm_enabled() && !kvm_vcpu_id_is_valid(cpu->vcpu_id)) {
@@ -192,7 +188,7 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
}
for (j = 0; j < cc->nr_threads; j++) {
- obj = sc->threads + j * size;
+ obj = OBJECT(sc->threads[j]);
spapr_cpu_core_realize_child(obj, spapr, &local_err);
if (local_err) {
@@ -203,7 +199,7 @@ static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
err:
while (--i >= 0) {
- obj = sc->threads + i * size;
+ obj = OBJECT(sc->threads[i]);
object_unparent(obj);
}
g_free(sc->threads);
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index e377fc7dde..86836f0626 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -282,8 +282,7 @@ void spapr_dt_events(sPAPRMachineState *spapr, void *fdt)
continue;
}
- interrupts[0] = cpu_to_be32(source->irq);
- interrupts[1] = 0;
+ spapr_dt_xics_irq(interrupts, source->irq, false);
_FDT(node_offset = fdt_add_subnode(fdt, event_sources, source_name));
_FDT(fdt_setprop(fdt, node_offset, "interrupts", interrupts,
@@ -293,9 +292,6 @@ void spapr_dt_events(sPAPRMachineState *spapr, void *fdt)
irq_ranges[count++] = cpu_to_be32(1);
}
- irq_ranges[count] = cpu_to_be32(count);
- count++;
-
_FDT((fdt_setprop(fdt, event_sources, "interrupt-controller", NULL, 0)));
_FDT((fdt_setprop_cell(fdt, event_sources, "#interrupt-cells", 2)));
_FDT((fdt_setprop(fdt, event_sources, "interrupt-ranges",
@@ -472,9 +468,8 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)
rtas_event_log_queue(spapr, entry);
- qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr),
- rtas_event_log_to_irq(spapr,
- RTAS_LOG_TYPE_EPOW)));
+ qemu_irq_pulse(spapr_qirq(spapr,
+ rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_EPOW)));
}
static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
@@ -556,9 +551,8 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
rtas_event_log_queue(spapr, entry);
- qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr),
- rtas_event_log_to_irq(spapr,
- RTAS_LOG_TYPE_HOTPLUG)));
+ qemu_irq_pulse(spapr_qirq(spapr,
+ rtas_event_log_to_irq(spapr, RTAS_LOG_TYPE_HOTPLUG)));
}
void spapr_hotplug_req_add_by_index(sPAPRDRConnector *drc)
@@ -678,7 +672,7 @@ static void check_exception(PowerPCCPU *cpu, sPAPRMachineState *spapr,
spapr_event_sources_get_source(spapr->event_sources, i);
g_assert(source->enabled);
- qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr), source->irq));
+ qemu_irq_pulse(spapr_qirq(spapr, source->irq));
}
}
@@ -718,7 +712,7 @@ void spapr_events_init(sPAPRMachineState *spapr)
spapr->event_sources = spapr_event_sources_new();
spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_EPOW,
- spapr_ics_alloc(spapr->ics, 0, false,
+ spapr_irq_alloc(spapr, 0, false,
&error_fatal));
/* NOTE: if machine supports modern/dedicated hotplug event source,
@@ -731,7 +725,7 @@ void spapr_events_init(sPAPRMachineState *spapr)
*/
if (spapr->use_hotplug_event_source) {
spapr_event_sources_register(spapr->event_sources, EVENT_CLASS_HOT_PLUG,
- spapr_ics_alloc(spapr->ics, 0, false,
+ spapr_irq_alloc(spapr, 0, false,
&error_fatal));
}
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index be22a6b289..51eba52e86 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -13,7 +13,6 @@
#include "trace.h"
#include "kvm_ppc.h"
#include "hw/ppc/spapr_ovec.h"
-#include "qemu/error-report.h"
#include "mmu-book3s-v3.h"
struct SPRSyncState {
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 5a3122a9f9..88797b3d36 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -314,7 +314,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return;
}
- spapr_ics_free(spapr->ics, msi->first_irq, msi->num);
+ spapr_irq_free(spapr, msi->first_irq, msi->num);
if (msi_present(pdev)) {
spapr_msi_setmsg(pdev, 0, false, 0, 0);
}
@@ -352,7 +352,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
}
/* Allocate MSIs */
- irq = spapr_ics_alloc_block(spapr->ics, req_num, false,
+ irq = spapr_irq_alloc_block(spapr, req_num, false,
ret_intr_type == RTAS_TYPE_MSI, &err);
if (err) {
error_reportf_err(err, "Can't allocate MSIs for device %x: ",
@@ -363,7 +363,7 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
/* Release previous MSIs */
if (msi) {
- spapr_ics_free(spapr->ics, msi->first_irq, msi->num);
+ spapr_irq_free(spapr, msi->first_irq, msi->num);
g_hash_table_remove(phb->msi, &config_addr);
}
@@ -723,7 +723,7 @@ static void spapr_msi_write(void *opaque, hwaddr addr,
trace_spapr_pci_msi_write(addr, data, irq);
- qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr), irq));
+ qemu_irq_pulse(spapr_qirq(spapr, irq));
}
static const MemoryRegionOps spapr_msi_ops = {
@@ -1675,7 +1675,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
uint32_t irq;
Error *local_err = NULL;
- irq = spapr_ics_alloc_block(spapr->ics, 1, true, false, &local_err);
+ irq = spapr_irq_alloc_block(spapr, 1, true, false, &local_err);
if (local_err) {
error_propagate(errp, local_err);
error_prepend(errp, "can't allocate LSIs: ");
@@ -2121,8 +2121,7 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
irqmap[2] = 0;
irqmap[3] = cpu_to_be32(j+1);
irqmap[4] = cpu_to_be32(xics_phandle);
- irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq);
- irqmap[6] = cpu_to_be32(0x8);
+ spapr_dt_xics_irq(&irqmap[5], phb->lsi_table[lsi_num].irq, true);
}
}
/* Write interrupt map */
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index cdf0b607a0..4bb939d3d1 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -162,6 +162,7 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPRMachineState *spapr,
if (cpu != NULL) {
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
if (!cs->halted) {
rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
@@ -174,6 +175,10 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPRMachineState *spapr,
kvm_cpu_synchronize_state(cs);
env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME);
+
+ /* Enable Power-saving mode Exit Cause exceptions for the new CPU */
+ env->spr[SPR_LPCR] |= pcc->lpcr_pm;
+
env->nip = start;
env->gpr[3] = r3;
cs->halted = 0;
@@ -197,19 +202,15 @@ static void rtas_stop_self(PowerPCCPU *cpu, sPAPRMachineState *spapr,
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
cs->halted = 1;
qemu_cpu_kick(cs);
- /*
- * While stopping a CPU, the guest calls H_CPPR which
- * effectively disables interrupts on XICS level.
- * However decrementer interrupts in TCG can still
- * wake the CPU up so here we disable interrupts in MSR
- * as well.
- * As rtas_start_cpu() resets the whole MSR anyway, there is
- * no need to bother with specific bits, we just clear it.
- */
- env->msr = 0;
+
+ /* Disable Power-saving mode Exit Cause exceptions for the CPU.
+ * This could deliver an interrupt on a dying CPU and crash the
+ * guest */
+ env->spr[SPR_LPCR] &= ~pcc->lpcr_pm;
}
static inline int sysparm_st(target_ulong addr, target_ulong len,
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index ea3bc8bd9e..472dd6f33a 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -126,8 +126,9 @@ static int vio_make_devnode(VIOsPAPRDevice *dev,
}
if (dev->irq) {
- uint32_t ints_prop[] = {cpu_to_be32(dev->irq), 0};
+ uint32_t ints_prop[2];
+ spapr_dt_xics_irq(ints_prop, dev->irq, false);
ret = fdt_setprop(fdt, node_off, "interrupts", ints_prop,
sizeof(ints_prop));
if (ret < 0) {
@@ -454,7 +455,7 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp)
dev->qdev.id = id;
}
- dev->irq = spapr_ics_alloc(spapr->ics, dev->irq, false, &local_err);
+ dev->irq = spapr_irq_alloc(spapr, dev->irq, false, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events
index 4a6a6490fa..b7c3e64b5e 100644
--- a/hw/ppc/trace-events
+++ b/hw/ppc/trace-events
@@ -12,6 +12,10 @@ spapr_pci_msi_retry(unsigned config_addr, unsigned req_num, unsigned max_irqs) "
# hw/ppc/spapr.c
spapr_cas_failed(unsigned long n) "DT diff buffer is too small: %ld bytes"
spapr_cas_continue(unsigned long n) "Copy changes to the guest: %ld bytes"
+spapr_irq_alloc(int irq) "irq %d"
+spapr_irq_alloc_block(int first, int num, bool lsi, int align) "first irq %d, %d irqs, lsi=%d, alignnum %d"
+spapr_irq_free(int src, int irq, int num) "Source#%d, first irq %d, %d irqs"
+spapr_irq_free_warn(int src, int irq) "Source#%d, irq %d is already free"
# hw/ppc/spapr_hcall.c
spapr_cas_pvr_try(uint32_t pvr) "0x%x"
diff --git a/hw/s390x/3270-ccw.c b/hw/s390x/3270-ccw.c
index 081e3ef6f4..3af13ea027 100644
--- a/hw/s390x/3270-ccw.c
+++ b/hw/s390x/3270-ccw.c
@@ -104,7 +104,7 @@ static void emulated_ccw_3270_realize(DeviceState *ds, Error **errp)
SubchDev *sch;
Error *err = NULL;
- sch = css_create_sch(cdev->devno, true, cbus->squash_mcss, errp);
+ sch = css_create_sch(cdev->devno, cbus->squash_mcss, errp);
if (!sch) {
return;
}
diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c
index c4a9735d71..a02d708239 100644
--- a/hw/s390x/css-bridge.c
+++ b/hw/s390x/css-bridge.c
@@ -99,6 +99,8 @@ VirtualCssBus *virtual_css_bus_init(void)
/* Create bridge device */
dev = qdev_create(NULL, TYPE_VIRTUAL_CSS_BRIDGE);
+ object_property_add_child(qdev_get_machine(), TYPE_VIRTUAL_CSS_BRIDGE,
+ OBJECT(dev), NULL);
qdev_init_nofail(dev);
/* Create bus on bridge device */
@@ -123,6 +125,11 @@ static Property virtual_css_bridge_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
+static bool prop_get_true(Object *obj, Error **errp)
+{
+ return true;
+}
+
static void virtual_css_bridge_class_init(ObjectClass *klass, void *data)
{
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
@@ -131,6 +138,12 @@ static void virtual_css_bridge_class_init(ObjectClass *klass, void *data)
hc->unplug = ccw_device_unplug;
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->props = virtual_css_bridge_properties;
+ object_class_property_add_bool(klass, "cssid-unrestricted",
+ prop_get_true, NULL, NULL);
+ object_class_property_set_description(klass, "cssid-unrestricted",
+ "A css device can use any cssid, regardless whether virtual"
+ " or not (read only, always true)",
+ NULL);
}
static const TypeInfo virtual_css_bridge_info = {
diff --git a/hw/s390x/css.c b/hw/s390x/css.c
index f6b5c807cd..1c526fd7e2 100644
--- a/hw/s390x/css.c
+++ b/hw/s390x/css.c
@@ -13,7 +13,6 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/qdev.h"
-#include "qemu/error-report.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "exec/address-spaces.h"
@@ -1723,12 +1722,6 @@ void css_undo_stcrw(CRW *crw)
QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling);
}
-int css_do_tpi(IOIntCode *int_code, int lowcore)
-{
- /* No pending interrupts for !KVM. */
- return 0;
- }
-
int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
int rfmt, void *buf)
{
@@ -2370,22 +2363,12 @@ const PropertyInfo css_devid_ro_propinfo = {
.get = get_css_devid,
};
-SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
- Error **errp)
+SubchDev *css_create_sch(CssDevId bus_id, bool squash_mcss, Error **errp)
{
uint16_t schid = 0;
SubchDev *sch;
if (bus_id.valid) {
- if (is_virtual != (bus_id.cssid == VIRTUAL_CSSID)) {
- error_setg(errp, "cssid %hhx not valid for %s devices",
- bus_id.cssid,
- (is_virtual ? "virtual" : "non-virtual"));
- return NULL;
- }
- }
-
- if (bus_id.valid) {
if (squash_mcss) {
bus_id.cssid = channel_subsys.default_cssid;
} else if (!channel_subsys.css[bus_id.cssid]) {
@@ -2396,19 +2379,8 @@ SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
bus_id.devid, &schid, errp)) {
return NULL;
}
- } else if (squash_mcss || is_virtual) {
- bus_id.cssid = channel_subsys.default_cssid;
-
- if (!css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
- &bus_id.devid, &schid, errp)) {
- return NULL;
- }
} else {
- for (bus_id.cssid = 0; bus_id.cssid < MAX_CSSID; ++bus_id.cssid) {
- if (bus_id.cssid == VIRTUAL_CSSID) {
- continue;
- }
-
+ for (bus_id.cssid = channel_subsys.default_cssid;;) {
if (!channel_subsys.css[bus_id.cssid]) {
css_create_css_image(bus_id.cssid, false);
}
@@ -2418,7 +2390,8 @@ SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
NULL)) {
break;
}
- if (bus_id.cssid == MAX_CSSID) {
+ bus_id.cssid = (bus_id.cssid + 1) % MAX_CSSID;
+ if (bus_id.cssid == channel_subsys.default_cssid) {
error_setg(errp, "Virtual channel subsystem is full!");
return NULL;
}
diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c
index 0ef232ec27..4a9d4d2534 100644
--- a/hw/s390x/s390-ccw.c
+++ b/hw/s390x/s390-ccw.c
@@ -77,7 +77,7 @@ static void s390_ccw_realize(S390CCWDevice *cdev, char *sysfsdev, Error **errp)
goto out_err_propagate;
}
- sch = css_create_sch(ccw_dev->devno, false, cbus->squash_mcss, &err);
+ sch = css_create_sch(ccw_dev->devno, cbus->squash_mcss, &err);
if (!sch) {
goto out_mdevid_free;
}
diff --git a/hw/s390x/s390-pci-bus.h b/hw/s390x/s390-pci-bus.h
index 560bd82a0f..2993f0ddef 100644
--- a/hw/s390x/s390-pci-bus.h
+++ b/hw/s390x/s390-pci-bus.h
@@ -284,6 +284,7 @@ struct S390PCIBusDevice {
uint64_t fmb_addr;
uint8_t isc;
uint16_t noi;
+ uint16_t maxstbl;
uint8_t sum;
S390MsixInfo msix;
AdapterRoutes routes;
diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
index 8e088f3dc9..be449210d9 100644
--- a/hw/s390x/s390-pci-inst.c
+++ b/hw/s390x/s390-pci-inst.c
@@ -142,7 +142,7 @@ out:
return rc;
}
-int clp_service_call(S390CPU *cpu, uint8_t r2)
+int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
{
ClpReqHdr *reqh;
ClpRspHdr *resh;
@@ -158,37 +158,40 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
- program_interrupt(env, PGM_PRIVILEGED, 4);
+ s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
return 0;
}
if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
reqh = (ClpReqHdr *)buffer;
req_len = lduw_p(&reqh->len);
if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
req_len + sizeof(*resh))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
resh = (ClpRspHdr *)(buffer + req_len);
res_len = lduw_p(&resh->len);
if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
if ((req_len + res_len) > 8192) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
req_len + res_len)) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
@@ -294,6 +297,7 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
stw_p(&resgrp->mui, 0);
stw_p(&resgrp->i, 128);
+ stw_p(&resgrp->maxstbl, 128);
resgrp->version = 0;
stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
@@ -308,19 +312,78 @@ int clp_service_call(S390CPU *cpu, uint8_t r2)
out:
if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
req_len + res_len)) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
setcc(cpu, cc);
return 0;
}
-int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
+/**
+ * Swap data contained in s390x big endian registers to little endian
+ * PCI bars.
+ *
+ * @ptr: a pointer to a uint64_t data field
+ * @len: the length of the valid data, must be 1,2,4 or 8
+ */
+static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
+{
+ uint64_t data = *ptr;
+
+ switch (len) {
+ case 1:
+ break;
+ case 2:
+ data = bswap16(data);
+ break;
+ case 4:
+ data = bswap32(data);
+ break;
+ case 8:
+ data = bswap64(data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *ptr = data;
+ return 0;
+}
+
+static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
+ uint8_t len)
+{
+ MemoryRegion *subregion;
+ uint64_t subregion_size;
+
+ QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
+ subregion_size = int128_get64(subregion->size);
+ if ((offset >= subregion->addr) &&
+ (offset + len) <= (subregion->addr + subregion_size)) {
+ mr = subregion;
+ break;
+ }
+ }
+ return mr;
+}
+
+static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
+ uint64_t offset, uint64_t *data, uint8_t len)
+{
+ MemoryRegion *mr;
+
+ mr = pbdev->pdev->io_regions[pcias].memory;
+ mr = s390_get_subregion(mr, offset, len);
+ offset -= mr->addr;
+ return memory_region_dispatch_read(mr, offset, data, len,
+ MEMTXATTRS_UNSPECIFIED);
+}
+
+int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
uint64_t offset;
uint64_t data;
- MemoryRegion *mr;
MemTxResult result;
uint8_t len;
uint32_t fh;
@@ -329,12 +392,12 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
- program_interrupt(env, PGM_PRIVILEGED, 4);
+ s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
return 0;
}
if (r2 & 0x1) {
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return 0;
}
@@ -343,6 +406,11 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
len = env->regs[r2] & 0xf;
offset = env->regs[r2 + 1];
+ if (!(fh & FH_MASK_ENABLE)) {
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
if (!pbdev) {
DPRINTF("pcilg no pci dev\n");
@@ -351,12 +419,7 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
}
switch (pbdev->state) {
- case ZPCI_FS_RESERVED:
- case ZPCI_FS_STANDBY:
- case ZPCI_FS_DISABLED:
case ZPCI_FS_PERMANENT_ERROR:
- setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
- return 0;
case ZPCI_FS_ERROR:
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
@@ -365,44 +428,33 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
break;
}
- if (pcias < 6) {
- if ((8 - (offset & 0x7)) < len) {
- program_interrupt(env, PGM_OPERAND, 4);
+ switch (pcias) {
+ case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
+ if (!len || (len > (8 - (offset & 0x7)))) {
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
- mr = pbdev->pdev->io_regions[pcias].memory;
- result = memory_region_dispatch_read(mr, offset, &data, len,
- MEMTXATTRS_UNSPECIFIED);
+ result = zpci_read_bar(pbdev, pcias, offset, &data, len);
if (result != MEMTX_OK) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
- } else if (pcias == 15) {
- if ((4 - (offset & 0x3)) < len) {
- program_interrupt(env, PGM_OPERAND, 4);
+ break;
+ case ZPCI_CONFIG_BAR:
+ if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
data = pci_host_config_read_common(
pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);
- switch (len) {
- case 1:
- break;
- case 2:
- data = bswap16(data);
- break;
- case 4:
- data = bswap32(data);
- break;
- case 8:
- data = bswap64(data);
- break;
- default:
- program_interrupt(env, PGM_OPERAND, 4);
+ if (zpci_endian_swap(&data, len)) {
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
- } else {
- DPRINTF("invalid space\n");
+ break;
+ default:
+ DPRINTF("pcilg invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
return 0;
@@ -413,24 +465,23 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
return 0;
}
-static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
+static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
+ uint64_t offset, uint64_t data, uint8_t len)
{
- if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
- offset >= pbdev->msix.table_offset &&
- offset < (pbdev->msix.table_offset +
- pbdev->msix.entries * PCI_MSIX_ENTRY_SIZE)) {
- return 1;
- } else {
- return 0;
- }
+ MemoryRegion *mr;
+
+ mr = pbdev->pdev->io_regions[pcias].memory;
+ mr = s390_get_subregion(mr, offset, len);
+ offset -= mr->addr;
+ return memory_region_dispatch_write(mr, offset, data, len,
+ MEMTXATTRS_UNSPECIFIED);
}
-int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
+int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
uint64_t offset, data;
S390PCIBusDevice *pbdev;
- MemoryRegion *mr;
MemTxResult result;
uint8_t len;
uint32_t fh;
@@ -439,12 +490,12 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
- program_interrupt(env, PGM_PRIVILEGED, 4);
+ s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
return 0;
}
if (r2 & 0x1) {
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return 0;
}
@@ -452,6 +503,12 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
pcias = (env->regs[r2] >> 16) & 0xf;
len = env->regs[r2] & 0xf;
offset = env->regs[r2 + 1];
+ data = env->regs[r1];
+
+ if (!(fh & FH_MASK_ENABLE)) {
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
if (!pbdev) {
@@ -461,12 +518,10 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
}
switch (pbdev->state) {
- case ZPCI_FS_RESERVED:
- case ZPCI_FS_STANDBY:
- case ZPCI_FS_DISABLED:
+ /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
+ * are already covered by the FH_MASK_ENABLE check above
+ */
case ZPCI_FS_PERMANENT_ERROR:
- setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
- return 0;
case ZPCI_FS_ERROR:
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
@@ -475,52 +530,37 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
break;
}
- data = env->regs[r1];
- if (pcias < 6) {
- if ((8 - (offset & 0x7)) < len) {
- program_interrupt(env, PGM_OPERAND, 4);
+ switch (pcias) {
+ /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
+ case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
+ /* Check length:
+ * A length of 0 is invalid and the access must not cross a doubleword boundary
+ */
+ if (!len || (len > (8 - (offset & 0x7)))) {
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
- if (trap_msix(pbdev, offset, pcias)) {
- offset = offset - pbdev->msix.table_offset;
- mr = &pbdev->pdev->msix_table_mmio;
- } else {
- mr = pbdev->pdev->io_regions[pcias].memory;
- }
-
- result = memory_region_dispatch_write(mr, offset, data, len,
- MEMTXATTRS_UNSPECIFIED);
+ result = zpci_write_bar(pbdev, pcias, offset, data, len);
if (result != MEMTX_OK) {
- program_interrupt(env, PGM_OPERAND, 4);
- return 0;
- }
- } else if (pcias == 15) {
- if ((4 - (offset & 0x3)) < len) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
- switch (len) {
- case 1:
- break;
- case 2:
- data = bswap16(data);
- break;
- case 4:
- data = bswap32(data);
- break;
- case 8:
- data = bswap64(data);
- break;
- default:
- program_interrupt(env, PGM_OPERAND, 4);
+ break;
+ case ZPCI_CONFIG_BAR:
+ /* ZPCI uses the pseudo BAR number 15 as configuration space */
+ /* possible access lengths are 1, 2 or 4 and must not cross a word boundary */
+ if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return 0;
}
-
+ /* len is 1, 2 or 4, so we do not need to check the return value */
+ zpci_endian_swap(&data, len);
pci_host_config_write_common(pbdev->pdev, offset,
pci_config_size(pbdev->pdev),
data, len);
- } else {
+ break;
+ default:
DPRINTF("pcistg invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
@@ -531,7 +571,7 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
return 0;
}
-int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
+int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
uint32_t fh;
@@ -545,12 +585,12 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
cpu_synchronize_state(CPU(cpu));
if (env->psw.mask & PSW_MASK_PSTATE) {
- program_interrupt(env, PGM_PRIVILEGED, 4);
+ s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
goto out;
}
if (r2 & 0x1) {
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
goto out;
}
@@ -624,12 +664,13 @@ out:
}
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
- uint8_t ar)
+ uint8_t ar, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
MemoryRegion *mr;
MemTxResult result;
+ uint64_t offset;
int i;
uint32_t fh;
uint8_t pcias;
@@ -637,29 +678,17 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
uint8_t buffer[128];
if (env->psw.mask & PSW_MASK_PSTATE) {
- program_interrupt(env, PGM_PRIVILEGED, 6);
+ s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
return 0;
}
fh = env->regs[r1] >> 32;
pcias = (env->regs[r1] >> 16) & 0xf;
len = env->regs[r1] & 0xff;
+ offset = env->regs[r3];
- if (pcias > 5) {
- DPRINTF("pcistb invalid space\n");
- setcc(cpu, ZPCI_PCI_LS_ERR);
- s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
- return 0;
- }
-
- switch (len) {
- case 16:
- case 32:
- case 64:
- case 128:
- break;
- default:
- program_interrupt(env, PGM_SPECIFICATION, 6);
+ if (!(fh & FH_MASK_ENABLE)) {
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
@@ -671,12 +700,7 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
}
switch (pbdev->state) {
- case ZPCI_FS_RESERVED:
- case ZPCI_FS_STANDBY:
- case ZPCI_FS_DISABLED:
case ZPCI_FS_PERMANENT_ERROR:
- setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
- return 0;
case ZPCI_FS_ERROR:
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
@@ -685,28 +709,62 @@ int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
break;
}
+ if (pcias > ZPCI_IO_BAR_MAX) {
+ DPRINTF("pcistb invalid space\n");
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
+ return 0;
+ }
+
+ /* Verify the address, offset and length */
+ /* offset must be a multiple of 8 */
+ if (offset % 8) {
+ goto specification_error;
+ }
+ /* Length must be greater than 8, a multiple of 8 */
+ /* and not greater than maxstbl */
+ if ((len <= 8) || (len % 8) || (len > pbdev->maxstbl)) {
+ goto specification_error;
+ }
+ /* Do not cross a 4K-byte boundary */
+ if (((offset & 0xfff) + len) > 0x1000) {
+ goto specification_error;
+ }
+ /* Guest address must be double word aligned */
+ if (gaddr & 0x07UL) {
+ goto specification_error;
+ }
+
mr = pbdev->pdev->io_regions[pcias].memory;
- if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
- program_interrupt(env, PGM_OPERAND, 6);
+ mr = s390_get_subregion(mr, offset, len);
+ offset -= mr->addr;
+
+ if (!memory_region_access_valid(mr, offset, len, true)) {
+ s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return 0;
}
if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
for (i = 0; i < len / 8; i++) {
- result = memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
- ldq_p(buffer + i * 8), 8,
- MEMTXATTRS_UNSPECIFIED);
+ result = memory_region_dispatch_write(mr, offset + i * 8,
+ ldq_p(buffer + i * 8), 8,
+ MEMTXATTRS_UNSPECIFIED);
if (result != MEMTX_OK) {
- program_interrupt(env, PGM_OPERAND, 6);
+ s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return 0;
}
}
setcc(cpu, ZPCI_PCI_LS_OK);
return 0;
+
+specification_error:
+ s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
+ return 0;
}
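
The new checks above are the whole PCISTB operand validation. As a quick reference, here is a minimal standalone sketch that collects them into one predicate; the helper name pcistb_params_valid and the main() harness are purely illustrative, only the limits themselves (doubleword alignment, length a multiple of 8 in (8, maxstbl], no 4K crossing) come from the hunk above.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative restatement of the PCISTB operand checks above; the
     * helper name and standalone form are not part of the patch. */
    static bool pcistb_params_valid(uint64_t offset, uint64_t gaddr,
                                    uint16_t len, uint16_t maxstbl)
    {
        if (offset % 8) {                        /* offset: multiple of 8 */
            return false;
        }
        if (len <= 8 || len % 8 || len > maxstbl) {
            return false;                        /* length: multiple of 8 in (8, maxstbl] */
        }
        if (((offset & 0xfff) + len) > 0x1000) { /* must not cross a 4K boundary */
            return false;
        }
        if (gaddr & 0x07ULL) {                   /* guest address doubleword aligned */
            return false;
        }
        return true;
    }

    int main(void)
    {
        /* a 64-byte store at BAR offset 0x40 with maxstbl == 128 passes */
        return pcistb_params_valid(0x40, 0x2000, 64, 128) ? 0 : 1;
    }
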
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
@@ -767,7 +825,8 @@ int pci_dereg_irqs(S390PCIBusDevice *pbdev)
return 0;
}
-static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib)
+static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib,
+ uintptr_t ra)
{
uint64_t pba = ldq_p(&fib.pba);
uint64_t pal = ldq_p(&fib.pal);
@@ -776,14 +835,14 @@ static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib)
uint8_t t = (g_iota >> 11) & 0x1;
if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
- program_interrupt(env, PGM_OPERAND, 6);
+ s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return -EINVAL;
}
/* currently we only support designation type 1 with translation */
if (!(dt == ZPCI_IOTA_RTTO && t)) {
error_report("unsupported ioat dt %d t %d", dt, t);
- program_interrupt(env, PGM_OPERAND, 6);
+ s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return -EINVAL;
}
@@ -804,7 +863,8 @@ void pci_dereg_ioat(S390PCIIOMMU *iommu)
iommu->g_iota = 0;
}
-int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
+int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
+ uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
uint8_t oc, dmaas;
@@ -814,7 +874,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
uint64_t cc = ZPCI_PCI_LS_OK;
if (env->psw.mask & PSW_MASK_PSTATE) {
- program_interrupt(env, PGM_PRIVILEGED, 6);
+ s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
return 0;
}
@@ -823,7 +883,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
fh = env->regs[r1] >> 32;
if (fiba & 0x7) {
- program_interrupt(env, PGM_SPECIFICATION, 6);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
return 0;
}
@@ -846,11 +906,12 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
}
if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
if (fib.fmt != 0) {
- program_interrupt(env, PGM_OPERAND, 6);
+ s390_program_interrupt(env, PGM_OPERAND, 6, ra);
return 0;
}
@@ -879,7 +940,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
} else if (pbdev->iommu->enabled) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
- } else if (reg_ioat(env, pbdev->iommu, fib)) {
+ } else if (reg_ioat(env, pbdev->iommu, fib, ra)) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
}
@@ -904,7 +965,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
} else {
pci_dereg_ioat(pbdev->iommu);
- if (reg_ioat(env, pbdev->iommu, fib)) {
+ if (reg_ioat(env, pbdev->iommu, fib, ra)) {
cc = ZPCI_PCI_LS_ERR;
s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
}
@@ -935,7 +996,7 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
break;
default:
- program_interrupt(&cpu->env, PGM_OPERAND, 6);
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, 6, ra);
cc = ZPCI_PCI_LS_ERR;
}
@@ -943,7 +1004,8 @@ int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
return 0;
}
-int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
+int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
+ uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
uint8_t dmaas;
@@ -954,7 +1016,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
uint64_t cc = ZPCI_PCI_LS_OK;
if (env->psw.mask & PSW_MASK_PSTATE) {
- program_interrupt(env, PGM_PRIVILEGED, 6);
+ s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
return 0;
}
@@ -968,7 +1030,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
}
if (fiba & 0x7) {
- program_interrupt(env, PGM_SPECIFICATION, 6);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
return 0;
}
@@ -1026,6 +1088,7 @@ int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
out:
if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return 0;
}
diff --git a/hw/s390x/s390-pci-inst.h b/hw/s390x/s390-pci-inst.h
index 94a959f91c..91c3d61f2a 100644
--- a/hw/s390x/s390-pci-inst.h
+++ b/hw/s390x/s390-pci-inst.h
@@ -162,7 +162,7 @@ typedef struct ClpRspQueryPciGrp {
#define CLP_RSP_QPCIG_MASK_FRAME 0x2
#define CLP_RSP_QPCIG_MASK_REFRESH 0x1
uint8_t fr;
- uint16_t reserved2;
+ uint16_t maxstbl;
uint16_t mui;
uint64_t reserved3;
uint64_t dasm; /* dma address space mask */
@@ -293,13 +293,19 @@ typedef struct ZpciFib {
int pci_dereg_irqs(S390PCIBusDevice *pbdev);
void pci_dereg_ioat(S390PCIIOMMU *iommu);
-int clp_service_call(S390CPU *cpu, uint8_t r2);
-int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
-int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
-int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
+int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra);
+int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
+int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
+int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra);
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
- uint8_t ar);
-int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar);
-int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar);
+ uint8_t ar, uintptr_t ra);
+int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
+ uintptr_t ra);
+int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
+ uintptr_t ra);
+
+#define ZPCI_IO_BAR_MIN 0
+#define ZPCI_IO_BAR_MAX 5
+#define ZPCI_CONFIG_BAR 15
#endif
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 6a57f94197..35df7e19c5 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -152,14 +152,38 @@ static void virtio_ccw_register_hcalls(void)
virtio_ccw_hcall_early_printk);
}
+/*
+ * KVM only supports memory slots of up to KVM_MEM_MAX_NR_PAGES pages
+ * as the dirty bitmap must be managed by bitops that take an int as
+ * position indicator. If we have a guest beyond that we will split off
+ * new subregions. The split must happen on a segment boundary (1MB).
+ */
+#define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1)
+#define SEG_MSK (~0xfffffULL)
+#define KVM_SLOT_MAX_BYTES ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK)
static void s390_memory_init(ram_addr_t mem_size)
{
MemoryRegion *sysmem = get_system_memory();
- MemoryRegion *ram = g_new(MemoryRegion, 1);
+ ram_addr_t chunk, offset = 0;
+ unsigned int number = 0;
+ gchar *name;
/* allocate RAM for core */
- memory_region_allocate_system_memory(ram, NULL, "s390.ram", mem_size);
- memory_region_add_subregion(sysmem, 0, ram);
+ name = g_strdup_printf("s390.ram");
+ while (mem_size) {
+ MemoryRegion *ram = g_new(MemoryRegion, 1);
+ uint64_t size = mem_size;
+
+ /* KVM does not allow memslots >= 8 TB */
+ chunk = MIN(size, KVM_SLOT_MAX_BYTES);
+ memory_region_allocate_system_memory(ram, NULL, name, chunk);
+ memory_region_add_subregion(sysmem, offset, ram);
+ mem_size -= chunk;
+ offset += chunk;
+ g_free(name);
+ name = g_strdup_printf("s390.ram.%u", ++number);
+ }
+ g_free(name);
/* Initialize storage key device */
s390_skeys_init();
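
The comment at the top of this hunk gives the reason for splitting RAM; concrete numbers make the chunk arithmetic easier to follow. Below is a standalone sketch that reuses the patch's constants and, assuming a 4KiB TARGET_PAGE_SIZE (so one slot is 8TiB minus 1MiB), prints the regions the loop above would create for a hypothetical 20TiB guest. Only the region names come from the patch; the rest is illustrative.

    #include <stdio.h>
    #include <stdint.h>

    #define TARGET_PAGE_SIZE     4096ULL          /* assumption: 4KiB pages */
    #define KVM_MEM_MAX_NR_PAGES ((1ULL << 31) - 1)
    #define SEG_MSK              (~0xfffffULL)    /* 1MB segment boundary */
    #define KVM_SLOT_MAX_BYTES   ((KVM_MEM_MAX_NR_PAGES * TARGET_PAGE_SIZE) & SEG_MSK)

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint64_t mem_size = 20ULL << 40;          /* e.g. a 20 TiB guest */
        uint64_t offset = 0;
        unsigned int number = 0;

        while (mem_size) {
            uint64_t chunk = MIN(mem_size, KVM_SLOT_MAX_BYTES);

            if (number == 0) {
                printf("s390.ram    @ 0x%016llx size 0x%llx\n",
                       (unsigned long long)offset, (unsigned long long)chunk);
            } else {
                printf("s390.ram.%u @ 0x%016llx size 0x%llx\n",
                       number, (unsigned long long)offset,
                       (unsigned long long)chunk);
            }
            mem_size -= chunk;
            offset += chunk;
            number++;
        }
        return 0;
    }
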
@@ -302,13 +326,17 @@ static void ccw_init(MachineState *machine)
/*
* Non mcss-e enabled guests only see the devices from the default
* css, which is determined by the value of the squash_mcss property.
- * Note: we must not squash non virtual devices to css 0xFE.
*/
if (css_bus->squash_mcss) {
ret = css_create_css_image(0, true);
} else {
ret = css_create_css_image(VIRTUAL_CSSID, true);
}
+ if (qemu_opt_get(qemu_get_machine_opts(), "s390-squash-mcss")) {
+ warn_report("The machine property 's390-squash-mcss' is deprecated"
+ " (obsoleted by lifting the cssid restrictions).");
+ }
+
assert(ret == 0);
if (css_migration_enabled()) {
css_register_vmstate();
@@ -583,7 +611,7 @@ static inline void s390_machine_initfn(Object *obj)
object_property_add_bool(obj, "s390-squash-mcss",
machine_get_squash_mcss,
machine_set_squash_mcss, NULL);
- object_property_set_description(obj, "s390-squash-mcss",
+ object_property_set_description(obj, "s390-squash-mcss", "(deprecated) "
"enable/disable squashing subchannels into the default css",
NULL);
object_property_set_bool(obj, false, "s390-squash-mcss", NULL);
@@ -639,6 +667,9 @@ bool css_migration_enabled(void)
} \
type_init(ccw_machine_register_##suffix)
+#define CCW_COMPAT_2_11 \
+ HW_COMPAT_2_11
+
#define CCW_COMPAT_2_10 \
HW_COMPAT_2_10
@@ -716,14 +747,30 @@ bool css_migration_enabled(void)
.value = "0",\
},
+static void ccw_machine_2_12_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_2_12_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE(2_12, "2.12", true);
+
static void ccw_machine_2_11_instance_options(MachineState *machine)
{
+ static const S390FeatInit qemu_cpu_feat = { S390_FEAT_LIST_QEMU_V2_11 };
+ ccw_machine_2_12_instance_options(machine);
+
+ /* before 2.12 we emulated the very first z900 */
+ s390_set_qemu_cpu_model(0x2064, 7, 1, qemu_cpu_feat);
}
static void ccw_machine_2_11_class_options(MachineClass *mc)
{
+ ccw_machine_2_12_class_options(mc);
+ SET_MACHINE_COMPAT(mc, CCW_COMPAT_2_11);
}
-DEFINE_CCW_MACHINE(2_11, "2.11", true);
+DEFINE_CCW_MACHINE(2_11, "2.11", false);
static void ccw_machine_2_10_instance_options(MachineState *machine)
{
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 184515ce94..38f6a8afc9 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -486,7 +486,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
} else {
address_space_stb(&address_space_memory, ccw.cda, vdev->status,
MEMTXATTRS_UNSPECIFIED, NULL);
- sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);;
+ sch->curr_status.scsw.count = ccw.count - sizeof(vdev->status);
ret = 0;
}
break;
@@ -701,7 +701,7 @@ static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
SubchDev *sch;
Error *err = NULL;
- sch = css_create_sch(ccw_dev->devno, true, cbus->squash_mcss, errp);
+ sch = css_create_sch(ccw_dev->devno, cbus->squash_mcss, errp);
if (!sch) {
return;
}
diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c
index 977f7bce1f..965becf31f 100644
--- a/hw/scsi/scsi-bus.c
+++ b/hw/scsi/scsi-bus.c
@@ -540,20 +540,8 @@ static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
if (req->lun != 0) {
const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);
- if (fixed_sense) {
- r->buf[0] = 0x70;
- r->buf[2] = sense.key;
- r->buf[10] = 10;
- r->buf[12] = sense.asc;
- r->buf[13] = sense.ascq;
- r->len = MIN(req->cmd.xfer, SCSI_SENSE_LEN);
- } else {
- r->buf[0] = 0x72;
- r->buf[1] = sense.key;
- r->buf[2] = sense.asc;
- r->buf[3] = sense.ascq;
- r->len = 8;
- }
+ r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
+ sense, fixed_sense);
} else {
r->len = scsi_device_get_sense(r->req.dev, r->buf,
MIN(req->cmd.xfer, r->buf_len),
diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c
index 12431177a7..e58833a087 100644
--- a/hw/scsi/scsi-disk.c
+++ b/hw/scsi/scsi-disk.c
@@ -2332,7 +2332,6 @@ static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
- Error *err = NULL;
if (!s->qdev.conf.blk) {
error_setg(errp, "drive property not set");
@@ -2356,17 +2355,13 @@ static void scsi_realize(SCSIDevice *dev, Error **errp)
}
if (dev->type == TYPE_DISK) {
- blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err);
- if (err) {
- error_propagate(errp, err);
+ if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
return;
}
}
- blkconf_apply_backend_options(&dev->conf,
- blk_is_read_only(s->qdev.conf.blk),
- dev->type == TYPE_DISK, &err);
- if (err) {
- error_propagate(errp, err);
+ if (!blkconf_apply_backend_options(&dev->conf,
+ blk_is_read_only(s->qdev.conf.blk),
+ dev->type == TYPE_DISK, errp)) {
return;
}
@@ -3009,6 +3004,7 @@ static const TypeInfo scsi_cd_info = {
static Property scsi_block_properties[] = {
DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
+ DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c
index f7561e23fa..9389ed48e0 100644
--- a/hw/scsi/vhost-user-scsi.c
+++ b/hw/scsi/vhost-user-scsi.c
@@ -18,7 +18,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
-#include "qemu/typedefs.h"
#include "qom/object.h"
#include "hw/fw-path-provider.h"
#include "hw/qdev-core.h"
diff --git a/hw/smbios/smbios_type_38-stub.c b/hw/smbios/smbios_type_38-stub.c
index 9528c2c28e..5b83c9b1f1 100644
--- a/hw/smbios/smbios_type_38-stub.c
+++ b/hw/smbios/smbios_type_38-stub.c
@@ -7,6 +7,7 @@
* See the COPYING file in the top-level directory.
*/
+#include "qemu/osdep.h"
#include "hw/smbios/ipmi.h"
void smbios_build_type_38_table(void)
diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c
index 24c2b8a555..e71648404c 100644
--- a/hw/sparc/sun4m.c
+++ b/hw/sparc/sun4m.c
@@ -36,7 +36,6 @@
#include "net/net.h"
#include "hw/boards.h"
#include "hw/scsi/esp.h"
-#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "hw/nvram/sun_nvram.h"
#include "hw/nvram/chrp_nvram.h"
diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c
index cb515730c5..5059396bc6 100644
--- a/hw/ssi/aspeed_smc.c
+++ b/hw/ssi/aspeed_smc.c
@@ -26,8 +26,7 @@
#include "hw/sysbus.h"
#include "sysemu/sysemu.h"
#include "qemu/log.h"
-#include "include/qemu/error-report.h"
-#include "exec/address-spaces.h"
+#include "qemu/error-report.h"
#include "hw/ssi/aspeed_smc.h"
diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c
index ef56d35f2c..d8187fadd1 100644
--- a/hw/ssi/xilinx_spips.c
+++ b/hw/ssi/xilinx_spips.c
@@ -27,11 +27,11 @@
#include "sysemu/sysemu.h"
#include "hw/ptimer.h"
#include "qemu/log.h"
-#include "qemu/fifo8.h"
-#include "hw/ssi/ssi.h"
#include "qemu/bitops.h"
#include "hw/ssi/xilinx_spips.h"
#include "qapi/error.h"
+#include "hw/register.h"
+#include "sysemu/dma.h"
#include "migration/blocker.h"
#ifndef XILINX_SPIPS_ERR_DEBUG
@@ -48,7 +48,7 @@
/* config register */
#define R_CONFIG (0x00 / 4)
#define IFMODE (1U << 31)
-#define ENDIAN (1 << 26)
+#define R_CONFIG_ENDIAN (1 << 26)
#define MODEFAIL_GEN_EN (1 << 17)
#define MAN_START_COM (1 << 16)
#define MAN_START_EN (1 << 15)
@@ -66,17 +66,35 @@
/* interrupt mechanism */
#define R_INTR_STATUS (0x04 / 4)
+#define R_INTR_STATUS_RESET (0x104)
#define R_INTR_EN (0x08 / 4)
#define R_INTR_DIS (0x0C / 4)
#define R_INTR_MASK (0x10 / 4)
#define IXR_TX_FIFO_UNDERFLOW (1 << 6)
+/* Poll timeout not implemented */
+#define IXR_RX_FIFO_EMPTY (1 << 11)
+#define IXR_GENERIC_FIFO_FULL (1 << 10)
+#define IXR_GENERIC_FIFO_NOT_FULL (1 << 9)
+#define IXR_TX_FIFO_EMPTY (1 << 8)
+#define IXR_GENERIC_FIFO_EMPTY (1 << 7)
#define IXR_RX_FIFO_FULL (1 << 5)
#define IXR_RX_FIFO_NOT_EMPTY (1 << 4)
#define IXR_TX_FIFO_FULL (1 << 3)
#define IXR_TX_FIFO_NOT_FULL (1 << 2)
#define IXR_TX_FIFO_MODE_FAIL (1 << 1)
#define IXR_RX_FIFO_OVERFLOW (1 << 0)
-#define IXR_ALL ((IXR_TX_FIFO_UNDERFLOW<<1)-1)
+#define IXR_ALL ((1 << 13) - 1)
+#define GQSPI_IXR_MASK 0xFBE
+#define IXR_SELF_CLEAR \
+(IXR_GENERIC_FIFO_EMPTY \
+| IXR_GENERIC_FIFO_FULL \
+| IXR_GENERIC_FIFO_NOT_FULL \
+| IXR_TX_FIFO_EMPTY \
+| IXR_TX_FIFO_FULL \
+| IXR_TX_FIFO_NOT_FULL \
+| IXR_RX_FIFO_EMPTY \
+| IXR_RX_FIFO_FULL \
+| IXR_RX_FIFO_NOT_EMPTY)
#define R_EN (0x14 / 4)
#define R_DELAY (0x18 / 4)
@@ -85,6 +103,9 @@
#define R_SLAVE_IDLE_COUNT (0x24 / 4)
#define R_TX_THRES (0x28 / 4)
#define R_RX_THRES (0x2C / 4)
+#define R_GPIO (0x30 / 4)
+#define R_LPBK_DLY_ADJ (0x38 / 4)
+#define R_LPBK_DLY_ADJ_RESET (0x33)
#define R_TXD1 (0x80 / 4)
#define R_TXD2 (0x84 / 4)
#define R_TXD3 (0x88 / 4)
@@ -93,8 +114,9 @@
#define R_LQSPI_CFG_RESET 0x03A002EB
#define LQSPI_CFG_LQ_MODE (1U << 31)
#define LQSPI_CFG_TWO_MEM (1 << 30)
-#define LQSPI_CFG_SEP_BUS (1 << 30)
+#define LQSPI_CFG_SEP_BUS (1 << 29)
#define LQSPI_CFG_U_PAGE (1 << 28)
+#define LQSPI_CFG_ADDR4 (1 << 27)
#define LQSPI_CFG_MODE_EN (1 << 25)
#define LQSPI_CFG_MODE_WIDTH 8
#define LQSPI_CFG_MODE_SHIFT 16
@@ -102,115 +124,168 @@
#define LQSPI_CFG_DUMMY_SHIFT 8
#define LQSPI_CFG_INST_CODE 0xFF
+#define R_CMND (0xc0 / 4)
+ #define R_CMND_RXFIFO_DRAIN (1 << 19)
+ FIELD(CMND, PARTIAL_BYTE_LEN, 16, 3)
+#define R_CMND_EXT_ADD (1 << 15)
+ FIELD(CMND, RX_DISCARD, 8, 7)
+ FIELD(CMND, DUMMY_CYCLES, 2, 6)
+#define R_CMND_DMA_EN (1 << 1)
+#define R_CMND_PUSH_WAIT (1 << 0)
+#define R_TRANSFER_SIZE (0xc4 / 4)
#define R_LQSPI_STS (0xA4 / 4)
#define LQSPI_STS_WR_RECVD (1 << 1)
#define R_MOD_ID (0xFC / 4)
+#define R_GQSPI_SELECT (0x144 / 4)
+ FIELD(GQSPI_SELECT, GENERIC_QSPI_EN, 0, 1)
+#define R_GQSPI_ISR (0x104 / 4)
+#define R_GQSPI_IER (0x108 / 4)
+#define R_GQSPI_IDR (0x10c / 4)
+#define R_GQSPI_IMR (0x110 / 4)
+#define R_GQSPI_IMR_RESET (0xfbe)
+#define R_GQSPI_TX_THRESH (0x128 / 4)
+#define R_GQSPI_RX_THRESH (0x12c / 4)
+#define R_GQSPI_GPIO (0x130 / 4)
+#define R_GQSPI_LPBK_DLY_ADJ (0x138 / 4)
+#define R_GQSPI_LPBK_DLY_ADJ_RESET (0x33)
+#define R_GQSPI_CNFG (0x100 / 4)
+ FIELD(GQSPI_CNFG, MODE_EN, 30, 2)
+ FIELD(GQSPI_CNFG, GEN_FIFO_START_MODE, 29, 1)
+ FIELD(GQSPI_CNFG, GEN_FIFO_START, 28, 1)
+ FIELD(GQSPI_CNFG, ENDIAN, 26, 1)
+ /* Poll timeout not implemented */
+ FIELD(GQSPI_CNFG, EN_POLL_TIMEOUT, 20, 1)
+ /* QEMU doesn't care about any of these last three */
+ FIELD(GQSPI_CNFG, BR, 3, 3)
+ FIELD(GQSPI_CNFG, CPH, 2, 1)
+ FIELD(GQSPI_CNFG, CPL, 1, 1)
+#define R_GQSPI_GEN_FIFO (0x140 / 4)
+#define R_GQSPI_TXD (0x11c / 4)
+#define R_GQSPI_RXD (0x120 / 4)
+#define R_GQSPI_FIFO_CTRL (0x14c / 4)
+ FIELD(GQSPI_FIFO_CTRL, RX_FIFO_RESET, 2, 1)
+ FIELD(GQSPI_FIFO_CTRL, TX_FIFO_RESET, 1, 1)
+ FIELD(GQSPI_FIFO_CTRL, GENERIC_FIFO_RESET, 0, 1)
+#define R_GQSPI_GFIFO_THRESH (0x150 / 4)
+#define R_GQSPI_DATA_STS (0x15c / 4)
+/* We use the snapshot register to hold the core state for the currently
+ * or most recently executed command. So the generic fifo format is defined
+ * for the snapshot register
+ */
+#define R_GQSPI_GF_SNAPSHOT (0x160 / 4)
+ FIELD(GQSPI_GF_SNAPSHOT, POLL, 19, 1)
+ FIELD(GQSPI_GF_SNAPSHOT, STRIPE, 18, 1)
+ FIELD(GQSPI_GF_SNAPSHOT, RECIEVE, 17, 1)
+ FIELD(GQSPI_GF_SNAPSHOT, TRANSMIT, 16, 1)
+ FIELD(GQSPI_GF_SNAPSHOT, DATA_BUS_SELECT, 14, 2)
+ FIELD(GQSPI_GF_SNAPSHOT, CHIP_SELECT, 12, 2)
+ FIELD(GQSPI_GF_SNAPSHOT, SPI_MODE, 10, 2)
+ FIELD(GQSPI_GF_SNAPSHOT, EXPONENT, 9, 1)
+ FIELD(GQSPI_GF_SNAPSHOT, DATA_XFER, 8, 1)
+ FIELD(GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA, 0, 8)
+#define R_GQSPI_MOD_ID (0x1fc / 4)
+#define R_GQSPI_MOD_ID_RESET (0x10a0000)
+
+#define R_QSPIDMA_DST_CTRL (0x80c / 4)
+#define R_QSPIDMA_DST_CTRL_RESET (0x803ffa00)
+#define R_QSPIDMA_DST_I_MASK (0x820 / 4)
+#define R_QSPIDMA_DST_I_MASK_RESET (0xfe)
+#define R_QSPIDMA_DST_CTRL2 (0x824 / 4)
+#define R_QSPIDMA_DST_CTRL2_RESET (0x081bfff8)
+
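
The comment above R_GQSPI_GF_SNAPSHOT says one generic-FIFO word captures a whole command. As a rough aid, here is a hedged decode of a made-up entry, using plain shifts instead of the FIELD() helpers so it compiles on its own; the bit positions follow the FIELD() list above, the example value is invented.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* made-up entry: TRANSMIT + DATA_XFER, both data busses,
         * lower chip select, 32 immediate (payload count) bytes */
        uint32_t snap = (1u << 16) | (1u << 8) | (3u << 14) | (1u << 12) | 0x20;

        printf("transmit      %u\n", (snap >> 16) & 1);
        printf("receive       %u\n", (snap >> 17) & 1);
        printf("stripe        %u\n", (snap >> 18) & 1);
        printf("data bus sel  %u\n", (snap >> 14) & 3);
        printf("chip select   %u\n", (snap >> 12) & 3);
        printf("data xfer     %u\n", (snap >> 8) & 1);
        printf("byte count    %u\n", snap & 0xff);
        return 0;
    }
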
/* size of TXRX FIFOs */
-#define RXFF_A 32
-#define TXFF_A 32
+#define RXFF_A (128)
+#define TXFF_A (128)
#define RXFF_A_Q (64 * 4)
#define TXFF_A_Q (64 * 4)
/* 16MB per linear region */
#define LQSPI_ADDRESS_BITS 24
-/* Bite off 4k chunks at a time */
-#define LQSPI_CACHE_SIZE 1024
#define SNOOP_CHECKING 0xFF
-#define SNOOP_NONE 0xFE
+#define SNOOP_ADDR 0xF0
+#define SNOOP_NONE 0xEE
#define SNOOP_STRIPING 0
-typedef enum {
- READ = 0x3,
- FAST_READ = 0xb,
- DOR = 0x3b,
- QOR = 0x6b,
- DIOR = 0xbb,
- QIOR = 0xeb,
-
- PP = 0x2,
- DPP = 0xa2,
- QPP = 0x32,
-} FlashCMD;
-
-typedef struct {
- XilinxSPIPS parent_obj;
-
- uint8_t lqspi_buf[LQSPI_CACHE_SIZE];
- hwaddr lqspi_cached_addr;
- Error *migration_blocker;
- bool mmio_execution_enabled;
-} XilinxQSPIPS;
-
-typedef struct XilinxSPIPSClass {
- SysBusDeviceClass parent_class;
-
- const MemoryRegionOps *reg_ops;
-
- uint32_t rx_fifo_size;
- uint32_t tx_fifo_size;
-} XilinxSPIPSClass;
-
static inline int num_effective_busses(XilinxSPIPS *s)
{
return (s->regs[R_LQSPI_CFG] & LQSPI_CFG_SEP_BUS &&
s->regs[R_LQSPI_CFG] & LQSPI_CFG_TWO_MEM) ? s->num_busses : 1;
}
-static inline bool xilinx_spips_cs_is_set(XilinxSPIPS *s, int i, int field)
-{
- return ~field & (1 << i) && (s->regs[R_CONFIG] & MANUAL_CS
- || !fifo8_is_empty(&s->tx_fifo));
-}
-
-static void xilinx_spips_update_cs_lines(XilinxSPIPS *s)
+static void xilinx_spips_update_cs(XilinxSPIPS *s, int field)
{
- int i, j;
- bool found = false;
- int field = s->regs[R_CONFIG] >> CS_SHIFT;
+ int i;
for (i = 0; i < s->num_cs; i++) {
- for (j = 0; j < num_effective_busses(s); j++) {
- int upage = !!(s->regs[R_LQSPI_STS] & LQSPI_CFG_U_PAGE);
- int cs_to_set = (j * s->num_cs + i + upage) %
- (s->num_cs * s->num_busses);
-
- if (xilinx_spips_cs_is_set(s, i, field) && !found) {
- DB_PRINT_L(0, "selecting slave %d\n", i);
- qemu_set_irq(s->cs_lines[cs_to_set], 0);
- } else {
- DB_PRINT_L(0, "deselecting slave %d\n", i);
- qemu_set_irq(s->cs_lines[cs_to_set], 1);
- }
- }
- if (xilinx_spips_cs_is_set(s, i, field)) {
- found = true;
+ bool old_state = s->cs_lines_state[i];
+ bool new_state = field & (1 << i);
+
+ if (old_state != new_state) {
+ s->cs_lines_state[i] = new_state;
+ s->rx_discard = ARRAY_FIELD_EX32(s->regs, CMND, RX_DISCARD);
+ DB_PRINT_L(1, "%sselecting slave %d\n", new_state ? "" : "de", i);
}
+ qemu_set_irq(s->cs_lines[i], !new_state);
}
- if (!found) {
+ if (!(field & ((1 << s->num_cs) - 1))) {
s->snoop_state = SNOOP_CHECKING;
+ s->cmd_dummies = 0;
+ s->link_state = 1;
+ s->link_state_next = 1;
+ s->link_state_next_when = 0;
DB_PRINT_L(1, "moving to snoop check state\n");
}
}
+static void xlnx_zynqmp_qspips_update_cs_lines(XlnxZynqMPQSPIPS *s)
+{
+ if (s->regs[R_GQSPI_GF_SNAPSHOT]) {
+ int field = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, CHIP_SELECT);
+ xilinx_spips_update_cs(XILINX_SPIPS(s), field);
+ }
+}
+
+static void xilinx_spips_update_cs_lines(XilinxSPIPS *s)
+{
+ int field = ~((s->regs[R_CONFIG] & CS) >> CS_SHIFT);
+
+ /* In dual parallel, mirror low CS to both */
+ if (num_effective_busses(s) == 2) {
+ /* Single bit chip-select for qspi */
+ field &= 0x1;
+ field |= field << 1;
+ /* Dual stack U-Page */
+ } else if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_TWO_MEM &&
+ s->regs[R_LQSPI_STS] & LQSPI_CFG_U_PAGE) {
+ /* Single bit chip-select for qspi */
+ field &= 0x1;
+ /* change from CS0 to CS1 */
+ field <<= 1;
+ }
+ /* Auto CS */
+ if (!(s->regs[R_CONFIG] & MANUAL_CS) &&
+ fifo8_is_empty(&s->tx_fifo)) {
+ field = 0;
+ }
+ xilinx_spips_update_cs(s, field);
+}
+
static void xilinx_spips_update_ixr(XilinxSPIPS *s)
{
- if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_LQ_MODE) {
- return;
+ if (!(s->regs[R_LQSPI_CFG] & LQSPI_CFG_LQ_MODE)) {
+ s->regs[R_INTR_STATUS] &= ~IXR_SELF_CLEAR;
+ s->regs[R_INTR_STATUS] |=
+ (fifo8_is_full(&s->rx_fifo) ? IXR_RX_FIFO_FULL : 0) |
+ (s->rx_fifo.num >= s->regs[R_RX_THRES] ?
+ IXR_RX_FIFO_NOT_EMPTY : 0) |
+ (fifo8_is_full(&s->tx_fifo) ? IXR_TX_FIFO_FULL : 0) |
+ (fifo8_is_empty(&s->tx_fifo) ? IXR_TX_FIFO_EMPTY : 0) |
+ (s->tx_fifo.num < s->regs[R_TX_THRES] ? IXR_TX_FIFO_NOT_FULL : 0);
}
- /* These are set/cleared as they occur */
- s->regs[R_INTR_STATUS] &= (IXR_TX_FIFO_UNDERFLOW | IXR_RX_FIFO_OVERFLOW |
- IXR_TX_FIFO_MODE_FAIL);
- /* these are pure functions of fifo state, set them here */
- s->regs[R_INTR_STATUS] |=
- (fifo8_is_full(&s->rx_fifo) ? IXR_RX_FIFO_FULL : 0) |
- (s->rx_fifo.num >= s->regs[R_RX_THRES] ? IXR_RX_FIFO_NOT_EMPTY : 0) |
- (fifo8_is_full(&s->tx_fifo) ? IXR_TX_FIFO_FULL : 0) |
- (s->tx_fifo.num < s->regs[R_TX_THRES] ? IXR_TX_FIFO_NOT_FULL : 0);
- /* drive external interrupt pin */
int new_irqline = !!(s->regs[R_INTR_MASK] & s->regs[R_INTR_STATUS] &
IXR_ALL);
if (new_irqline != s->irqline) {
@@ -219,14 +294,42 @@ static void xilinx_spips_update_ixr(XilinxSPIPS *s)
}
}
+static void xlnx_zynqmp_qspips_update_ixr(XlnxZynqMPQSPIPS *s)
+{
+ uint32_t gqspi_int;
+ int new_irqline;
+
+ s->regs[R_GQSPI_ISR] &= ~IXR_SELF_CLEAR;
+ s->regs[R_GQSPI_ISR] |=
+ (fifo32_is_empty(&s->fifo_g) ? IXR_GENERIC_FIFO_EMPTY : 0) |
+ (fifo32_is_full(&s->fifo_g) ? IXR_GENERIC_FIFO_FULL : 0) |
+ (s->fifo_g.fifo.num < s->regs[R_GQSPI_GFIFO_THRESH] ?
+ IXR_GENERIC_FIFO_NOT_FULL : 0) |
+ (fifo8_is_empty(&s->rx_fifo_g) ? IXR_RX_FIFO_EMPTY : 0) |
+ (fifo8_is_full(&s->rx_fifo_g) ? IXR_RX_FIFO_FULL : 0) |
+ (s->rx_fifo_g.num >= s->regs[R_GQSPI_RX_THRESH] ?
+ IXR_RX_FIFO_NOT_EMPTY : 0) |
+ (fifo8_is_empty(&s->tx_fifo_g) ? IXR_TX_FIFO_EMPTY : 0) |
+ (fifo8_is_full(&s->tx_fifo_g) ? IXR_TX_FIFO_FULL : 0) |
+ (s->tx_fifo_g.num < s->regs[R_GQSPI_TX_THRESH] ?
+ IXR_TX_FIFO_NOT_FULL : 0);
+
+ /* GQSPI Interrupt Trigger Status */
+ gqspi_int = (~s->regs[R_GQSPI_IMR]) & s->regs[R_GQSPI_ISR] & GQSPI_IXR_MASK;
+ new_irqline = !!(gqspi_int & IXR_ALL);
+
+ /* drive external interrupt pin */
+ if (new_irqline != s->gqspi_irqline) {
+ s->gqspi_irqline = new_irqline;
+ qemu_set_irq(XILINX_SPIPS(s)->irq, s->gqspi_irqline);
+ }
+}
+
static void xilinx_spips_reset(DeviceState *d)
{
XilinxSPIPS *s = XILINX_SPIPS(d);
- int i;
- for (i = 0; i < XLNX_SPIPS_R_MAX; i++) {
- s->regs[i] = 0;
- }
+ memset(s->regs, 0, sizeof(s->regs));
fifo8_reset(&s->rx_fifo);
fifo8_reset(&s->rx_fifo);
@@ -238,19 +341,54 @@ static void xilinx_spips_reset(DeviceState *d)
/* FIXME: move magic number definition somewhere sensible */
s->regs[R_MOD_ID] = 0x01090106;
s->regs[R_LQSPI_CFG] = R_LQSPI_CFG_RESET;
+ s->link_state = 1;
+ s->link_state_next = 1;
+ s->link_state_next_when = 0;
s->snoop_state = SNOOP_CHECKING;
+ s->cmd_dummies = 0;
+ s->man_start_com = false;
xilinx_spips_update_ixr(s);
xilinx_spips_update_cs_lines(s);
}
-/* N way (num) in place bit striper. Lay out row wise bits (LSB to MSB)
+static void xlnx_zynqmp_qspips_reset(DeviceState *d)
+{
+ XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(d);
+
+ xilinx_spips_reset(d);
+
+ memset(s->regs, 0, sizeof(s->regs));
+
+ fifo8_reset(&s->rx_fifo_g);
+ fifo8_reset(&s->rx_fifo_g);
+ fifo32_reset(&s->fifo_g);
+ s->regs[R_INTR_STATUS] = R_INTR_STATUS_RESET;
+ s->regs[R_GPIO] = 1;
+ s->regs[R_LPBK_DLY_ADJ] = R_LPBK_DLY_ADJ_RESET;
+ s->regs[R_GQSPI_GFIFO_THRESH] = 0x10;
+ s->regs[R_MOD_ID] = 0x01090101;
+ s->regs[R_GQSPI_IMR] = R_GQSPI_IMR_RESET;
+ s->regs[R_GQSPI_TX_THRESH] = 1;
+ s->regs[R_GQSPI_RX_THRESH] = 1;
+ s->regs[R_GQSPI_GPIO] = 1;
+ s->regs[R_GQSPI_LPBK_DLY_ADJ] = R_GQSPI_LPBK_DLY_ADJ_RESET;
+ s->regs[R_GQSPI_MOD_ID] = R_GQSPI_MOD_ID_RESET;
+ s->regs[R_QSPIDMA_DST_CTRL] = R_QSPIDMA_DST_CTRL_RESET;
+ s->regs[R_QSPIDMA_DST_I_MASK] = R_QSPIDMA_DST_I_MASK_RESET;
+ s->regs[R_QSPIDMA_DST_CTRL2] = R_QSPIDMA_DST_CTRL2_RESET;
+ s->man_start_com_g = false;
+ s->gqspi_irqline = 0;
+ xlnx_zynqmp_qspips_update_ixr(s);
+}
+
+/* N way (num) in place bit striper. Lay out row wise bits (MSB to LSB)
* column wise (from element 0 to N-1). num is the length of x, and dir
* reverses the direction of the transform. Best illustrated by example:
* Each digit in the below array is a single bit (num == 3):
*
- * {{ 76543210, } ----- stripe (dir == false) -----> {{ FCheb630, }
- * { hgfedcba, } { GDAfc741, }
- * { HGFEDCBA, }} <---- upstripe (dir == true) ----- { HEBgda52, }}
+ * {{ 76543210, } ----- stripe (dir == false) -----> {{ 741gdaFC, }
+ * { hgfedcba, } { 630fcHEB, }
+ * { HGFEDCBA, }} <---- upstripe (dir == true) ----- { 52hebGDA, }}
*/
static inline void stripe8(uint8_t *x, int num, bool dir)
@@ -258,34 +396,188 @@ static inline void stripe8(uint8_t *x, int num, bool dir)
uint8_t r[num];
memset(r, 0, sizeof(uint8_t) * num);
int idx[2] = {0, 0};
- int bit[2] = {0, 0};
+ int bit[2] = {0, 7};
int d = dir;
for (idx[0] = 0; idx[0] < num; ++idx[0]) {
- for (bit[0] = 0; bit[0] < 8; ++bit[0]) {
- r[idx[d]] |= x[idx[!d]] & 1 << bit[!d] ? 1 << bit[d] : 0;
+ for (bit[0] = 7; bit[0] >= 0; bit[0]--) {
+ r[idx[!d]] |= x[idx[d]] & 1 << bit[d] ? 1 << bit[!d] : 0;
idx[1] = (idx[1] + 1) % num;
if (!idx[1]) {
- bit[1]++;
+ bit[1]--;
}
}
}
memcpy(x, r, sizeof(uint8_t) * num);
}
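
The reworked striper above is its own documentation, but the claim that dir reverses the transform is easy to check. A small self-contained round-trip test, with the function body copied from the hunk so the sketch compiles on its own (the example bytes are arbitrary):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* the striper, copied from the hunk above so this sketch stands alone */
    static inline void stripe8(uint8_t *x, int num, bool dir)
    {
        uint8_t r[num];
        memset(r, 0, sizeof(uint8_t) * num);
        int idx[2] = {0, 0};
        int bit[2] = {0, 7};
        int d = dir;
        for (idx[0] = 0; idx[0] < num; ++idx[0]) {
            for (bit[0] = 7; bit[0] >= 0; bit[0]--) {
                r[idx[!d]] |= x[idx[d]] & 1 << bit[d] ? 1 << bit[!d] : 0;
                idx[1] = (idx[1] + 1) % num;
                if (!idx[1]) {
                    bit[1]--;
                }
            }
        }
        memcpy(x, r, sizeof(uint8_t) * num);
    }

    int main(void)
    {
        uint8_t orig[2] = { 0x76, 0x98 };   /* two arbitrary tx bytes */
        uint8_t buf[2];

        memcpy(buf, orig, sizeof(buf));
        stripe8(buf, 2, false);             /* stripe across two busses */
        stripe8(buf, 2, true);              /* upstripe on the read path */
        assert(memcmp(buf, orig, sizeof(buf)) == 0);
        return 0;
    }
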
+static void xlnx_zynqmp_qspips_flush_fifo_g(XlnxZynqMPQSPIPS *s)
+{
+ while (s->regs[R_GQSPI_DATA_STS] || !fifo32_is_empty(&s->fifo_g)) {
+ uint8_t tx_rx[2] = { 0 };
+ int num_stripes = 1;
+ uint8_t busses;
+ int i;
+
+ if (!s->regs[R_GQSPI_DATA_STS]) {
+ uint8_t imm;
+
+ s->regs[R_GQSPI_GF_SNAPSHOT] = fifo32_pop(&s->fifo_g);
+ DB_PRINT_L(0, "GQSPI command: %x\n", s->regs[R_GQSPI_GF_SNAPSHOT]);
+ if (!s->regs[R_GQSPI_GF_SNAPSHOT]) {
+ DB_PRINT_L(0, "Dummy GQSPI Delay Command Entry, Do nothing");
+ continue;
+ }
+ xlnx_zynqmp_qspips_update_cs_lines(s);
+
+ imm = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA);
+ if (!ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_XFER)) {
+ /* immediate transfer */
+ if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT) ||
+ ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE)) {
+ s->regs[R_GQSPI_DATA_STS] = 1;
+ /* CS setup/hold - do nothing */
+ } else {
+ s->regs[R_GQSPI_DATA_STS] = 0;
+ }
+ } else if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, EXPONENT)) {
+ if (imm > 31) {
+ qemu_log_mask(LOG_UNIMP, "QSPI exponential transfer too"
+ " long - 2 ^ %" PRId8 " requested\n", imm);
+ }
+ s->regs[R_GQSPI_DATA_STS] = 1ul << imm;
+ } else {
+ s->regs[R_GQSPI_DATA_STS] = imm;
+ }
+ }
+ /* Zero length transfer check */
+ if (!s->regs[R_GQSPI_DATA_STS]) {
+ continue;
+ }
+ if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE) &&
+ fifo8_is_full(&s->rx_fifo_g)) {
+ /* No space in RX fifo for transfer - try again later */
+ return;
+ }
+ if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, STRIPE) &&
+ (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT) ||
+ ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE))) {
+ num_stripes = 2;
+ }
+ if (!ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_XFER)) {
+ tx_rx[0] = ARRAY_FIELD_EX32(s->regs,
+ GQSPI_GF_SNAPSHOT, IMMEDIATE_DATA);
+ } else if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, TRANSMIT)) {
+ for (i = 0; i < num_stripes; ++i) {
+ if (!fifo8_is_empty(&s->tx_fifo_g)) {
+ tx_rx[i] = fifo8_pop(&s->tx_fifo_g);
+ s->tx_fifo_g_align++;
+ } else {
+ return;
+ }
+ }
+ }
+ if (num_stripes == 1) {
+ /* mirror */
+ tx_rx[1] = tx_rx[0];
+ }
+ busses = ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, DATA_BUS_SELECT);
+ for (i = 0; i < 2; ++i) {
+ DB_PRINT_L(1, "bus %d tx = %02x\n", i, tx_rx[i]);
+ tx_rx[i] = ssi_transfer(XILINX_SPIPS(s)->spi[i], tx_rx[i]);
+ DB_PRINT_L(1, "bus %d rx = %02x\n", i, tx_rx[i]);
+ }
+ if (s->regs[R_GQSPI_DATA_STS] > 1 &&
+ busses == 0x3 && num_stripes == 2) {
+ s->regs[R_GQSPI_DATA_STS] -= 2;
+ } else if (s->regs[R_GQSPI_DATA_STS] > 0) {
+ s->regs[R_GQSPI_DATA_STS]--;
+ }
+ if (ARRAY_FIELD_EX32(s->regs, GQSPI_GF_SNAPSHOT, RECIEVE)) {
+ for (i = 0; i < 2; ++i) {
+ if (busses & (1 << i)) {
+ DB_PRINT_L(1, "bus %d push_byte = %02x\n", i, tx_rx[i]);
+ fifo8_push(&s->rx_fifo_g, tx_rx[i]);
+ s->rx_fifo_g_align++;
+ }
+ }
+ }
+ if (!s->regs[R_GQSPI_DATA_STS]) {
+ for (; s->tx_fifo_g_align % 4; s->tx_fifo_g_align++) {
+ fifo8_pop(&s->tx_fifo_g);
+ }
+ for (; s->rx_fifo_g_align % 4; s->rx_fifo_g_align++) {
+ fifo8_push(&s->rx_fifo_g, 0);
+ }
+ }
+ }
+}
+
+static int xilinx_spips_num_dummies(XilinxQSPIPS *qs, uint8_t command)
+{
+ if (!qs) {
+ /* The SPI device is not a QSPI device */
+ return -1;
+ }
+
+ switch (command) { /* check for dummies */
+ case READ: /* no dummy bytes/cycles */
+ case PP:
+ case DPP:
+ case QPP:
+ case READ_4:
+ case PP_4:
+ case QPP_4:
+ return 0;
+ case FAST_READ:
+ case DOR:
+ case QOR:
+ case DOR_4:
+ case QOR_4:
+ return 1;
+ case DIOR:
+ case FAST_READ_4:
+ case DIOR_4:
+ return 2;
+ case QIOR:
+ case QIOR_4:
+ return 5;
+ default:
+ return -1;
+ }
+}
+
+static inline uint8_t get_addr_length(XilinxSPIPS *s, uint8_t cmd)
+{
+ switch (cmd) {
+ case PP_4:
+ case QPP_4:
+ case READ_4:
+ case QIOR_4:
+ case FAST_READ_4:
+ case DOR_4:
+ case QOR_4:
+ case DIOR_4:
+ return 4;
+ default:
+ return (s->regs[R_CMND] & R_CMND_EXT_ADD) ? 4 : 3;
+ }
+}
+
static void xilinx_spips_flush_txfifo(XilinxSPIPS *s)
{
int debug_level = 0;
+ XilinxQSPIPS *q = (XilinxQSPIPS *) object_dynamic_cast(OBJECT(s),
+ TYPE_XILINX_QSPIPS);
for (;;) {
int i;
uint8_t tx = 0;
uint8_t tx_rx[num_effective_busses(s)];
+ uint8_t dummy_cycles = 0;
+ uint8_t addr_length;
if (fifo8_is_empty(&s->tx_fifo)) {
- if (!(s->regs[R_LQSPI_CFG] & LQSPI_CFG_LQ_MODE)) {
- s->regs[R_INTR_STATUS] |= IXR_TX_FIFO_UNDERFLOW;
- }
xilinx_spips_update_ixr(s);
return;
} else if (s->snoop_state == SNOOP_STRIPING) {
@@ -293,53 +585,102 @@ static void xilinx_spips_flush_txfifo(XilinxSPIPS *s)
tx_rx[i] = fifo8_pop(&s->tx_fifo);
}
stripe8(tx_rx, num_effective_busses(s), false);
- } else {
+ } else if (s->snoop_state >= SNOOP_ADDR) {
tx = fifo8_pop(&s->tx_fifo);
for (i = 0; i < num_effective_busses(s); ++i) {
tx_rx[i] = tx;
}
+ } else {
+ /* Extract a dummy byte and generate dummy cycles according to the
+ * link state */
+ tx = fifo8_pop(&s->tx_fifo);
+ dummy_cycles = 8 / s->link_state;
}
for (i = 0; i < num_effective_busses(s); ++i) {
- DB_PRINT_L(debug_level, "tx = %02x\n", tx_rx[i]);
- tx_rx[i] = ssi_transfer(s->spi[i], (uint32_t)tx_rx[i]);
- DB_PRINT_L(debug_level, "rx = %02x\n", tx_rx[i]);
+ int bus = num_effective_busses(s) - 1 - i;
+ if (dummy_cycles) {
+ int d;
+ for (d = 0; d < dummy_cycles; ++d) {
+ tx_rx[0] = ssi_transfer(s->spi[bus], (uint32_t)tx_rx[0]);
+ }
+ } else {
+ DB_PRINT_L(debug_level, "tx = %02x\n", tx_rx[i]);
+ tx_rx[i] = ssi_transfer(s->spi[bus], (uint32_t)tx_rx[i]);
+ DB_PRINT_L(debug_level, "rx = %02x\n", tx_rx[i]);
+ }
}
- if (fifo8_is_full(&s->rx_fifo)) {
+ if (s->regs[R_CMND] & R_CMND_RXFIFO_DRAIN) {
+ DB_PRINT_L(debug_level, "discarding drained rx byte\n");
+ /* Do nothing */
+ } else if (s->rx_discard) {
+ DB_PRINT_L(debug_level, "discarding discarded rx byte\n");
+ s->rx_discard -= 8 / s->link_state;
+ } else if (fifo8_is_full(&s->rx_fifo)) {
s->regs[R_INTR_STATUS] |= IXR_RX_FIFO_OVERFLOW;
DB_PRINT_L(0, "rx FIFO overflow");
} else if (s->snoop_state == SNOOP_STRIPING) {
stripe8(tx_rx, num_effective_busses(s), true);
for (i = 0; i < num_effective_busses(s); ++i) {
fifo8_push(&s->rx_fifo, (uint8_t)tx_rx[i]);
+ DB_PRINT_L(debug_level, "pushing striped rx byte\n");
}
} else {
+ DB_PRINT_L(debug_level, "pushing unstriped rx byte\n");
fifo8_push(&s->rx_fifo, (uint8_t)tx_rx[0]);
}
+ if (s->link_state_next_when) {
+ s->link_state_next_when--;
+ if (!s->link_state_next_when) {
+ s->link_state = s->link_state_next;
+ }
+ }
+
DB_PRINT_L(debug_level, "initial snoop state: %x\n",
(unsigned)s->snoop_state);
switch (s->snoop_state) {
case (SNOOP_CHECKING):
- switch (tx) { /* new instruction code */
- case READ: /* 3 address bytes, no dummy bytes/cycles */
- case PP:
+ /* Store the count of dummy bytes in the txfifo */
+ s->cmd_dummies = xilinx_spips_num_dummies(q, tx);
+ addr_length = get_addr_length(s, tx);
+ if (s->cmd_dummies < 0) {
+ s->snoop_state = SNOOP_NONE;
+ } else {
+ s->snoop_state = SNOOP_ADDR + addr_length - 1;
+ }
+ switch (tx) {
case DPP:
- case QPP:
- s->snoop_state = 3;
- break;
- case FAST_READ: /* 3 address bytes, 1 dummy byte */
case DOR:
+ case DOR_4:
+ s->link_state_next = 2;
+ s->link_state_next_when = addr_length + s->cmd_dummies;
+ break;
+ case QPP:
+ case QPP_4:
case QOR:
- case DIOR: /* FIXME: these vary between vendor - set to spansion */
- s->snoop_state = 4;
+ case QOR_4:
+ s->link_state_next = 4;
+ s->link_state_next_when = addr_length + s->cmd_dummies;
+ break;
+ case DIOR:
+ case DIOR_4:
+ s->link_state = 2;
break;
- case QIOR: /* 3 address bytes, 2 dummy bytes */
- s->snoop_state = 6;
+ case QIOR:
+ case QIOR_4:
+ s->link_state = 4;
break;
- default:
+ }
+ break;
+ case (SNOOP_ADDR):
+ /* Address has been transmitted, transmit dummy cycles now if
+ * needed */
+ if (s->cmd_dummies < 0) {
s->snoop_state = SNOOP_NONE;
+ } else {
+ s->snoop_state = s->cmd_dummies;
}
break;
case (SNOOP_STRIPING):
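
The SNOOP_CHECKING and SNOOP_ADDR cases above spread command decode across several transmitted bytes. The following standalone walk-through prints the state values for a plain QIOR read, using the numbers the patch itself assigns (5 dummy bytes from xilinx_spips_num_dummies(), 3 address bytes when R_CMND_EXT_ADD is clear); the loop is only an illustration of the countdown, not the device code.

    #include <stdio.h>

    #define SNOOP_CHECKING 0xFF   /* values from the patch */
    #define SNOOP_ADDR     0xF0
    #define SNOOP_STRIPING 0

    int main(void)
    {
        int addr_length = 3;              /* get_addr_length(): no EXT_ADD */
        int cmd_dummies = 5;              /* xilinx_spips_num_dummies(QIOR) */
        int snoop_state;

        /* command byte seen: start the address countdown */
        snoop_state = SNOOP_ADDR + addr_length - 1;
        while (snoop_state > SNOOP_ADDR) {
            printf("address byte, state 0x%x\n", snoop_state--);
        }
        printf("last address byte, state 0x%x\n", snoop_state);

        /* SNOOP_ADDR case: switch to counting dummy bytes */
        snoop_state = cmd_dummies;
        while (snoop_state > SNOOP_STRIPING) {
            printf("dummy byte, state %d\n", snoop_state--);
        }
        printf("state %d (SNOOP_STRIPING): payload bytes follow\n", snoop_state);
        return 0;
    }
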
@@ -358,12 +699,128 @@ static void xilinx_spips_flush_txfifo(XilinxSPIPS *s)
}
}
-static inline void rx_data_bytes(XilinxSPIPS *s, uint8_t *value, int max)
+static inline void tx_data_bytes(Fifo8 *fifo, uint32_t value, int num, bool be)
+{
+ int i;
+ for (i = 0; i < num && !fifo8_is_full(fifo); ++i) {
+ if (be) {
+ fifo8_push(fifo, (uint8_t)(value >> 24));
+ value <<= 8;
+ } else {
+ fifo8_push(fifo, (uint8_t)value);
+ value >>= 8;
+ }
+ }
+}
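
tx_data_bytes() above now takes the byte order as an argument instead of peeking at R_CONFIG itself. A tiny standalone demo of the two push orders follows; MiniFifo and push_word are stand-ins for QEMU's Fifo8 and the real helper, introduced only so the sketch compiles alone.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct {
        uint8_t data[8];
        unsigned num;
    } MiniFifo;                            /* stand-in for util/fifo8.c */

    static void fifo_push(MiniFifo *f, uint8_t v)
    {
        f->data[f->num++] = v;
    }

    /* same push order as tx_data_bytes() above */
    static void push_word(MiniFifo *f, uint32_t value, int num, bool be)
    {
        for (int i = 0; i < num; ++i) {
            if (be) {
                fifo_push(f, (uint8_t)(value >> 24));
                value <<= 8;
            } else {
                fifo_push(f, (uint8_t)value);
                value >>= 8;
            }
        }
    }

    int main(void)
    {
        MiniFifo be = { .num = 0 }, le = { .num = 0 };

        push_word(&be, 0x11223344, 4, true);    /* R_CONFIG_ENDIAN set */
        push_word(&le, 0x11223344, 4, false);   /* R_CONFIG_ENDIAN clear */
        assert(be.data[0] == 0x11 && be.data[3] == 0x44);  /* MSB first */
        assert(le.data[0] == 0x44 && le.data[3] == 0x11);  /* LSB first */
        return 0;
    }
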
+
+static void xilinx_spips_check_zero_pump(XilinxSPIPS *s)
+{
+ if (!s->regs[R_TRANSFER_SIZE]) {
+ return;
+ }
+ if (!fifo8_is_empty(&s->tx_fifo) && s->regs[R_CMND] & R_CMND_PUSH_WAIT) {
+ return;
+ }
+ /*
+ * The zero pump must never fill tx fifo such that rx overflow is
+ * possible
+ */
+ while (s->regs[R_TRANSFER_SIZE] &&
+ s->rx_fifo.num + s->tx_fifo.num < RXFF_A_Q - 3) {
+ /* endianness just doesn't matter when zero pumping */
+ tx_data_bytes(&s->tx_fifo, 0, 4, false);
+ s->regs[R_TRANSFER_SIZE] &= ~0x03ull;
+ s->regs[R_TRANSFER_SIZE] -= 4;
+ }
+}
+
+static void xilinx_spips_check_flush(XilinxSPIPS *s)
+{
+ if (s->man_start_com ||
+ (!fifo8_is_empty(&s->tx_fifo) &&
+ !(s->regs[R_CONFIG] & MAN_START_EN))) {
+ xilinx_spips_check_zero_pump(s);
+ xilinx_spips_flush_txfifo(s);
+ }
+ if (fifo8_is_empty(&s->tx_fifo) && !s->regs[R_TRANSFER_SIZE]) {
+ s->man_start_com = false;
+ }
+ xilinx_spips_update_ixr(s);
+}
+
+static void xlnx_zynqmp_qspips_check_flush(XlnxZynqMPQSPIPS *s)
+{
+ bool gqspi_has_work = s->regs[R_GQSPI_DATA_STS] ||
+ !fifo32_is_empty(&s->fifo_g);
+
+ if (ARRAY_FIELD_EX32(s->regs, GQSPI_SELECT, GENERIC_QSPI_EN)) {
+ if (s->man_start_com_g || (gqspi_has_work &&
+ !ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, GEN_FIFO_START_MODE))) {
+ xlnx_zynqmp_qspips_flush_fifo_g(s);
+ }
+ } else {
+ xilinx_spips_check_flush(XILINX_SPIPS(s));
+ }
+ if (!gqspi_has_work) {
+ s->man_start_com_g = false;
+ }
+ xlnx_zynqmp_qspips_update_ixr(s);
+}
+
+static inline int rx_data_bytes(Fifo8 *fifo, uint8_t *value, int max)
{
int i;
- for (i = 0; i < max && !fifo8_is_empty(&s->rx_fifo); ++i) {
- value[i] = fifo8_pop(&s->rx_fifo);
+ for (i = 0; i < max && !fifo8_is_empty(fifo); ++i) {
+ value[i] = fifo8_pop(fifo);
+ }
+ return max - i;
+}
+
+static const void *pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num)
+{
+ void *ret;
+
+ if (max == 0 || max > fifo->num) {
+ abort();
+ }
+ *num = MIN(fifo->capacity - fifo->head, max);
+ ret = &fifo->data[fifo->head];
+ fifo->head += *num;
+ fifo->head %= fifo->capacity;
+ fifo->num -= *num;
+ return ret;
+}
+
+static void xlnx_zynqmp_qspips_notify(void *opaque)
+{
+ XlnxZynqMPQSPIPS *rq = XLNX_ZYNQMP_QSPIPS(opaque);
+ XilinxSPIPS *s = XILINX_SPIPS(rq);
+ Fifo8 *recv_fifo;
+
+ if (ARRAY_FIELD_EX32(rq->regs, GQSPI_SELECT, GENERIC_QSPI_EN)) {
+ if (!(ARRAY_FIELD_EX32(rq->regs, GQSPI_CNFG, MODE_EN) == 2)) {
+ return;
+ }
+ recv_fifo = &rq->rx_fifo_g;
+ } else {
+ if (!(s->regs[R_CMND] & R_CMND_DMA_EN)) {
+ return;
+ }
+ recv_fifo = &s->rx_fifo;
+ }
+ while (recv_fifo->num >= 4
+ && stream_can_push(rq->dma, xlnx_zynqmp_qspips_notify, rq))
+ {
+ size_t ret;
+ uint32_t num;
+ const void *rxd = pop_buf(recv_fifo, 4, &num);
+
+ memcpy(rq->dma_buf, rxd, num);
+
+ ret = stream_push(rq->dma, rq->dma_buf, 4);
+ assert(ret == 4);
+ xlnx_zynqmp_qspips_check_flush(rq);
}
}
@@ -374,6 +831,7 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr,
uint32_t mask = ~0;
uint32_t ret;
uint8_t rx_buf[4];
+ int shortfall;
addr >>= 2;
switch (addr) {
@@ -384,6 +842,7 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr,
ret = s->regs[addr] & IXR_ALL;
s->regs[addr] = 0;
DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret);
+ xilinx_spips_update_ixr(s);
return ret;
case R_INTR_MASK:
mask = IXR_ALL;
@@ -404,10 +863,15 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr,
break;
case R_RX_DATA:
memset(rx_buf, 0, sizeof(rx_buf));
- rx_data_bytes(s, rx_buf, s->num_txrx_bytes);
- ret = s->regs[R_CONFIG] & ENDIAN ? cpu_to_be32(*(uint32_t *)rx_buf)
- : cpu_to_le32(*(uint32_t *)rx_buf);
+ shortfall = rx_data_bytes(&s->rx_fifo, rx_buf, s->num_txrx_bytes);
+ ret = s->regs[R_CONFIG] & R_CONFIG_ENDIAN ?
+ cpu_to_be32(*(uint32_t *)rx_buf) :
+ cpu_to_le32(*(uint32_t *)rx_buf);
+ if (!(s->regs[R_CONFIG] & R_CONFIG_ENDIAN)) {
+ ret <<= 8 * shortfall;
+ }
DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr * 4, ret);
+ xilinx_spips_check_flush(s);
xilinx_spips_update_ixr(s);
return ret;
}
@@ -417,16 +881,39 @@ static uint64_t xilinx_spips_read(void *opaque, hwaddr addr,
}
-static inline void tx_data_bytes(XilinxSPIPS *s, uint32_t value, int num)
+static uint64_t xlnx_zynqmp_qspips_read(void *opaque,
+ hwaddr addr, unsigned size)
{
- int i;
- for (i = 0; i < num && !fifo8_is_full(&s->tx_fifo); ++i) {
- if (s->regs[R_CONFIG] & ENDIAN) {
- fifo8_push(&s->tx_fifo, (uint8_t)(value >> 24));
- value <<= 8;
- } else {
- fifo8_push(&s->tx_fifo, (uint8_t)value);
- value >>= 8;
+ XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(opaque);
+ uint32_t reg = addr / 4;
+ uint32_t ret;
+ uint8_t rx_buf[4];
+ int shortfall;
+
+ if (reg <= R_MOD_ID) {
+ return xilinx_spips_read(opaque, addr, size);
+ } else {
+ switch (reg) {
+ case R_GQSPI_RXD:
+ if (fifo8_is_empty(&s->rx_fifo_g)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Read from empty GQSPI RX FIFO\n");
+ return 0;
+ }
+ memset(rx_buf, 0, sizeof(rx_buf));
+ shortfall = rx_data_bytes(&s->rx_fifo_g, rx_buf,
+ XILINX_SPIPS(s)->num_txrx_bytes);
+ ret = ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, ENDIAN) ?
+ cpu_to_be32(*(uint32_t *)rx_buf) :
+ cpu_to_le32(*(uint32_t *)rx_buf);
+ if (!ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, ENDIAN)) {
+ ret <<= 8 * shortfall;
+ }
+ xlnx_zynqmp_qspips_check_flush(s);
+ xlnx_zynqmp_qspips_update_ixr(s);
+ return ret;
+ default:
+ return s->regs[reg];
}
}
}
@@ -435,7 +922,6 @@ static void xilinx_spips_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
{
int mask = ~0;
- int man_start_com = 0;
XilinxSPIPS *s = opaque;
DB_PRINT_L(0, "addr=" TARGET_FMT_plx " = %x\n", addr, (unsigned)value);
@@ -443,8 +929,8 @@ static void xilinx_spips_write(void *opaque, hwaddr addr,
switch (addr) {
case R_CONFIG:
mask = ~(R_CONFIG_RSVD | MAN_START_COM);
- if (value & MAN_START_COM) {
- man_start_com = 1;
+ if ((value & MAN_START_COM) && (s->regs[R_CONFIG] & MAN_START_EN)) {
+ s->man_start_com = true;
}
break;
case R_INTR_STATUS:
@@ -471,25 +957,26 @@ static void xilinx_spips_write(void *opaque, hwaddr addr,
mask = 0;
break;
case R_TX_DATA:
- tx_data_bytes(s, (uint32_t)value, s->num_txrx_bytes);
+ tx_data_bytes(&s->tx_fifo, (uint32_t)value, s->num_txrx_bytes,
+ s->regs[R_CONFIG] & R_CONFIG_ENDIAN);
goto no_reg_update;
case R_TXD1:
- tx_data_bytes(s, (uint32_t)value, 1);
+ tx_data_bytes(&s->tx_fifo, (uint32_t)value, 1,
+ s->regs[R_CONFIG] & R_CONFIG_ENDIAN);
goto no_reg_update;
case R_TXD2:
- tx_data_bytes(s, (uint32_t)value, 2);
+ tx_data_bytes(&s->tx_fifo, (uint32_t)value, 2,
+ s->regs[R_CONFIG] & R_CONFIG_ENDIAN);
goto no_reg_update;
case R_TXD3:
- tx_data_bytes(s, (uint32_t)value, 3);
+ tx_data_bytes(&s->tx_fifo, (uint32_t)value, 3,
+ s->regs[R_CONFIG] & R_CONFIG_ENDIAN);
goto no_reg_update;
}
s->regs[addr] = (s->regs[addr] & ~mask) | (value & mask);
no_reg_update:
xilinx_spips_update_cs_lines(s);
- if ((man_start_com && s->regs[R_CONFIG] & MAN_START_EN) ||
- (fifo8_is_empty(&s->tx_fifo) && s->regs[R_CONFIG] & MAN_START_EN)) {
- xilinx_spips_flush_txfifo(s);
- }
+ xilinx_spips_check_flush(s);
xilinx_spips_update_cs_lines(s);
xilinx_spips_update_ixr(s);
}
@@ -517,6 +1004,7 @@ static void xilinx_qspips_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
{
XilinxQSPIPS *q = XILINX_QSPIPS(opaque);
+ XilinxSPIPS *s = XILINX_SPIPS(opaque);
xilinx_spips_write(opaque, addr, value, size);
addr >>= 2;
@@ -524,6 +1012,72 @@ static void xilinx_qspips_write(void *opaque, hwaddr addr,
if (addr == R_LQSPI_CFG) {
xilinx_qspips_invalidate_mmio_ptr(q);
}
+ if (s->regs[R_CMND] & R_CMND_RXFIFO_DRAIN) {
+ fifo8_reset(&s->rx_fifo);
+ }
+}
+
+static void xlnx_zynqmp_qspips_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(opaque);
+ uint32_t reg = addr / 4;
+
+ if (reg <= R_MOD_ID) {
+ xilinx_qspips_write(opaque, addr, value, size);
+ } else {
+ switch (reg) {
+ case R_GQSPI_CNFG:
+ if (FIELD_EX32(value, GQSPI_CNFG, GEN_FIFO_START) &&
+ ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, GEN_FIFO_START_MODE)) {
+ s->man_start_com_g = true;
+ }
+ s->regs[reg] = value & ~(R_GQSPI_CNFG_GEN_FIFO_START_MASK);
+ break;
+ case R_GQSPI_GEN_FIFO:
+ if (!fifo32_is_full(&s->fifo_g)) {
+ fifo32_push(&s->fifo_g, value);
+ }
+ break;
+ case R_GQSPI_TXD:
+ tx_data_bytes(&s->tx_fifo_g, (uint32_t)value, 4,
+ ARRAY_FIELD_EX32(s->regs, GQSPI_CNFG, ENDIAN));
+ break;
+ case R_GQSPI_FIFO_CTRL:
+ if (FIELD_EX32(value, GQSPI_FIFO_CTRL, GENERIC_FIFO_RESET)) {
+ fifo32_reset(&s->fifo_g);
+ }
+ if (FIELD_EX32(value, GQSPI_FIFO_CTRL, TX_FIFO_RESET)) {
+ fifo8_reset(&s->tx_fifo_g);
+ }
+ if (FIELD_EX32(value, GQSPI_FIFO_CTRL, RX_FIFO_RESET)) {
+ fifo8_reset(&s->rx_fifo_g);
+ }
+ break;
+ case R_GQSPI_IDR:
+ s->regs[R_GQSPI_IMR] |= value;
+ break;
+ case R_GQSPI_IER:
+ s->regs[R_GQSPI_IMR] &= ~value;
+ break;
+ case R_GQSPI_ISR:
+ s->regs[R_GQSPI_ISR] &= ~value;
+ break;
+ case R_GQSPI_IMR:
+ case R_GQSPI_RXD:
+ case R_GQSPI_GF_SNAPSHOT:
+ case R_GQSPI_MOD_ID:
+ break;
+ default:
+ s->regs[reg] = value;
+ break;
+ }
+ xlnx_zynqmp_qspips_update_cs_lines(s);
+ xlnx_zynqmp_qspips_check_flush(s);
+ xlnx_zynqmp_qspips_update_cs_lines(s);
+ xlnx_zynqmp_qspips_update_ixr(s);
+ }
+ xlnx_zynqmp_qspips_notify(s);
}
static const MemoryRegionOps qspips_ops = {
@@ -532,6 +1086,12 @@ static const MemoryRegionOps qspips_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
+static const MemoryRegionOps xlnx_zynqmp_qspips_ops = {
+ .read = xlnx_zynqmp_qspips_read,
+ .write = xlnx_zynqmp_qspips_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
#define LQSPI_CACHE_SIZE 1024
static void lqspi_load_cache(void *opaque, hwaddr addr)
@@ -563,6 +1123,9 @@ static void lqspi_load_cache(void *opaque, hwaddr addr)
fifo8_push(&s->tx_fifo, s->regs[R_LQSPI_CFG] & LQSPI_CFG_INST_CODE);
/* read address */
DB_PRINT_L(0, "pushing read address %06x\n", flash_addr);
+ if (s->regs[R_LQSPI_CFG] & LQSPI_CFG_ADDR4) {
+ fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 24));
+ }
fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 16));
fifo8_push(&s->tx_fifo, (uint8_t)(flash_addr >> 8));
fifo8_push(&s->tx_fifo, (uint8_t)flash_addr);
@@ -586,11 +1149,11 @@ static void lqspi_load_cache(void *opaque, hwaddr addr)
while (cache_entry < LQSPI_CACHE_SIZE) {
for (i = 0; i < 64; ++i) {
- tx_data_bytes(s, 0, 1);
+ tx_data_bytes(&s->tx_fifo, 0, 1, false);
}
xilinx_spips_flush_txfifo(s);
for (i = 0; i < 64; ++i) {
- rx_data_bytes(s, &q->lqspi_buf[cache_entry++], 1);
+ rx_data_bytes(&s->rx_fifo, &q->lqspi_buf[cache_entry++], 1);
}
}
@@ -666,6 +1229,7 @@ static void xilinx_spips_realize(DeviceState *dev, Error **errp)
}
s->cs_lines = g_new0(qemu_irq, s->num_cs * s->num_busses);
+ s->cs_lines_state = g_new0(bool, s->num_cs * s->num_busses);
for (i = 0, cs = s->cs_lines; i < s->num_busses; ++i, cs += s->num_cs) {
ssi_auto_connect_slaves(DEVICE(s), cs, s->spi[i]);
}
@@ -676,7 +1240,7 @@ static void xilinx_spips_realize(DeviceState *dev, Error **errp)
}
memory_region_init_io(&s->iomem, OBJECT(s), xsc->reg_ops, s,
- "spi", XLNX_SPIPS_R_MAX * 4);
+ "spi", XLNX_ZYNQMP_SPIPS_R_MAX * 4);
sysbus_init_mmio(sbd, &s->iomem);
s->irqline = -1;
@@ -714,6 +1278,28 @@ static void xilinx_qspips_realize(DeviceState *dev, Error **errp)
}
}
+static void xlnx_zynqmp_qspips_realize(DeviceState *dev, Error **errp)
+{
+ XlnxZynqMPQSPIPS *s = XLNX_ZYNQMP_QSPIPS(dev);
+ XilinxSPIPSClass *xsc = XILINX_SPIPS_GET_CLASS(s);
+
+ xilinx_qspips_realize(dev, errp);
+ fifo8_create(&s->rx_fifo_g, xsc->rx_fifo_size);
+ fifo8_create(&s->tx_fifo_g, xsc->tx_fifo_size);
+ fifo32_create(&s->fifo_g, 32);
+}
+
+static void xlnx_zynqmp_qspips_init(Object *obj)
+{
+ XlnxZynqMPQSPIPS *rq = XLNX_ZYNQMP_QSPIPS(obj);
+
+ object_property_add_link(obj, "stream-connected-dma", TYPE_STREAM_SLAVE,
+ (Object **)&rq->dma,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ NULL);
+}
+
static int xilinx_spips_post_load(void *opaque, int version_id)
{
xilinx_spips_update_ixr((XilinxSPIPS *)opaque);
@@ -735,6 +1321,46 @@ static const VMStateDescription vmstate_xilinx_spips = {
}
};
+static int xlnx_zynqmp_qspips_post_load(void *opaque, int version_id)
+{
+ XlnxZynqMPQSPIPS *s = (XlnxZynqMPQSPIPS *)opaque;
+ XilinxSPIPS *qs = XILINX_SPIPS(s);
+
+ if (ARRAY_FIELD_EX32(s->regs, GQSPI_SELECT, GENERIC_QSPI_EN) &&
+ fifo8_is_empty(&qs->rx_fifo) && fifo8_is_empty(&qs->tx_fifo)) {
+ xlnx_zynqmp_qspips_update_ixr(s);
+ xlnx_zynqmp_qspips_update_cs_lines(s);
+ }
+ return 0;
+}
+
+static const VMStateDescription vmstate_xilinx_qspips = {
+ .name = "xilinx_qspips",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(parent_obj, XilinxQSPIPS, 0,
+ vmstate_xilinx_spips, XilinxSPIPS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_xlnx_zynqmp_qspips = {
+ .name = "xlnx_zynqmp_qspips",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = xlnx_zynqmp_qspips_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(parent_obj, XlnxZynqMPQSPIPS, 0,
+ vmstate_xilinx_qspips, XilinxQSPIPS),
+ VMSTATE_FIFO8(tx_fifo_g, XlnxZynqMPQSPIPS),
+ VMSTATE_FIFO8(rx_fifo_g, XlnxZynqMPQSPIPS),
+ VMSTATE_FIFO32(fifo_g, XlnxZynqMPQSPIPS),
+ VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPQSPIPS, XLNX_ZYNQMP_SPIPS_R_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static Property xilinx_qspips_properties[] = {
/* We had to turn this off for 2.10 as it is not compatible with migration.
* It can be enabled but will prevent the device to be migrated.
@@ -779,6 +1405,19 @@ static void xilinx_spips_class_init(ObjectClass *klass, void *data)
xsc->tx_fifo_size = TXFF_A;
}
+static void xlnx_zynqmp_qspips_class_init(ObjectClass *klass, void * data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ XilinxSPIPSClass *xsc = XILINX_SPIPS_CLASS(klass);
+
+ dc->realize = xlnx_zynqmp_qspips_realize;
+ dc->reset = xlnx_zynqmp_qspips_reset;
+ dc->vmsd = &vmstate_xlnx_zynqmp_qspips;
+ xsc->reg_ops = &xlnx_zynqmp_qspips_ops;
+ xsc->rx_fifo_size = RXFF_A_Q;
+ xsc->tx_fifo_size = TXFF_A_Q;
+}
+
static const TypeInfo xilinx_spips_info = {
.name = TYPE_XILINX_SPIPS,
.parent = TYPE_SYS_BUS_DEVICE,
@@ -794,10 +1433,19 @@ static const TypeInfo xilinx_qspips_info = {
.class_init = xilinx_qspips_class_init,
};
+static const TypeInfo xlnx_zynqmp_qspips_info = {
+ .name = TYPE_XLNX_ZYNQMP_QSPIPS,
+ .parent = TYPE_XILINX_QSPIPS,
+ .instance_size = sizeof(XlnxZynqMPQSPIPS),
+ .instance_init = xlnx_zynqmp_qspips_init,
+ .class_init = xlnx_zynqmp_qspips_class_init,
+};
+
static void xilinx_spips_register_types(void)
{
type_register_static(&xilinx_spips_info);
type_register_static(&xilinx_qspips_info);
+ type_register_static(&xlnx_zynqmp_qspips_info);
}
type_init(xilinx_spips_register_types)
diff --git a/hw/timer/i8254.c b/hw/timer/i8254.c
index 5e61ad50a8..dbc4a0baec 100644
--- a/hw/timer/i8254.c
+++ b/hw/timer/i8254.c
@@ -23,7 +23,6 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "qemu/timer.h"
#include "hw/timer/i8254.h"
diff --git a/hw/timer/i8254_common.c b/hw/timer/i8254_common.c
index b623c96198..6190b6fc5d 100644
--- a/hw/timer/i8254_common.c
+++ b/hw/timer/i8254_common.c
@@ -24,7 +24,6 @@
*/
#include "qemu/osdep.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "qemu/timer.h"
#include "hw/timer/i8254.h"
diff --git a/hw/timer/mc146818rtc.c b/hw/timer/mc146818rtc.c
index 7764be25ec..35a05a64cc 100644
--- a/hw/timer/mc146818rtc.c
+++ b/hw/timer/mc146818rtc.c
@@ -999,7 +999,7 @@ static void rtc_realizefn(DeviceState *dev, Error **errp)
qdev_init_gpio_out(dev, &s->irq, 1);
}
-ISADevice *rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq)
+ISADevice *mc146818_rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq)
{
DeviceState *dev;
ISADevice *isadev;
diff --git a/hw/tpm/Makefile.objs b/hw/tpm/Makefile.objs
index 41f0b7a590..7a93b24636 100644
--- a/hw/tpm/Makefile.objs
+++ b/hw/tpm/Makefile.objs
@@ -1,3 +1,4 @@
+common-obj-y += tpm_util.o
common-obj-$(CONFIG_TPM_TIS) += tpm_tis.o
-common-obj-$(CONFIG_TPM_PASSTHROUGH) += tpm_passthrough.o tpm_util.o
-common-obj-$(CONFIG_TPM_EMULATOR) += tpm_emulator.o tpm_util.o
+common-obj-$(CONFIG_TPM_PASSTHROUGH) += tpm_passthrough.o
+common-obj-$(CONFIG_TPM_EMULATOR) += tpm_emulator.o
diff --git a/hw/tpm/tpm_emulator.c b/hw/tpm/tpm_emulator.c
index e1a68104d6..35c78de5a9 100644
--- a/hw/tpm/tpm_emulator.c
+++ b/hw/tpm/tpm_emulator.c
@@ -33,7 +33,6 @@
#include "sysemu/tpm_backend.h"
#include "tpm_int.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "tpm_util.h"
#include "tpm_ioctl.h"
#include "migration/blocker.h"
@@ -73,6 +72,9 @@ typedef struct TPMEmulator {
Error *migration_blocker;
QemuMutex mutex;
+
+ unsigned int established_flag:1;
+ unsigned int established_flag_cached:1;
} TPMEmulator;
@@ -186,7 +188,6 @@ static int tpm_emulator_set_locality(TPMEmulator *tpm_emu, uint8_t locty_number,
static void tpm_emulator_handle_request(TPMBackend *tb, TPMBackendCmd *cmd)
{
TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
- TPMIfClass *tic = TPM_IF_GET_CLASS(tb->tpm_state);
Error *err = NULL;
DPRINTF("processing TPM command");
@@ -201,7 +202,6 @@ static void tpm_emulator_handle_request(TPMBackend *tb, TPMBackendCmd *cmd)
goto error;
}
- tic->request_completed(TPM_IF(tb->tpm_state));
return;
error:
@@ -234,13 +234,14 @@ static int tpm_emulator_check_caps(TPMEmulator *tpm_emu)
switch (tpm_emu->tpm_version) {
case TPM_VERSION_1_2:
caps = PTM_CAP_INIT | PTM_CAP_SHUTDOWN | PTM_CAP_GET_TPMESTABLISHED |
- PTM_CAP_SET_LOCALITY | PTM_CAP_SET_DATAFD;
+ PTM_CAP_SET_LOCALITY | PTM_CAP_SET_DATAFD | PTM_CAP_STOP |
+ PTM_CAP_SET_BUFFERSIZE;
tpm = "1.2";
break;
case TPM_VERSION_2_0:
caps = PTM_CAP_INIT | PTM_CAP_SHUTDOWN | PTM_CAP_GET_TPMESTABLISHED |
PTM_CAP_SET_LOCALITY | PTM_CAP_RESET_TPMESTABLISHED |
- PTM_CAP_SET_DATAFD;
+ PTM_CAP_SET_DATAFD | PTM_CAP_STOP | PTM_CAP_SET_BUFFERSIZE;
tpm = "2";
break;
case TPM_VERSION_UNSPEC:
@@ -257,12 +258,76 @@ static int tpm_emulator_check_caps(TPMEmulator *tpm_emu)
return 0;
}
-static int tpm_emulator_startup_tpm(TPMBackend *tb)
+static int tpm_emulator_stop_tpm(TPMBackend *tb)
+{
+ TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
+ ptm_res res;
+
+ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_STOP, &res, 0, sizeof(res)) < 0) {
+ error_report("tpm-emulator: Could not stop TPM: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ res = be32_to_cpu(res);
+ if (res) {
+ error_report("tpm-emulator: TPM result for CMD_STOP: 0x%x", res);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int tpm_emulator_set_buffer_size(TPMBackend *tb,
+ size_t wanted_size,
+ size_t *actual_size)
+{
+ TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
+ ptm_setbuffersize psbs;
+
+ if (tpm_emulator_stop_tpm(tb) < 0) {
+ return -1;
+ }
+
+ psbs.u.req.buffersize = cpu_to_be32(wanted_size);
+
+ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_BUFFERSIZE, &psbs,
+ sizeof(psbs.u.req), sizeof(psbs.u.resp)) < 0) {
+ error_report("tpm-emulator: Could not set buffer size: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ psbs.u.resp.tpm_result = be32_to_cpu(psbs.u.resp.tpm_result);
+ if (psbs.u.resp.tpm_result != 0) {
+ error_report("tpm-emulator: TPM result for set buffer size : 0x%x",
+ psbs.u.resp.tpm_result);
+ return -1;
+ }
+
+ if (actual_size) {
+ *actual_size = be32_to_cpu(psbs.u.resp.buffersize);
+ }
+
+ DPRINTF("buffer size: %u, min: %u, max: %u\n",
+ be32_to_cpu(psbs.u.resp.buffersize),
+ be32_to_cpu(psbs.u.resp.minsize),
+ be32_to_cpu(psbs.u.resp.maxsize));
+
+ return 0;
+}
+
+static int tpm_emulator_startup_tpm(TPMBackend *tb, size_t buffersize)
{
TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
ptm_init init;
ptm_res res;
+ if (buffersize != 0 &&
+ tpm_emulator_set_buffer_size(tb, buffersize, NULL) < 0) {
+ goto err_exit;
+ }
+
DPRINTF("%s", __func__);
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_INIT, &init, sizeof(init),
sizeof(init)) < 0) {
@@ -287,16 +352,22 @@ static bool tpm_emulator_get_tpm_established_flag(TPMBackend *tb)
TPMEmulator *tpm_emu = TPM_EMULATOR(tb);
ptm_est est;
- DPRINTF("%s", __func__);
+ if (tpm_emu->established_flag_cached) {
+ return tpm_emu->established_flag;
+ }
+
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_GET_TPMESTABLISHED, &est,
0, sizeof(est)) < 0) {
error_report("tpm-emulator: Could not get the TPM established flag: %s",
strerror(errno));
return false;
}
- DPRINTF("established flag: %0x", est.u.resp.bit);
+ DPRINTF("got established flag: %0x", est.u.resp.bit);
- return (est.u.resp.bit != 0);
+ tpm_emu->established_flag_cached = 1;
+ tpm_emu->established_flag = (est.u.resp.bit != 0);
+
+ return tpm_emu->established_flag;
}
static int tpm_emulator_reset_tpm_established_flag(TPMBackend *tb,
@@ -327,6 +398,8 @@ static int tpm_emulator_reset_tpm_established_flag(TPMBackend *tb,
return -1;
}
+ tpm_emu->established_flag_cached = 0;
+
return 0;
}
@@ -340,6 +413,7 @@ static void tpm_emulator_cancel_cmd(TPMBackend *tb)
return;
}
+ /* FIXME: make the function non-blocking, or it may block a VCPU */
if (tpm_emulator_ctrlcmd(tpm_emu, CMD_CANCEL_TPM_CMD, &res, 0,
sizeof(res)) < 0) {
error_report("tpm-emulator: Could not cancel command: %s",
@@ -357,6 +431,17 @@ static TPMVersion tpm_emulator_get_tpm_version(TPMBackend *tb)
return tpm_emu->tpm_version;
}
+static size_t tpm_emulator_get_buffer_size(TPMBackend *tb)
+{
+ size_t actual_size;
+
+ if (tpm_emulator_set_buffer_size(tb, 0, &actual_size) < 0) {
+ return 4096;
+ }
+
+ return actual_size;
+}
+
static int tpm_emulator_block_migration(TPMEmulator *tpm_emu)
{
Error *err = NULL;
@@ -465,22 +550,16 @@ err:
return -1;
}
-static TPMBackend *tpm_emulator_create(QemuOpts *opts, const char *id)
+static TPMBackend *tpm_emulator_create(QemuOpts *opts)
{
TPMBackend *tb = TPM_BACKEND(object_new(TYPE_TPM_EMULATOR));
- tb->id = g_strdup(id);
-
if (tpm_emulator_handle_device_opts(TPM_EMULATOR(tb), opts)) {
- goto err_exit;
+ object_unref(OBJECT(tb));
+ return NULL;
}
return tb;
-
-err_exit:
- object_unref(OBJECT(tb));
-
- return NULL;
}
static TpmTypeOptions *tpm_emulator_get_tpm_options(TPMBackend *tb)
@@ -563,6 +642,7 @@ static void tpm_emulator_class_init(ObjectClass *klass, void *data)
tbc->get_tpm_established_flag = tpm_emulator_get_tpm_established_flag;
tbc->reset_tpm_established_flag = tpm_emulator_reset_tpm_established_flag;
tbc->get_tpm_version = tpm_emulator_get_tpm_version;
+ tbc->get_buffer_size = tpm_emulator_get_buffer_size;
tbc->get_tpm_options = tpm_emulator_get_tpm_options;
tbc->handle_request = tpm_emulator_handle_request;
diff --git a/hw/tpm/tpm_int.h b/hw/tpm/tpm_int.h
index 9c045b6691..abbca5191a 100644
--- a/hw/tpm/tpm_int.h
+++ b/hw/tpm/tpm_int.h
@@ -13,28 +13,8 @@
#define TPM_TPM_INT_H
#include "qemu/osdep.h"
-#include "qom/object.h"
-#define TYPE_TPM_IF "tpm-if"
-#define TPM_IF_CLASS(klass) \
- OBJECT_CLASS_CHECK(TPMIfClass, (klass), TYPE_TPM_IF)
-#define TPM_IF_GET_CLASS(obj) \
- OBJECT_GET_CLASS(TPMIfClass, (obj), TYPE_TPM_IF)
-#define TPM_IF(obj) \
- INTERFACE_CHECK(TPMIf, (obj), TYPE_TPM_IF)
-
-typedef struct TPMIf {
- Object parent_obj;
-} TPMIf;
-
-typedef struct TPMIfClass {
- InterfaceClass parent_class;
-
- /* run in thread pool by backend */
- void (*request_completed)(TPMIf *obj);
-} TPMIfClass;
-
-#define TPM_STANDARD_CMDLINE_OPTS \
+#define TPM_STANDARD_CMDLINE_OPTS \
{ \
.name = "type", \
.type = QEMU_OPT_STRING, \
@@ -65,11 +45,20 @@ struct tpm_resp_hdr {
#define TPM_ORD_ContinueSelfTest 0x53
#define TPM_ORD_GetTicks 0xf1
+#define TPM_ORD_GetCapability 0x65
+#define TPM_CAP_PROPERTY 0x05
+
+#define TPM_CAP_PROP_INPUT_BUFFER 0x124
/* TPM2 defines */
#define TPM2_ST_NO_SESSIONS 0x8001
#define TPM2_CC_ReadClock 0x00000181
+#define TPM2_CC_GetCapability 0x0000017a
+
+#define TPM2_CAP_TPM_PROPERTIES 0x6
+
+#define TPM2_PT_MAX_COMMAND_SIZE 0x11e
#endif /* TPM_TPM_INT_H */
diff --git a/hw/tpm/tpm_ioctl.h b/hw/tpm/tpm_ioctl.h
index 33564b11de..54c8d345ad 100644
--- a/hw/tpm/tpm_ioctl.h
+++ b/hw/tpm/tpm_ioctl.h
@@ -169,6 +169,28 @@ struct ptm_getconfig {
#define PTM_CONFIG_FLAG_FILE_KEY 0x1
#define PTM_CONFIG_FLAG_MIGRATION_KEY 0x2
+/*
+ * PTM_SET_BUFFERSIZE: Set the buffer size to be used by the TPM.
+ * A 0 on input queries for the current buffer size. Any other
+ * number will try to set the buffer size. The returned number is
+ * the buffer size that will be used, which can be larger than the
+ * requested one, if it was below the minimum, or smaller than the
+ * requested one, if it was above the maximum.
+ */
+struct ptm_setbuffersize {
+ union {
+ struct {
+ uint32_t buffersize; /* 0 to query for current buffer size */
+ } req; /* request */
+ struct {
+ ptm_res tpm_result;
+ uint32_t buffersize; /* buffer size in use */
+ uint32_t minsize; /* min. supported buffer size */
+ uint32_t maxsize; /* max. supported buffer size */
+ } resp; /* response */
+ } u;
+};
+
typedef uint64_t ptm_cap;
typedef struct ptm_est ptm_est;
@@ -179,6 +201,7 @@ typedef struct ptm_init ptm_init;
typedef struct ptm_getstate ptm_getstate;
typedef struct ptm_setstate ptm_setstate;
typedef struct ptm_getconfig ptm_getconfig;
+typedef struct ptm_setbuffersize ptm_setbuffersize;
/* capability flags returned by PTM_GET_CAPABILITY */
#define PTM_CAP_INIT (1)
@@ -194,6 +217,7 @@ typedef struct ptm_getconfig ptm_getconfig;
#define PTM_CAP_STOP (1 << 10)
#define PTM_CAP_GET_CONFIG (1 << 11)
#define PTM_CAP_SET_DATAFD (1 << 12)
+#define PTM_CAP_SET_BUFFERSIZE (1 << 13)
enum {
PTM_GET_CAPABILITY = _IOR('P', 0, ptm_cap),
@@ -212,6 +236,7 @@ enum {
PTM_STOP = _IOR('P', 13, ptm_res),
PTM_GET_CONFIG = _IOR('P', 14, ptm_getconfig),
PTM_SET_DATAFD = _IOR('P', 15, ptm_res),
+ PTM_SET_BUFFERSIZE = _IOWR('P', 16, ptm_setbuffersize),
};
/*
@@ -240,7 +265,8 @@ enum {
CMD_SET_STATEBLOB,
CMD_STOP,
CMD_GET_CONFIG,
- CMD_SET_DATAFD
+ CMD_SET_DATAFD,
+ CMD_SET_BUFFERSIZE,
};
#endif /* _TPM_IOCTL_H */
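For context on the PTM_SET_BUFFERSIZE request/response defined above, here is a minimal sketch of how a control-channel client could query (buffersize == 0) or set the buffer size; tpm_ctrl_xfer() is a hypothetical helper standing in for the backend's control-channel transfer and is not part of this patch.

/* Hypothetical helper: send a control command and read the response. */
extern int tpm_ctrl_xfer(unsigned long cmd, void *buf,
                         size_t req_len, size_t resp_len);

static int query_or_set_buffer_size(uint32_t wanted, uint32_t *in_use)
{
    struct ptm_setbuffersize psbs;

    /* buffersize == 0 only queries; any other value tries to set it */
    psbs.u.req.buffersize = cpu_to_be32(wanted);

    if (tpm_ctrl_xfer(CMD_SET_BUFFERSIZE, &psbs,
                      sizeof(psbs.u.req), sizeof(psbs.u.resp)) < 0) {
        return -1;
    }
    if (be32_to_cpu(psbs.u.resp.tpm_result) != 0) {
        return -1;
    }

    /* the device may clamp the request to [minsize, maxsize] */
    *in_use = be32_to_cpu(psbs.u.resp.buffersize);
    return 0;
}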
diff --git a/hw/tpm/tpm_passthrough.c b/hw/tpm/tpm_passthrough.c
index c440aff4b2..149fae63e6 100644
--- a/hw/tpm/tpm_passthrough.c
+++ b/hw/tpm/tpm_passthrough.c
@@ -29,7 +29,6 @@
#include "sysemu/tpm_backend.h"
#include "tpm_int.h"
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "qapi/clone-visitor.h"
#include "tpm_util.h"
@@ -57,6 +56,7 @@ struct TPMPassthruState {
int cancel_fd;
TPMVersion tpm_version;
+ size_t tpm_buffersize;
};
typedef struct TPMPassthruState TPMPassthruState;
@@ -89,6 +89,7 @@ static int tpm_passthrough_unix_tx_bufs(TPMPassthruState *tpm_pt,
bool is_selftest;
const struct tpm_resp_hdr *hdr;
+ /* FIXME: protect shared variables or use other sync mechanism */
tpm_pt->tpm_op_canceled = false;
tpm_pt->tpm_executing = true;
*selftest_done = false;
@@ -139,14 +140,11 @@ err_exit:
static void tpm_passthrough_handle_request(TPMBackend *tb, TPMBackendCmd *cmd)
{
TPMPassthruState *tpm_pt = TPM_PASSTHROUGH(tb);
- TPMIfClass *tic = TPM_IF_GET_CLASS(tb->tpm_state);
DPRINTF("tpm_passthrough: processing command %p\n", cmd);
tpm_passthrough_unix_tx_bufs(tpm_pt, cmd->in, cmd->in_len,
cmd->out, cmd->out_len, &cmd->selftest_done);
-
- tic->request_completed(TPM_IF(tb->tpm_state));
}
static void tpm_passthrough_reset(TPMBackend *tb)
@@ -181,12 +179,11 @@ static void tpm_passthrough_cancel_cmd(TPMBackend *tb)
*/
if (tpm_pt->tpm_executing) {
if (tpm_pt->cancel_fd >= 0) {
+ tpm_pt->tpm_op_canceled = true;
n = write(tpm_pt->cancel_fd, "-", 1);
if (n != 1) {
error_report("Canceling TPM command failed: %s",
strerror(errno));
- } else {
- tpm_pt->tpm_op_canceled = true;
}
} else {
error_report("Cannot cancel TPM command due to missing "
@@ -202,6 +199,19 @@ static TPMVersion tpm_passthrough_get_tpm_version(TPMBackend *tb)
return tpm_pt->tpm_version;
}
+static size_t tpm_passthrough_get_buffer_size(TPMBackend *tb)
+{
+ TPMPassthruState *tpm_pt = TPM_PASSTHROUGH(tb);
+ int ret;
+
+ ret = tpm_util_get_buffer_size(tpm_pt->tpm_fd, tpm_pt->tpm_version,
+ &tpm_pt->tpm_buffersize);
+ if (ret < 0) {
+ tpm_pt->tpm_buffersize = 4096;
+ }
+ return tpm_pt->tpm_buffersize;
+}
+
/*
* Unless path or file descriptor set has been provided by user,
* determine the sysfs cancel file following kernel documentation
@@ -229,9 +239,7 @@ static int tpm_passthrough_open_sysfs_cancel(TPMPassthruState *tpm_pt)
if (snprintf(path, sizeof(path), "/sys/class/misc/%s/device/cancel",
dev) < sizeof(path)) {
fd = qemu_open(path, O_WRONLY);
- if (fd >= 0) {
- tpm_pt->options->cancel_path = g_strdup(path);
- } else {
+ if (fd < 0) {
error_report("tpm_passthrough: Could not open TPM cancel "
"path %s : %s", path, strerror(errno));
}
@@ -244,9 +252,9 @@ static int tpm_passthrough_open_sysfs_cancel(TPMPassthruState *tpm_pt)
return fd;
}
-static int tpm_passthrough_handle_device_opts(QemuOpts *opts, TPMBackend *tb)
+static int
+tpm_passthrough_handle_device_opts(TPMPassthruState *tpm_pt, QemuOpts *opts)
{
- TPMPassthruState *tpm_pt = TPM_PASSTHROUGH(tb);
const char *value;
value = qemu_opt_get(opts, "cancel-path");
@@ -266,52 +274,47 @@ static int tpm_passthrough_handle_device_opts(QemuOpts *opts, TPMBackend *tb)
if (tpm_pt->tpm_fd < 0) {
error_report("Cannot access TPM device using '%s': %s",
tpm_pt->tpm_dev, strerror(errno));
- goto err_free_parameters;
+ return -1;
}
if (tpm_util_test_tpmdev(tpm_pt->tpm_fd, &tpm_pt->tpm_version)) {
error_report("'%s' is not a TPM device.",
tpm_pt->tpm_dev);
- goto err_close_tpmdev;
+ return -1;
}
- return 0;
-
- err_close_tpmdev:
- qemu_close(tpm_pt->tpm_fd);
- tpm_pt->tpm_fd = -1;
-
- err_free_parameters:
- qapi_free_TPMPassthroughOptions(tpm_pt->options);
- tpm_pt->options = NULL;
- tpm_pt->tpm_dev = NULL;
+ tpm_pt->cancel_fd = tpm_passthrough_open_sysfs_cancel(tpm_pt);
+ if (tpm_pt->cancel_fd < 0) {
+ return -1;
+ }
- return 1;
+ return 0;
}
-static TPMBackend *tpm_passthrough_create(QemuOpts *opts, const char *id)
+static TPMBackend *tpm_passthrough_create(QemuOpts *opts)
{
Object *obj = object_new(TYPE_TPM_PASSTHROUGH);
- TPMBackend *tb = TPM_BACKEND(obj);
- TPMPassthruState *tpm_pt = TPM_PASSTHROUGH(tb);
- tb->id = g_strdup(id);
-
- if (tpm_passthrough_handle_device_opts(opts, tb)) {
- goto err_exit;
+ if (tpm_passthrough_handle_device_opts(TPM_PASSTHROUGH(obj), opts)) {
+ object_unref(obj);
+ return NULL;
}
- tpm_pt->cancel_fd = tpm_passthrough_open_sysfs_cancel(tpm_pt);
- if (tpm_pt->cancel_fd < 0) {
- goto err_exit;
- }
+ return TPM_BACKEND(obj);
+}
- return tb;
+static int tpm_passthrough_startup_tpm(TPMBackend *tb, size_t buffersize)
+{
+ TPMPassthruState *tpm_pt = TPM_PASSTHROUGH(tb);
-err_exit:
- object_unref(obj);
+ if (buffersize && buffersize < tpm_pt->tpm_buffersize) {
+ error_report("Requested buffer size of %zu is smaller than host TPM's "
+ "fixed buffer size of %zu",
+ buffersize, tpm_pt->tpm_buffersize);
+ return -1;
+ }
- return NULL;
+ return 0;
}
static TpmTypeOptions *tpm_passthrough_get_tpm_options(TPMBackend *tb)
@@ -355,8 +358,12 @@ static void tpm_passthrough_inst_finalize(Object *obj)
tpm_passthrough_cancel_cmd(TPM_BACKEND(obj));
- qemu_close(tpm_pt->tpm_fd);
- qemu_close(tpm_pt->cancel_fd);
+ if (tpm_pt->tpm_fd >= 0) {
+ qemu_close(tpm_pt->tpm_fd);
+ }
+ if (tpm_pt->cancel_fd >= 0) {
+ qemu_close(tpm_pt->cancel_fd);
+ }
qapi_free_TPMPassthroughOptions(tpm_pt->options);
}
@@ -368,12 +375,14 @@ static void tpm_passthrough_class_init(ObjectClass *klass, void *data)
tbc->opts = tpm_passthrough_cmdline_opts;
tbc->desc = "Passthrough TPM backend driver";
tbc->create = tpm_passthrough_create;
+ tbc->startup_tpm = tpm_passthrough_startup_tpm;
tbc->reset = tpm_passthrough_reset;
tbc->cancel_cmd = tpm_passthrough_cancel_cmd;
tbc->get_tpm_established_flag = tpm_passthrough_get_tpm_established_flag;
tbc->reset_tpm_established_flag =
tpm_passthrough_reset_tpm_established_flag;
tbc->get_tpm_version = tpm_passthrough_get_tpm_version;
+ tbc->get_buffer_size = tpm_passthrough_get_buffer_size;
tbc->get_tpm_options = tpm_passthrough_get_tpm_options;
tbc->handle_request = tpm_passthrough_handle_request;
}
diff --git a/hw/tpm/tpm_tis.c b/hw/tpm/tpm_tis.c
index 42d647d363..561384cd86 100644
--- a/hw/tpm/tpm_tis.c
+++ b/hw/tpm/tpm_tis.c
@@ -24,17 +24,13 @@
#include "qemu/osdep.h"
#include "hw/isa/isa.h"
-#include "sysemu/tpm_backend.h"
-#include "tpm_int.h"
-#include "sysemu/block-backend.h"
-#include "exec/address-spaces.h"
-#include "hw/hw.h"
-#include "hw/i386/pc.h"
-#include "hw/pci/pci_ids.h"
#include "qapi/error.h"
-#include "qemu-common.h"
-#include "qemu/main-loop.h"
+
#include "hw/acpi/tpm.h"
+#include "hw/pci/pci_ids.h"
+#include "sysemu/tpm_backend.h"
+#include "tpm_int.h"
+#include "tpm_util.h"
#define TPM_TIS_NUM_LOCALITIES 5 /* per spec */
#define TPM_TIS_LOCALITY_SHIFT 12
@@ -52,11 +48,6 @@ typedef enum {
TPM_TIS_STATE_RECEPTION,
} TPMTISState;
-typedef struct TPMSizedBuffer {
- uint32_t size;
- uint8_t *buffer;
-} TPMSizedBuffer;
-
/* locality data -- all fields are persisted */
typedef struct TPMLocality {
TPMTISState state;
@@ -65,20 +56,14 @@ typedef struct TPMLocality {
uint32_t iface_id;
uint32_t inte;
uint32_t ints;
-
- uint16_t w_offset;
- uint16_t r_offset;
- TPMSizedBuffer w_buffer;
- TPMSizedBuffer r_buffer;
} TPMLocality;
-struct TPMState {
+typedef struct TPMState {
ISADevice busdev;
MemoryRegion mmio;
- QEMUBH *bh;
- uint32_t offset;
- uint8_t buf[TPM_TIS_BUFFER_MAX];
+ unsigned char buffer[TPM_TIS_BUFFER_MAX];
+ uint16_t rw_offset;
uint8_t active_locty;
uint8_t aborting_locty;
@@ -89,13 +74,13 @@ struct TPMState {
qemu_irq irq;
uint32_t irq_num;
- uint8_t locty_number;
TPMBackendCmd cmd;
- char *backend;
TPMBackend *be_driver;
TPMVersion be_tpm_version;
-};
+
+ size_t be_buffer_size;
+} TPMState;
#define TPM(obj) OBJECT_CHECK(TPMState, (obj), TYPE_TPM_TIS)
@@ -220,23 +205,19 @@ static uint8_t tpm_tis_locality_from_addr(hwaddr addr)
return (uint8_t)((addr >> TPM_TIS_LOCALITY_SHIFT) & 0x7);
}
-static uint32_t tpm_tis_get_size_from_buffer(const TPMSizedBuffer *sb)
-{
- return be32_to_cpu(*(uint32_t *)&sb->buffer[2]);
-}
-
-static void tpm_tis_show_buffer(const TPMSizedBuffer *sb, const char *string)
+static void tpm_tis_show_buffer(const unsigned char *buffer,
+ size_t buffer_size, const char *string)
{
#ifdef DEBUG_TIS
uint32_t len, i;
- len = tpm_tis_get_size_from_buffer(sb);
+ len = MIN(tpm_cmd_get_size(buffer), buffer_size);
DPRINTF("tpm_tis: %s length = %d\n", string, len);
for (i = 0; i < len; i++) {
if (i && !(i % 16)) {
DPRINTF("\n");
}
- DPRINTF("%.2X ", sb->buffer[i]);
+ DPRINTF("%.2X ", buffer[i]);
}
DPRINTF("\n");
#endif
@@ -266,22 +247,21 @@ static void tpm_tis_sts_set(TPMLocality *l, uint32_t flags)
*/
static void tpm_tis_tpm_send(TPMState *s, uint8_t locty)
{
- TPMLocality *locty_data = &s->loc[locty];
-
- tpm_tis_show_buffer(&s->loc[locty].w_buffer, "tpm_tis: To TPM");
+ tpm_tis_show_buffer(s->buffer, s->be_buffer_size,
+ "tpm_tis: To TPM");
/*
- * w_offset serves as length indicator for length of data;
+ * rw_offset serves as length indicator for length of data;
* it's reset when the response comes back
*/
s->loc[locty].state = TPM_TIS_STATE_EXECUTION;
s->cmd = (TPMBackendCmd) {
.locty = locty,
- .in = locty_data->w_buffer.buffer,
- .in_len = locty_data->w_offset,
- .out = locty_data->r_buffer.buffer,
- .out_len = locty_data->r_buffer.size
+ .in = s->buffer,
+ .in_len = s->rw_offset,
+ .out = s->buffer,
+ .out_len = s->be_buffer_size,
};
tpm_backend_deliver_request(s->be_driver, &s->cmd);
@@ -361,8 +341,7 @@ static void tpm_tis_new_active_locality(TPMState *s, uint8_t new_active_locty)
/* abort -- this function switches the locality */
static void tpm_tis_abort(TPMState *s, uint8_t locty)
{
- s->loc[locty].r_offset = 0;
- s->loc[locty].w_offset = 0;
+ s->rw_offset = 0;
DPRINTF("tpm_tis: tis_abort: new active locality is %d\n", s->next_locty);
@@ -411,18 +390,28 @@ static void tpm_tis_prep_abort(TPMState *s, uint8_t locty, uint8_t newlocty)
tpm_tis_abort(s, locty);
}
-static void tpm_tis_receive_bh(void *opaque)
+/*
+ * Callback from the TPM to indicate that the response was received.
+ */
+static void tpm_tis_request_completed(TPMIf *ti)
{
- TPMState *s = opaque;
+ TPMState *s = TPM(ti);
uint8_t locty = s->cmd.locty;
+ uint8_t l;
+
+ if (s->cmd.selftest_done) {
+ for (l = 0; l < TPM_TIS_NUM_LOCALITIES; l++) {
+ s->loc[locty].sts |= TPM_TIS_STS_SELFTEST_DONE;
+ }
+ }
tpm_tis_sts_set(&s->loc[locty],
TPM_TIS_STS_VALID | TPM_TIS_STS_DATA_AVAILABLE);
s->loc[locty].state = TPM_TIS_STATE_COMPLETION;
- s->loc[locty].r_offset = 0;
- s->loc[locty].w_offset = 0;
+ s->rw_offset = 0;
- tpm_tis_show_buffer(&s->loc[locty].r_buffer, "tpm_tis: From TPM");
+ tpm_tis_show_buffer(s->buffer, s->be_buffer_size,
+ "tpm_tis: From TPM");
if (TPM_TIS_IS_VALID_LOCTY(s->next_locty)) {
tpm_tis_abort(s, locty);
@@ -432,23 +421,6 @@ static void tpm_tis_receive_bh(void *opaque)
TPM_TIS_INT_DATA_AVAILABLE | TPM_TIS_INT_STS_VALID);
}
-static void tpm_tis_request_completed(TPMIf *ti)
-{
- TPMState *s = TPM(ti);
-
- bool is_selftest_done = s->cmd.selftest_done;
- uint8_t locty = s->cmd.locty;
- uint8_t l;
-
- if (is_selftest_done) {
- for (l = 0; l < TPM_TIS_NUM_LOCALITIES; l++) {
- s->loc[locty].sts |= TPM_TIS_STS_SELFTEST_DONE;
- }
- }
-
- qemu_bh_schedule(s->bh);
-}
-
/*
* Read a byte of response data
*/
@@ -458,16 +430,17 @@ static uint32_t tpm_tis_data_read(TPMState *s, uint8_t locty)
uint16_t len;
if ((s->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) {
- len = tpm_tis_get_size_from_buffer(&s->loc[locty].r_buffer);
+ len = MIN(tpm_cmd_get_size(&s->buffer),
+ s->be_buffer_size);
- ret = s->loc[locty].r_buffer.buffer[s->loc[locty].r_offset++];
- if (s->loc[locty].r_offset >= len) {
+ ret = s->buffer[s->rw_offset++];
+ if (s->rw_offset >= len) {
/* got last byte */
tpm_tis_sts_set(&s->loc[locty], TPM_TIS_STS_VALID);
tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID);
}
DPRINTF("tpm_tis: tpm_tis_data_read byte 0x%02x [%d]\n",
- ret, s->loc[locty].r_offset - 1);
+ ret, s->rw_offset - 1);
}
return ret;
@@ -502,27 +475,15 @@ static void tpm_tis_dump_state(void *opaque, hwaddr addr)
(int)tpm_tis_mmio_read(opaque, base + regs[idx], 4));
}
- DPRINTF("tpm_tis: read offset : %d\n"
+ DPRINTF("tpm_tis: r/w offset : %d\n"
"tpm_tis: result buffer : ",
- s->loc[locty].r_offset);
- for (idx = 0;
- idx < tpm_tis_get_size_from_buffer(&s->loc[locty].r_buffer);
- idx++) {
- DPRINTF("%c%02x%s",
- s->loc[locty].r_offset == idx ? '>' : ' ',
- s->loc[locty].r_buffer.buffer[idx],
- ((idx & 0xf) == 0xf) ? "\ntpm_tis: " : "");
- }
- DPRINTF("\n"
- "tpm_tis: write offset : %d\n"
- "tpm_tis: request buffer: ",
- s->loc[locty].w_offset);
+ s->rw_offset);
for (idx = 0;
- idx < tpm_tis_get_size_from_buffer(&s->loc[locty].w_buffer);
+ idx < MIN(tpm_cmd_get_size(&s->buffer), s->be_buffer_size);
idx++) {
DPRINTF("%c%02x%s",
- s->loc[locty].w_offset == idx ? '>' : ' ',
- s->loc[locty].w_buffer.buffer[idx],
+ s->rw_offset == idx ? '>' : ' ',
+ s->buffer[idx],
((idx & 0xf) == 0xf) ? "\ntpm_tis: " : "");
}
DPRINTF("\n");
@@ -584,11 +545,11 @@ static uint64_t tpm_tis_mmio_read(void *opaque, hwaddr addr,
if (s->active_locty == locty) {
if ((s->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) {
val = TPM_TIS_BURST_COUNT(
- tpm_tis_get_size_from_buffer(&s->loc[locty].r_buffer)
- - s->loc[locty].r_offset) | s->loc[locty].sts;
+ MIN(tpm_cmd_get_size(&s->buffer),
+ s->be_buffer_size)
+ - s->rw_offset) | s->loc[locty].sts;
} else {
- avail = s->loc[locty].w_buffer.size
- - s->loc[locty].w_offset;
+ avail = s->be_buffer_size - s->rw_offset;
/*
* byte-sized reads should not return 0x00 for 0x100
* available bytes.
@@ -852,8 +813,7 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr,
switch (s->loc[locty].state) {
case TPM_TIS_STATE_READY:
- s->loc[locty].w_offset = 0;
- s->loc[locty].r_offset = 0;
+ s->rw_offset = 0;
break;
case TPM_TIS_STATE_IDLE:
@@ -871,8 +831,7 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr,
break;
case TPM_TIS_STATE_COMPLETION:
- s->loc[locty].w_offset = 0;
- s->loc[locty].r_offset = 0;
+ s->rw_offset = 0;
/* shortcut to ready state with C/R set */
s->loc[locty].state = TPM_TIS_STATE_READY;
if (!(s->loc[locty].sts & TPM_TIS_STS_COMMAND_READY)) {
@@ -898,7 +857,7 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr,
} else if (val == TPM_TIS_STS_RESPONSE_RETRY) {
switch (s->loc[locty].state) {
case TPM_TIS_STATE_COMPLETION:
- s->loc[locty].r_offset = 0;
+ s->rw_offset = 0;
tpm_tis_sts_set(&s->loc[locty],
TPM_TIS_STS_VALID|
TPM_TIS_STS_DATA_AVAILABLE);
@@ -936,9 +895,9 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr,
}
while ((s->loc[locty].sts & TPM_TIS_STS_EXPECT) && size > 0) {
- if (s->loc[locty].w_offset < s->loc[locty].w_buffer.size) {
- s->loc[locty].w_buffer.
- buffer[s->loc[locty].w_offset++] = (uint8_t)val;
+ if (s->rw_offset < s->be_buffer_size) {
+ s->buffer[s->rw_offset++] =
+ (uint8_t)val;
val >>= 8;
size--;
} else {
@@ -947,13 +906,13 @@ static void tpm_tis_mmio_write(void *opaque, hwaddr addr,
}
/* check for complete packet */
- if (s->loc[locty].w_offset > 5 &&
+ if (s->rw_offset > 5 &&
(s->loc[locty].sts & TPM_TIS_STS_EXPECT)) {
/* we have a packet length - see if we have all of it */
bool need_irq = !(s->loc[locty].sts & TPM_TIS_STS_VALID);
- len = tpm_tis_get_size_from_buffer(&s->loc[locty].w_buffer);
- if (len > s->loc[locty].w_offset) {
+ len = tpm_cmd_get_size(&s->buffer);
+ if (len > s->rw_offset) {
tpm_tis_sts_set(&s->loc[locty],
TPM_TIS_STS_EXPECT | TPM_TIS_STS_VALID);
} else {
@@ -986,27 +945,17 @@ static const MemoryRegionOps tpm_tis_memory_ops = {
},
};
-static int tpm_tis_do_startup_tpm(TPMState *s)
+static int tpm_tis_do_startup_tpm(TPMState *s, size_t buffersize)
{
- return tpm_backend_startup_tpm(s->be_driver);
-}
-
-static void tpm_tis_realloc_buffer(TPMSizedBuffer *sb)
-{
- size_t wanted_size = 4096; /* Linux tpm.c buffer size */
-
- if (sb->size != wanted_size) {
- sb->buffer = g_realloc(sb->buffer, wanted_size);
- sb->size = wanted_size;
- }
+ return tpm_backend_startup_tpm(s->be_driver, buffersize);
}
/*
* Get the TPMVersion of the backend device being used
*/
-TPMVersion tpm_tis_get_tpm_version(Object *obj)
+static enum TPMVersion tpm_tis_get_tpm_version(TPMIf *ti)
{
- TPMState *s = TPM(obj);
+ TPMState *s = TPM(ti);
if (tpm_backend_had_startup_error(s->be_driver)) {
return TPM_VERSION_UNSPEC;
@@ -1025,6 +974,8 @@ static void tpm_tis_reset(DeviceState *dev)
int c;
s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);
+ s->be_buffer_size = MIN(tpm_backend_get_buffer_size(s->be_driver),
+ TPM_TIS_BUFFER_MAX);
tpm_backend_reset(s->be_driver);
@@ -1050,13 +1001,10 @@ static void tpm_tis_reset(DeviceState *dev)
s->loc[c].ints = 0;
s->loc[c].state = TPM_TIS_STATE_IDLE;
- s->loc[c].w_offset = 0;
- tpm_tis_realloc_buffer(&s->loc[c].w_buffer);
- s->loc[c].r_offset = 0;
- tpm_tis_realloc_buffer(&s->loc[c].r_buffer);
+ s->rw_offset = 0;
}
- tpm_tis_do_startup_tpm(s);
+ tpm_tis_do_startup_tpm(s, s->be_buffer_size);
}
static const VMStateDescription vmstate_tpm_tis = {
@@ -1066,7 +1014,7 @@ static const VMStateDescription vmstate_tpm_tis = {
static Property tpm_tis_properties[] = {
DEFINE_PROP_UINT32("irq", TPMState, irq_num, TPM_TIS_IRQ),
- DEFINE_PROP_STRING("tpmdev", TPMState, backend),
+ DEFINE_PROP_TPMBE("tpmdev", TPMState, be_driver),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1074,29 +1022,21 @@ static void tpm_tis_realizefn(DeviceState *dev, Error **errp)
{
TPMState *s = TPM(dev);
- s->be_driver = qemu_find_tpm(s->backend);
- if (!s->be_driver) {
- error_setg(errp, "tpm_tis: backend driver with id %s could not be "
- "found", s->backend);
+ if (!tpm_find()) {
+ error_setg(errp, "at most one TPM device is permitted");
return;
}
- s->be_driver->fe_model = TPM_MODEL_TPM_TIS;
-
- if (tpm_backend_init(s->be_driver, s)) {
- error_setg(errp, "tpm_tis: backend driver with id %s could not be "
- "initialized", s->backend);
+ if (!s->be_driver) {
+ error_setg(errp, "'tpmdev' property is required");
return;
}
-
if (s->irq_num > 15) {
- error_setg(errp, "tpm_tis: IRQ %d for TPM TIS is outside valid range "
- "of 0 to 15", s->irq_num);
+ error_setg(errp, "IRQ %d is outside valid range of 0 to 15",
+ s->irq_num);
return;
}
- s->bh = qemu_bh_new(tpm_tis_receive_bh, s);
-
isa_init_irq(&s->busdev, &s->irq, s->irq_num);
memory_region_add_subregion(isa_address_space(ISA_DEVICE(dev)),
@@ -1121,6 +1061,8 @@ static void tpm_tis_class_init(ObjectClass *klass, void *data)
dc->props = tpm_tis_properties;
dc->reset = tpm_tis_reset;
dc->vmsd = &vmstate_tpm_tis;
+ tc->model = TPM_MODEL_TPM_TIS;
+ tc->get_version = tpm_tis_get_tpm_version;
tc->request_completed = tpm_tis_request_completed;
}
@@ -1139,7 +1081,6 @@ static const TypeInfo tpm_tis_info = {
static void tpm_tis_register(void)
{
type_register_static(&tpm_tis_info);
- tpm_register_model(TPM_MODEL_TPM_TIS);
}
type_init(tpm_tis_register)
diff --git a/hw/tpm/tpm_util.c b/hw/tpm/tpm_util.c
index daf1faa63d..747075e244 100644
--- a/hw/tpm/tpm_util.c
+++ b/hw/tpm/tpm_util.c
@@ -20,9 +20,85 @@
*/
#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
#include "tpm_util.h"
#include "tpm_int.h"
#include "exec/memory.h"
+#include "sysemu/tpm_backend.h"
+#include "hw/qdev.h"
+
+#define DEBUG_TPM 0
+
+#define DPRINTF(fmt, ...) do { \
+ if (DEBUG_TPM) { \
+ fprintf(stderr, "tpm-util:"fmt"\n", ## __VA_ARGS__); \
+ } \
+} while (0)
+
+/* tpm backend property */
+
+static void get_tpm(Object *obj, Visitor *v, const char *name, void *opaque,
+ Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ TPMBackend **be = qdev_get_prop_ptr(dev, opaque);
+ char *p;
+
+ p = g_strdup(*be ? (*be)->id : "");
+ visit_type_str(v, name, &p, errp);
+ g_free(p);
+}
+
+static void set_tpm(Object *obj, Visitor *v, const char *name, void *opaque,
+ Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Error *local_err = NULL;
+ Property *prop = opaque;
+ TPMBackend *s, **be = qdev_get_prop_ptr(dev, prop);
+ char *str;
+
+ if (dev->realized) {
+ qdev_prop_set_after_realize(dev, name, errp);
+ return;
+ }
+
+ visit_type_str(v, name, &str, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ s = qemu_find_tpm_be(str);
+ if (s == NULL) {
+ error_setg(errp, "Property '%s.%s' can't find value '%s'",
+ object_get_typename(obj), prop->name, str);
+ } else if (tpm_backend_init(s, TPM_IF(obj), errp) == 0) {
+ *be = s; /* weak reference, avoid cyclic ref */
+ }
+ g_free(str);
+}
+
+static void release_tpm(Object *obj, const char *name, void *opaque)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ TPMBackend **be = qdev_get_prop_ptr(dev, prop);
+
+ if (*be) {
+ tpm_backend_reset(*be);
+ }
+}
+
+const PropertyInfo qdev_prop_tpm = {
+ .name = "str",
+ .description = "ID of a tpm to use as a backend",
+ .get = get_tpm,
+ .set = set_tpm,
+ .release = release_tpm,
+};
/*
* Write an error message in the given output buffer.
@@ -50,13 +126,13 @@ bool tpm_util_is_selftest(const uint8_t *in, uint32_t in_len)
}
/*
- * A basic test of a TPM device. We expect a well formatted response header
- * (error response is fine) within one second.
+ * Send request to a TPM device. We expect a response within one second.
*/
-static int tpm_util_test(int fd,
- unsigned char *request,
- size_t requestlen,
- uint16_t *return_tag)
+static int tpm_util_request(int fd,
+ unsigned char *request,
+ size_t requestlen,
+ unsigned char *response,
+ size_t responselen)
{
struct tpm_resp_hdr *resp;
fd_set readfds;
@@ -65,7 +141,6 @@ static int tpm_util_test(int fd,
.tv_sec = 1,
.tv_usec = 0,
};
- unsigned char buf[1024];
n = write(fd, request, requestlen);
if (n < 0) {
@@ -84,17 +159,40 @@ static int tpm_util_test(int fd,
return -errno;
}
- n = read(fd, &buf, sizeof(buf));
+ n = read(fd, response, responselen);
if (n < sizeof(struct tpm_resp_hdr)) {
return -EFAULT;
}
- resp = (struct tpm_resp_hdr *)buf;
+ resp = (struct tpm_resp_hdr *)response;
/* check the header */
if (be32_to_cpu(resp->len) != n) {
return -EMSGSIZE;
}
+ return 0;
+}
+
+/*
+ * A basic test of a TPM device. We expect a well formatted response header
+ * (error response is fine).
+ */
+static int tpm_util_test(int fd,
+ unsigned char *request,
+ size_t requestlen,
+ uint16_t *return_tag)
+{
+ struct tpm_resp_hdr *resp;
+ unsigned char buf[1024];
+ ssize_t ret;
+
+ ret = tpm_util_request(fd, request, requestlen,
+ buf, sizeof(buf));
+ if (ret < 0) {
+ return ret;
+ }
+
+ resp = (struct tpm_resp_hdr *)buf;
*return_tag = be16_to_cpu(resp->tag);
return 0;
@@ -151,3 +249,116 @@ int tpm_util_test_tpmdev(int tpm_fd, TPMVersion *tpm_version)
return 1;
}
+
+int tpm_util_get_buffer_size(int tpm_fd, TPMVersion tpm_version,
+ size_t *buffersize)
+{
+ unsigned char buf[1024];
+ int ret;
+
+ switch (tpm_version) {
+ case TPM_VERSION_1_2: {
+ const struct tpm_req_get_buffer_size {
+ struct tpm_req_hdr hdr;
+ uint32_t capability;
+ uint32_t len;
+ uint32_t subcap;
+ } QEMU_PACKED tpm_get_buffer_size = {
+ .hdr = {
+ .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
+ .len = cpu_to_be32(sizeof(tpm_get_buffer_size)),
+ .ordinal = cpu_to_be32(TPM_ORD_GetCapability),
+ },
+ .capability = cpu_to_be32(TPM_CAP_PROPERTY),
+ .len = cpu_to_be32(sizeof(uint32_t)),
+ .subcap = cpu_to_be32(TPM_CAP_PROP_INPUT_BUFFER),
+ };
+ struct tpm_resp_get_buffer_size {
+ struct tpm_resp_hdr hdr;
+ uint32_t len;
+ uint32_t buffersize;
+ } QEMU_PACKED *tpm_resp = (struct tpm_resp_get_buffer_size *)buf;
+
+ ret = tpm_util_request(tpm_fd, (unsigned char *)&tpm_get_buffer_size,
+ sizeof(tpm_get_buffer_size), buf, sizeof(buf));
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (be32_to_cpu(tpm_resp->hdr.len) != sizeof(*tpm_resp) ||
+ be32_to_cpu(tpm_resp->len) != sizeof(uint32_t)) {
+ DPRINTF("tpm_resp->hdr.len = %u, expected = %zu\n",
+ be32_to_cpu(tpm_resp->hdr.len), sizeof(*tpm_resp));
+ DPRINTF("tpm_resp->len = %u, expected = %zu\n",
+ be32_to_cpu(tpm_resp->len), sizeof(uint32_t));
+ error_report("tpm_util: Got unexpected response to "
+ "TPM_GetCapability; errcode: 0x%x",
+ be32_to_cpu(tpm_resp->hdr.errcode));
+ return -EFAULT;
+ }
+ *buffersize = be32_to_cpu(tpm_resp->buffersize);
+ break;
+ }
+ case TPM_VERSION_2_0: {
+ const struct tpm2_req_get_buffer_size {
+ struct tpm_req_hdr hdr;
+ uint32_t capability;
+ uint32_t property;
+ uint32_t count;
+ } QEMU_PACKED tpm2_get_buffer_size = {
+ .hdr = {
+ .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
+ .len = cpu_to_be32(sizeof(tpm2_get_buffer_size)),
+ .ordinal = cpu_to_be32(TPM2_CC_GetCapability),
+ },
+ .capability = cpu_to_be32(TPM2_CAP_TPM_PROPERTIES),
+ .property = cpu_to_be32(TPM2_PT_MAX_COMMAND_SIZE),
+ .count = cpu_to_be32(2), /* also get TPM2_PT_MAX_RESPONSE_SIZE */
+ };
+ struct tpm2_resp_get_buffer_size {
+ struct tpm_resp_hdr hdr;
+ uint8_t more;
+ uint32_t capability;
+ uint32_t count;
+ uint32_t property1;
+ uint32_t value1;
+ uint32_t property2;
+ uint32_t value2;
+ } QEMU_PACKED *tpm2_resp = (struct tpm2_resp_get_buffer_size *)buf;
+
+ ret = tpm_util_request(tpm_fd, (unsigned char *)&tpm2_get_buffer_size,
+ sizeof(tpm2_get_buffer_size), buf, sizeof(buf));
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (be32_to_cpu(tpm2_resp->hdr.len) != sizeof(*tpm2_resp) ||
+ be32_to_cpu(tpm2_resp->count) != 2) {
+ DPRINTF("tpm2_resp->hdr.len = %u, expected = %zu\n",
+ be32_to_cpu(tpm2_resp->hdr.len), sizeof(*tpm2_resp));
+ DPRINTF("tpm2_resp->len = %u, expected = %u\n",
+ be32_to_cpu(tpm2_resp->count), 2);
+ error_report("tpm_util: Got unexpected response to "
+ "TPM2_GetCapability; errcode: 0x%x",
+ be32_to_cpu(tpm2_resp->hdr.errcode));
+ return -EFAULT;
+ }
+ *buffersize = MAX(be32_to_cpu(tpm2_resp->value1),
+ be32_to_cpu(tpm2_resp->value2));
+ break;
+ }
+ case TPM_VERSION_UNSPEC:
+ return -EFAULT;
+ }
+
+ DPRINTF("buffersize of device: %zu\n", *buffersize);
+
+ return 0;
+}
+
+void tpm_sized_buffer_reset(TPMSizedBuffer *tsb)
+{
+ g_free(tsb->buffer);
+ tsb->buffer = NULL;
+ tsb->size = 0;
+}
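The get/set/release handlers added above implement a qdev property that resolves a TPM backend by its id and binds it to the front end via tpm_backend_init(). A hedged sketch of how a front-end device would declare it (device and state names here are hypothetical):

/* A device model declares the property in its Property array: */
static Property my_tpm_frontend_properties[] = {      /* hypothetical device */
    DEFINE_PROP_TPMBE("tpmdev", MyTPMFrontendState, be_driver),
    DEFINE_PROP_END_OF_LIST(),
};

/*
 * On the command line the property is then wired to a backend id, e.g.:
 *
 *   -tpmdev passthrough,id=tpm0,path=/dev/tpm0 \
 *   -device tpm-tis,tpmdev=tpm0
 *
 * set_tpm() above looks the id up with qemu_find_tpm_be() and calls
 * tpm_backend_init(), so a backend is bound to at most one front end.
 */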
diff --git a/hw/tpm/tpm_util.h b/hw/tpm/tpm_util.h
index 2f7c96146d..19b28474ae 100644
--- a/hw/tpm/tpm_util.h
+++ b/hw/tpm/tpm_util.h
@@ -22,7 +22,8 @@
#ifndef TPM_TPM_UTIL_H
#define TPM_TPM_UTIL_H
-#include "sysemu/tpm_backend.h"
+#include "sysemu/tpm.h"
+#include "qemu/bswap.h"
void tpm_util_write_fatal_error_response(uint8_t *out, uint32_t out_len);
@@ -30,4 +31,22 @@ bool tpm_util_is_selftest(const uint8_t *in, uint32_t in_len);
int tpm_util_test_tpmdev(int tpm_fd, TPMVersion *tpm_version);
+static inline uint32_t tpm_cmd_get_size(const void *b)
+{
+ return be32_to_cpu(*(const uint32_t *)(b + 2));
+}
+
+int tpm_util_get_buffer_size(int tpm_fd, TPMVersion tpm_version,
+ size_t *buffersize);
+
+#define DEFINE_PROP_TPMBE(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_tpm, TPMBackend *)
+
+typedef struct TPMSizedBuffer {
+ uint32_t size;
+ uint8_t *buffer;
+} TPMSizedBuffer;
+
+void tpm_sized_buffer_reset(TPMSizedBuffer *tsb);
+
#endif /* TPM_TPM_UTIL_H */
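The new tpm_cmd_get_size() inline reads the big-endian length field that every TPM request and response carries at byte offset 2. A small sketch of how a caller might use it (the function shown is illustrative, not part of the patch):

/* Sketch: the length field lives at bytes 2..5 of any TPM packet. */
static void show_cmd_len(const uint8_t *buf, size_t buf_size)
{
    uint32_t len = tpm_cmd_get_size(buf);

    /* a well-formed packet never claims more bytes than the buffer holds */
    if (len > buf_size) {
        fprintf(stderr, "short buffer: header claims %u bytes\n", len);
        return;
    }
    fprintf(stderr, "TPM packet length: %u bytes\n", len);
}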
diff --git a/hw/unicore32/puv3.c b/hw/unicore32/puv3.c
index 1b39cc035b..db26959a1d 100644
--- a/hw/unicore32/puv3.c
+++ b/hw/unicore32/puv3.c
@@ -11,16 +11,11 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
-#include "qemu-common.h"
#include "cpu.h"
#include "ui/console.h"
-#include "elf.h"
-#include "exec/address-spaces.h"
-#include "hw/sysbus.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "hw/i386/pc.h"
-#include "qemu/error-report.h"
#include "sysemu/qtest.h"
#undef DEBUG_PUV3
@@ -29,6 +24,16 @@
#define KERNEL_LOAD_ADDR 0x03000000
#define KERNEL_MAX_SIZE 0x00800000 /* Just a guess */
+/* PKUnity System bus (AHB): 0xc0000000 - 0xedffffff (640MB) */
+#define PUV3_DMA_BASE (0xc0200000) /* AHB-4 */
+
+/* PKUnity Peripheral bus (APB): 0xee000000 - 0xefffffff (128MB) */
+#define PUV3_GPIO_BASE (0xee500000) /* APB-5 */
+#define PUV3_INTC_BASE (0xee600000) /* APB-6 */
+#define PUV3_OST_BASE (0xee800000) /* APB-8 */
+#define PUV3_PM_BASE (0xeea00000) /* APB-10 */
+#define PUV3_PS2_BASE (0xeeb00000) /* APB-11 */
+
static void puv3_intc_cpu_handler(void *opaque, int irq, int level)
{
UniCore32CPU *cpu = opaque;
diff --git a/hw/usb/bus.c b/hw/usb/bus.c
index e56dc3348a..11f7720d71 100644
--- a/hw/usb/bus.c
+++ b/hw/usb/bus.c
@@ -559,28 +559,6 @@ int usb_device_detach(USBDevice *dev)
return 0;
}
-int usb_device_delete_addr(int busnr, int addr)
-{
- USBBus *bus;
- USBPort *port;
- USBDevice *dev;
-
- bus = usb_bus_find(busnr);
- if (!bus)
- return -1;
-
- QTAILQ_FOREACH(port, &bus->used, next) {
- if (port->dev->addr == addr)
- break;
- }
- if (!port)
- return -1;
- dev = port->dev;
-
- object_unparent(OBJECT(dev));
- return 0;
-}
-
static const char *usb_speed(unsigned int speed)
{
static const char *txt[] = {
diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c
index 8a61ec94c8..9722ac854c 100644
--- a/hw/usb/dev-storage.c
+++ b/hw/usb/dev-storage.c
@@ -596,12 +596,11 @@ static void usb_msd_unrealize_storage(USBDevice *dev, Error **errp)
object_unref(OBJECT(&s->bus));
}
-static void usb_msd_realize_storage(USBDevice *dev, Error **errp)
+static void usb_msd_storage_realize(USBDevice *dev, Error **errp)
{
MSDState *s = USB_STORAGE_DEV(dev);
BlockBackend *blk = s->conf.blk;
SCSIDevice *scsi_dev;
- Error *err = NULL;
if (!blk) {
error_setg(errp, "drive property not set");
@@ -610,9 +609,8 @@ static void usb_msd_realize_storage(USBDevice *dev, Error **errp)
blkconf_serial(&s->conf, &dev->serial);
blkconf_blocksizes(&s->conf);
- blkconf_apply_backend_options(&s->conf, blk_is_read_only(blk), true, &err);
- if (err) {
- error_propagate(errp, err);
+ if (!blkconf_apply_backend_options(&s->conf, blk_is_read_only(blk), true,
+ errp)) {
return;
}
@@ -636,24 +634,23 @@ static void usb_msd_realize_storage(USBDevice *dev, Error **errp)
&usb_msd_scsi_info_storage, NULL);
scsi_dev = scsi_bus_legacy_add_drive(&s->bus, blk, 0, !!s->removable,
s->conf.bootindex, dev->serial,
- &err);
+ errp);
blk_unref(blk);
if (!scsi_dev) {
- error_propagate(errp, err);
return;
}
usb_msd_handle_reset(dev);
s->scsi_dev = scsi_dev;
}
-static void usb_msd_unrealize_bot(USBDevice *dev, Error **errp)
+static void usb_msd_bot_unrealize(USBDevice *dev, Error **errp)
{
MSDState *s = USB_STORAGE_DEV(dev);
object_unref(OBJECT(&s->bus));
}
-static void usb_msd_realize_bot(USBDevice *dev, Error **errp)
+static void usb_msd_bot_realize(USBDevice *dev, Error **errp)
{
MSDState *s = USB_STORAGE_DEV(dev);
DeviceState *d = DEVICE(dev);
@@ -767,12 +764,12 @@ static void usb_msd_class_initfn_common(ObjectClass *klass, void *data)
dc->vmsd = &vmstate_usb_msd;
}
-static void usb_msd_class_initfn_storage(ObjectClass *klass, void *data)
+static void usb_msd_class_storage_initfn(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
- uc->realize = usb_msd_realize_storage;
+ uc->realize = usb_msd_storage_realize;
uc->unrealize = usb_msd_unrealize_storage;
dc->props = msd_properties;
}
@@ -831,26 +828,26 @@ static void usb_msd_instance_init(Object *obj)
object_property_set_int(obj, -1, "bootindex", NULL);
}
-static void usb_msd_class_initfn_bot(ObjectClass *klass, void *data)
+static void usb_msd_class_bot_initfn(ObjectClass *klass, void *data)
{
USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
- uc->realize = usb_msd_realize_bot;
- uc->unrealize = usb_msd_unrealize_bot;
+ uc->realize = usb_msd_bot_realize;
+ uc->unrealize = usb_msd_bot_unrealize;
uc->attached_settable = true;
}
static const TypeInfo msd_info = {
.name = "usb-storage",
.parent = TYPE_USB_STORAGE,
- .class_init = usb_msd_class_initfn_storage,
+ .class_init = usb_msd_class_storage_initfn,
.instance_init = usb_msd_instance_init,
};
static const TypeInfo bot_info = {
.name = "usb-bot",
.parent = TYPE_USB_STORAGE,
- .class_init = usb_msd_class_initfn_bot,
+ .class_init = usb_msd_class_bot_initfn,
};
static void usb_msd_register_types(void)
diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c
index 636729c03d..16713f2c52 100644
--- a/hw/vfio/ccw.c
+++ b/hw/vfio/ccw.c
@@ -11,11 +11,11 @@
* directory.
*/
+#include "qemu/osdep.h"
#include <linux/vfio.h>
#include <linux/vfio_ccw.h>
#include <sys/ioctl.h>
-#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/vfio/vfio.h"
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 7b2924c0ef..b77be3a8b3 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -968,6 +968,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
group->container = container;
QLIST_INSERT_HEAD(&container->group_list, group, container_next);
+ vfio_kvm_device_add_group(group);
return 0;
}
}
@@ -990,6 +991,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
container = g_malloc0(sizeof(*container));
container->space = space;
container->fd = fd;
+ QLIST_INIT(&container->giommu_list);
+ QLIST_INIT(&container->hostwin_list);
if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
@@ -1040,6 +1043,11 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
if (ret) {
+ container->iommu_type = VFIO_SPAPR_TCE_IOMMU;
+ v2 = false;
+ ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
+ }
+ if (ret) {
error_setg_errno(errp, errno, "failed to set iommu for container");
ret = -errno;
goto free_container_exit;
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index 502a5755b9..a8fb3b3422 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -93,8 +93,6 @@ typedef struct VFIOMSIXInfo {
uint16_t entries;
uint32_t table_offset;
uint32_t pba_offset;
- MemoryRegion mmap_mem;
- void *mmap;
unsigned long *pending;
} VFIOMSIXInfo;
diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
index 5ec1c6a2a2..aa5af927e1 100644
--- a/hw/virtio/vhost-vsock.c
+++ b/hw/virtio/vhost-vsock.c
@@ -11,8 +11,8 @@
* top-level directory.
*/
-#include <sys/ioctl.h>
#include "qemu/osdep.h"
+#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_vsock.h"
#include "qapi/error.h"
#include "hw/virtio/virtio-bus.h"
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index 37cde38982..14e08d20d0 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -18,7 +18,7 @@
#include "qemu/timer.h"
#include "qemu-common.h"
#include "hw/virtio/virtio.h"
-#include "hw/i386/pc.h"
+#include "hw/mem/pc-dimm.h"
#include "sysemu/balloon.h"
#include "hw/virtio/virtio-balloon.h"
#include "sysemu/kvm.h"
diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c
index 532afe89e7..d045032bf4 100644
--- a/hw/watchdog/wdt_ib700.c
+++ b/hw/watchdog/wdt_ib700.c
@@ -25,7 +25,6 @@
#include "sysemu/watchdog.h"
#include "hw/hw.h"
#include "hw/isa/isa.h"
-#include "hw/i386/pc.h"
/*#define IB700_DEBUG 1*/
diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c
index 9bba717708..d57c6d3485 100644
--- a/hw/xen/xen_pt.c
+++ b/hw/xen/xen_pt.c
@@ -946,6 +946,7 @@ static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
k->exit = xen_pt_unregister_device;
k->config_read = xen_pt_pci_read_config;
k->config_write = xen_pt_pci_write_config;
+ k->is_express = 1; /* We might be */
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->desc = "Assign an host PCI device with Xen";
dc->props = xen_pci_passthrough_properties;
diff --git a/include/block/block.h b/include/block/block.h
index c05cac57e5..9b12774ddf 100644
--- a/include/block/block.h
+++ b/include/block/block.h
@@ -585,7 +585,7 @@ void bdrv_io_unplug(BlockDriverState *bs);
* Begin a quiesced section of all users of @bs. This is part of
* bdrv_drained_begin.
*/
-void bdrv_parent_drained_begin(BlockDriverState *bs);
+void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore);
/**
* bdrv_parent_drained_end:
@@ -593,7 +593,7 @@ void bdrv_parent_drained_begin(BlockDriverState *bs);
* End a quiesced section of all users of @bs. This is part of
* bdrv_drained_end.
*/
-void bdrv_parent_drained_end(BlockDriverState *bs);
+void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore);
/**
* bdrv_drained_begin:
@@ -608,12 +608,23 @@ void bdrv_parent_drained_end(BlockDriverState *bs);
void bdrv_drained_begin(BlockDriverState *bs);
/**
+ * Like bdrv_drained_begin, but recursively begins a quiesced section for
+ * exclusive access to all child nodes as well.
+ */
+void bdrv_subtree_drained_begin(BlockDriverState *bs);
+
+/**
* bdrv_drained_end:
*
* End a quiescent section started by bdrv_drained_begin().
*/
void bdrv_drained_end(BlockDriverState *bs);
+/**
+ * End a quiescent section started by bdrv_subtree_drained_begin().
+ */
+void bdrv_subtree_drained_end(BlockDriverState *bs);
+
void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
Error **errp);
void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);
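The new subtree drain API is used as a begin/end pair around operations that need exclusive access to a node and all of its children; a minimal usage sketch (caller and reconfiguration step are hypothetical):

void reparent_with_subtree_drain(BlockDriverState *bs)
{
    bdrv_subtree_drained_begin(bs);   /* quiesce bs and all of its children */

    /* ... safely detach or attach child nodes here ... */

    bdrv_subtree_drained_end(bs);     /* resume I/O on the whole subtree */
}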
diff --git a/include/block/block_int.h b/include/block/block_int.h
index a5482775ec..29cafa4236 100644
--- a/include/block/block_int.h
+++ b/include/block/block_int.h
@@ -717,6 +717,8 @@ struct BlockDriverState {
/* Accessed with atomic ops. */
int quiesce_counter;
+ int recursive_quiesce_counter;
+
unsigned int write_gen; /* Current data generation */
/* Protected by reqs_lock. */
@@ -768,6 +770,9 @@ int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags);
+void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
+void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);
+
int get_tmp_filename(char *filename, int size);
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
const char *filename);
@@ -1045,7 +1050,6 @@ bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);
void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);
-bool bdrv_requests_pending(BlockDriverState *bs);
void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
void bdrv_undo_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *in);
diff --git a/include/block/dirty-bitmap.h b/include/block/dirty-bitmap.h
index 3579a7597c..a591c27213 100644
--- a/include/block/dirty-bitmap.h
+++ b/include/block/dirty-bitmap.h
@@ -91,5 +91,6 @@ bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs);
BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BlockDriverState *bs,
BdrvDirtyBitmap *bitmap);
char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t start);
#endif
diff --git a/include/chardev/char.h b/include/chardev/char.h
index 43aabccef5..778d610295 100644
--- a/include/chardev/char.h
+++ b/include/chardev/char.h
@@ -248,6 +248,7 @@ typedef struct ChardevClass {
void (*chr_accept_input)(Chardev *chr);
void (*chr_set_echo)(Chardev *chr, bool echo);
void (*chr_set_fe_open)(Chardev *chr, int fe_open);
+ void (*chr_be_event)(Chardev *s, int event);
} ChardevClass;
Chardev *qemu_chardev_new(const char *id, const char *typename,
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 0f51c92adb..b37f7d8d92 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -74,8 +74,9 @@ void cpu_reloading_memory_map(void);
/**
* cpu_address_space_init:
* @cpu: CPU to add this address space to
- * @as: address space to add
* @asidx: integer index of this address space
+ * @prefix: prefix to be used as name of address space
+ * @mr: the root memory region of address space
*
* Add the specified address space to the CPU's cpu_ases list.
* The address space added with @asidx 0 is the one used for the
@@ -89,7 +90,8 @@ void cpu_reloading_memory_map(void);
*
* Note that with KVM only one address space is supported.
*/
-void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
+void cpu_address_space_init(CPUState *cpu, int asidx,
+ const char *prefix, MemoryRegion *mr);
#endif
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
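A hedged example call matching the updated cpu_address_space_init() prototype above; the memory region chosen here is illustrative only:

/* Sketch: register address space 0 for a CPU with the new prototype. */
static void my_cpu_init_ases(CPUState *cs)              /* hypothetical */
{
    cpu_address_space_init(cs, 0, "cpu-memory", get_system_memory());
}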
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
index 049bba86e9..54aaa61d65 100644
--- a/include/exec/gen-icount.h
+++ b/include/exec/gen-icount.h
@@ -5,7 +5,7 @@
/* Helpers for instruction counting code generation. */
-static int icount_start_insn_idx;
+static TCGOp *icount_start_insn;
static inline void gen_tb_start(TranslationBlock *tb)
{
@@ -26,8 +26,8 @@ static inline void gen_tb_start(TranslationBlock *tb)
/* We emit a movi with a dummy immediate argument. Keep the insn index
* of the movi so that we later (when we know the actual insn count)
* can update the immediate argument with the actual insn count. */
- icount_start_insn_idx = tcg_op_buf_count();
tcg_gen_movi_i32(imm, 0xdeadbeef);
+ icount_start_insn = tcg_last_op();
tcg_gen_sub_i32(count, count, imm);
tcg_temp_free_i32(imm);
@@ -48,14 +48,11 @@ static inline void gen_tb_end(TranslationBlock *tb, int num_insns)
if (tb_cflags(tb) & CF_USE_ICOUNT) {
/* Update the num_insn immediate parameter now that we know
* the actual insn count. */
- tcg_set_insn_param(icount_start_insn_idx, 1, num_insns);
+ tcg_set_insn_param(icount_start_insn, 1, num_insns);
}
gen_set_label(tcg_ctx->exitreq_label);
tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
-
- /* Terminate the linked list. */
- tcg_ctx->gen_op_buf[tcg_ctx->gen_op_buf[0].prev].next = 0;
}
static inline void gen_io_start(void)
diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h
index 15204ab961..22381a1708 100644
--- a/include/exec/helper-gen.h
+++ b/include/exec/helper-gen.h
@@ -56,6 +56,16 @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
tcg_gen_callN(HELPER(name), dh_retvar(ret), 5, args); \
}
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
+{ \
+ TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \
+ tcg_gen_callN(HELPER(name), dh_retvar(ret), 6, args); \
+}
+
#include "helper.h"
#include "trace/generated-helpers.h"
#include "trace/generated-helpers-wrappers.h"
@@ -67,6 +77,7 @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
#undef GEN_HELPER
#endif /* HELPER_GEN_H */
diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h
index 639eefdbc0..e1fd08f2ba 100644
--- a/include/exec/helper-head.h
+++ b/include/exec/helper-head.h
@@ -125,6 +125,8 @@
DEF_HELPER_FLAGS_4(name, 0, ret, t1, t2, t3, t4)
#define DEF_HELPER_5(name, ret, t1, t2, t3, t4, t5) \
DEF_HELPER_FLAGS_5(name, 0, ret, t1, t2, t3, t4, t5)
+#define DEF_HELPER_6(name, ret, t1, t2, t3, t4, t5, t6) \
+ DEF_HELPER_FLAGS_6(name, 0, ret, t1, t2, t3, t4, t5, t6)
/* MAX_OPC_PARAM_IARGS must be set to n if last entry is DEF_HELPER_FLAGS_n. */
diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h
index 954bef85ce..74943edb13 100644
--- a/include/exec/helper-proto.h
+++ b/include/exec/helper-proto.h
@@ -26,6 +26,10 @@ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
dh_ctype(t4), dh_ctype(t5));
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
+ dh_ctype(t4), dh_ctype(t5), dh_ctype(t6));
+
#include "helper.h"
#include "trace/generated-helpers.h"
#include "tcg-runtime.h"
@@ -36,5 +40,6 @@ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
#endif /* HELPER_PROTO_H */
diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h
index b0c5bafa99..b3bdb0c399 100644
--- a/include/exec/helper-tcg.h
+++ b/include/exec/helper-tcg.h
@@ -39,6 +39,12 @@
| dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
| dh_sizemask(t5, 5) },
+#define DEF_HELPER_FLAGS_6(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6) \
+ { .func = HELPER(NAME), .name = str(NAME), .flags = FLAGS, \
+ .sizemask = dh_sizemask(ret, 0) | dh_sizemask(t1, 1) \
+ | dh_sizemask(t2, 2) | dh_sizemask(t3, 3) | dh_sizemask(t4, 4) \
+ | dh_sizemask(t5, 5) | dh_sizemask(t6, 6) },
+
#include "helper.h"
#include "trace/generated-helpers.h"
#include "tcg-runtime.h"
@@ -50,5 +56,6 @@
#undef DEF_HELPER_FLAGS_3
#undef DEF_HELPER_FLAGS_4
#undef DEF_HELPER_FLAGS_5
+#undef DEF_HELPER_FLAGS_6
#endif /* HELPER_TCG_H */
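The four header changes above extend the helper macros to six arguments. A sketch of declaring and implementing a six-argument helper under these macros (the helper name is hypothetical):

/* In a target's helper.h: */
DEF_HELPER_6(my_op, void, env, i32, i32, i32, i32, i32)

/* The translator can then emit a call with:
 *   gen_helper_my_op(cpu_env, a, b, c, d, e);
 * and the runtime implementation would be: */
void HELPER(my_op)(CPUArchState *env, uint32_t a, uint32_t b,
                   uint32_t c, uint32_t d, uint32_t e)
{
    /* ... */
}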
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 5ed4042f87..a4cabdf44c 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1241,18 +1241,6 @@ void memory_region_set_flush_coalesced(MemoryRegion *mr);
void memory_region_clear_flush_coalesced(MemoryRegion *mr);
/**
- * memory_region_set_global_locking: Declares the access processing requires
- * QEMU's global lock.
- *
- * When this is invoked, accesses to the memory region will be processed while
- * holding the global lock of QEMU. This is the default behavior of memory
- * regions.
- *
- * @mr: the memory region to be updated.
- */
-void memory_region_set_global_locking(MemoryRegion *mr);
-
-/**
* memory_region_clear_global_locking: Declares that access processing does
* not depend on the QEMU global lock.
*
diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h
index 72be675dd6..80c8099a23 100644
--- a/include/hw/acpi/acpi-defs.h
+++ b/include/hw/acpi/acpi-defs.h
@@ -558,8 +558,8 @@ typedef struct Acpi20Tcpa Acpi20Tcpa;
/*
* TPM2
*
- * Following Level 00, Rev 00.37 of specs:
- * http://www.trustedcomputinggroup.org/resources/tcg_acpi_specification
+ * Following Version 1.2, Revision 8 of specs:
+ * https://trustedcomputinggroup.org/tcg-acpi-specification/
*/
struct Acpi20TPM2 {
ACPI_TABLE_HEADER_DEF
@@ -567,6 +567,9 @@ struct Acpi20TPM2 {
uint16_t reserved;
uint64_t control_area_address;
uint32_t start_method;
+ uint8_t start_method_params[12];
+ uint32_t log_area_minimum_length;
+ uint64_t log_area_start_address;
} QEMU_PACKED;
typedef struct Acpi20TPM2 Acpi20TPM2;
diff --git a/include/hw/acpi/acpi.h b/include/hw/acpi/acpi.h
index 7b3d93cf0d..39ff512129 100644
--- a/include/hw/acpi/acpi.h
+++ b/include/hw/acpi/acpi.h
@@ -39,6 +39,17 @@
#define ACPI_PM2_REGISTER_WIDTH 8
#define ACPI_PM_TIMER_WIDTH 32
+/* PC-style peripherals (also used by other machines). */
+#define ACPI_PM_PROP_S3_DISABLED "disable_s3"
+#define ACPI_PM_PROP_S4_DISABLED "disable_s4"
+#define ACPI_PM_PROP_S4_VAL "s4_val"
+#define ACPI_PM_PROP_SCI_INT "sci_int"
+#define ACPI_PM_PROP_ACPI_ENABLE_CMD "acpi_enable_cmd"
+#define ACPI_PM_PROP_ACPI_DISABLE_CMD "acpi_disable_cmd"
+#define ACPI_PM_PROP_PM_IO_BASE "pm_io_base"
+#define ACPI_PM_PROP_GPE0_BLK "gpe0_blk"
+#define ACPI_PM_PROP_GPE0_BLK_LEN "gpe0_blk_len"
+
/* PM Timer ticks per second (HZ) */
#define PM_TIMER_FREQUENCY 3579545
diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h
index a352c94fde..59aeb06393 100644
--- a/include/hw/acpi/ich9.h
+++ b/include/hw/acpi/ich9.h
@@ -63,6 +63,8 @@ typedef struct ICH9LPCPMRegs {
TCOIORegs tco_regs;
} ICH9LPCPMRegs;
+#define ACPI_PM_PROP_TCO_ENABLED "enable_tco"
+
void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
bool smm_enabled,
qemu_irq sci_irq);
diff --git a/include/hw/acpi/ipmi.h b/include/hw/acpi/ipmi.h
index ab2bb29048..c38483565c 100644
--- a/include/hw/acpi/ipmi.h
+++ b/include/hw/acpi/ipmi.h
@@ -9,7 +9,6 @@
#ifndef HW_ACPI_IPMI_H
#define HW_ACPI_IPMI_H
-#include "qemu/osdep.h"
#include "hw/acpi/aml-build.h"
/*
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
index 6eff81a995..3e6fb9b7bd 100644
--- a/include/hw/arm/xlnx-zynqmp.h
+++ b/include/hw/arm/xlnx-zynqmp.h
@@ -40,6 +40,10 @@
#define XLNX_ZYNQMP_NUM_SDHCI 2
#define XLNX_ZYNQMP_NUM_SPIS 2
+#define XLNX_ZYNQMP_NUM_QSPI_BUS 2
+#define XLNX_ZYNQMP_NUM_QSPI_BUS_CS 2
+#define XLNX_ZYNQMP_NUM_QSPI_FLASH 4
+
#define XLNX_ZYNQMP_NUM_OCM_BANKS 4
#define XLNX_ZYNQMP_OCM_RAM_0_ADDRESS 0xFFFC0000
#define XLNX_ZYNQMP_OCM_RAM_SIZE 0x10000
@@ -83,6 +87,7 @@ typedef struct XlnxZynqMPState {
SysbusAHCIState sata;
SDHCIState sdhci[XLNX_ZYNQMP_NUM_SDHCI];
XilinxSPIPS spi[XLNX_ZYNQMP_NUM_SPIS];
+ XlnxZynqMPQSPIPS qspi;
XlnxDPState dp;
XlnxDPDMAState dpdma;
diff --git a/include/hw/block/block.h b/include/hw/block/block.h
index f3f6e8ef02..64b9298829 100644
--- a/include/hw/block/block.h
+++ b/include/hw/block/block.h
@@ -72,11 +72,11 @@ static inline unsigned int get_physical_block_exp(BlockConf *conf)
/* Configuration helpers */
void blkconf_serial(BlockConf *conf, char **serial);
-void blkconf_geometry(BlockConf *conf, int *trans,
+bool blkconf_geometry(BlockConf *conf, int *trans,
unsigned cyls_max, unsigned heads_max, unsigned secs_max,
Error **errp);
void blkconf_blocksizes(BlockConf *conf);
-void blkconf_apply_backend_options(BlockConf *conf, bool readonly,
+bool blkconf_apply_backend_options(BlockConf *conf, bool readonly,
bool resizable, Error **errp);
/* Hard disk geometry */
diff --git a/include/hw/compat.h b/include/hw/compat.h
index cf389b4e85..263de973a7 100644
--- a/include/hw/compat.h
+++ b/include/hw/compat.h
@@ -1,6 +1,9 @@
#ifndef HW_COMPAT_H
#define HW_COMPAT_H
+#define HW_COMPAT_2_11 \
+ /* empty */
+
#define HW_COMPAT_2_10 \
{\
.driver = "virtio-mouse-device",\
diff --git a/include/hw/cpu/core.h b/include/hw/cpu/core.h
index 79ac79c29c..b7470644d8 100644
--- a/include/hw/cpu/core.h
+++ b/include/hw/cpu/core.h
@@ -9,7 +9,6 @@
#ifndef HW_CPU_CORE_H
#define HW_CPU_CORE_H
-#include "qemu/osdep.h"
#include "hw/qdev.h"
#define TYPE_CPU_CORE "cpu-core"
diff --git a/include/hw/display/vga.h b/include/hw/display/vga.h
new file mode 100644
index 0000000000..0401a3a292
--- /dev/null
+++ b/include/hw/display/vga.h
@@ -0,0 +1,25 @@
+/*
+ * QEMU VGA Emulator.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_HW_DISPLAY_VGA_H
+#define QEMU_HW_DISPLAY_VGA_H
+
+#include "exec/memory.h"
+
+enum vga_retrace_method {
+ VGA_RETRACE_DUMB,
+ VGA_RETRACE_PRECISE
+};
+
+extern enum vga_retrace_method vga_retrace_method;
+
+int isa_vga_mm_init(hwaddr vram_base,
+ hwaddr ctrl_base, int it_shift,
+ MemoryRegion *address_space);
+
+#endif
diff --git a/include/hw/i2c/ppc4xx_i2c.h b/include/hw/i2c/ppc4xx_i2c.h
index e53042f6d4..3450bda577 100644
--- a/include/hw/i2c/ppc4xx_i2c.h
+++ b/include/hw/i2c/ppc4xx_i2c.h
@@ -25,7 +25,6 @@
#ifndef PPC4XX_I2C_H
#define PPC4XX_I2C_H
-#include "qemu/osdep.h"
#include "qemu-common.h"
#include "hw/sysbus.h"
#include "hw/i2c/i2c.h"
diff --git a/include/hw/i386/apic.h b/include/hw/i386/apic.h
index ea48ea9389..a9f6c0aa33 100644
--- a/include/hw/i386/apic.h
+++ b/include/hw/i386/apic.h
@@ -20,6 +20,7 @@ void apic_init_reset(DeviceState *s);
void apic_sipi(DeviceState *s);
void apic_poll_irq(DeviceState *d);
void apic_designate_bsp(DeviceState *d, bool bsp);
+int apic_get_highest_priority_irr(DeviceState *dev);
/* pc.c */
DeviceState *cpu_get_current_apic(void);
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index ef438bd765..6f77eb0665 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -151,19 +151,6 @@ struct PCMachineClass {
#define PC_MACHINE_CLASS(klass) \
OBJECT_CLASS_CHECK(PCMachineClass, (klass), TYPE_PC_MACHINE)
-/* PC-style peripherals (also used by other machines). */
-
-#define ACPI_PM_PROP_S3_DISABLED "disable_s3"
-#define ACPI_PM_PROP_S4_DISABLED "disable_s4"
-#define ACPI_PM_PROP_S4_VAL "s4_val"
-#define ACPI_PM_PROP_SCI_INT "sci_int"
-#define ACPI_PM_PROP_ACPI_ENABLE_CMD "acpi_enable_cmd"
-#define ACPI_PM_PROP_ACPI_DISABLE_CMD "acpi_disable_cmd"
-#define ACPI_PM_PROP_PM_IO_BASE "pm_io_base"
-#define ACPI_PM_PROP_GPE0_BLK "gpe0_blk"
-#define ACPI_PM_PROP_GPE0_BLK_LEN "gpe0_blk_len"
-#define ACPI_PM_PROP_TCO_ENABLED "enable_tco"
-
/* parallel.c */
void parallel_hds_isa_init(ISABus *bus, int n);
@@ -315,45 +302,10 @@ PCIBus *find_i440fx(void);
extern PCIDevice *piix4_dev;
int piix4_init(PCIBus *bus, ISABus **isa_bus, int devfn);
-/* vga.c */
-enum vga_retrace_method {
- VGA_RETRACE_DUMB,
- VGA_RETRACE_PRECISE
-};
-
-extern enum vga_retrace_method vga_retrace_method;
-
-int isa_vga_mm_init(hwaddr vram_base,
- hwaddr ctrl_base, int it_shift,
- MemoryRegion *address_space);
-
-/* ne2000.c */
-static inline bool isa_ne2000_init(ISABus *bus, int base, int irq, NICInfo *nd)
-{
- DeviceState *dev;
- ISADevice *isadev;
-
- qemu_check_nic_model(nd, "ne2k_isa");
-
- isadev = isa_try_create(bus, "ne2k_isa");
- if (!isadev) {
- return false;
- }
- dev = DEVICE(isadev);
- qdev_prop_set_uint32(dev, "iobase", base);
- qdev_prop_set_uint32(dev, "irq", irq);
- qdev_set_nic_properties(dev, nd);
- qdev_init_nofail(dev);
- return true;
-}
-
/* pc_sysfw.c */
void pc_system_firmware_init(MemoryRegion *rom_memory,
bool isapc_ram_fw);
-/* pvpanic.c */
-uint16_t pvpanic_port(void);
-
/* acpi-build.c */
void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
const CPUArchIdList *apic_ids, GArray *entry);
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
index ac7997ca8c..8bc29112e3 100644
--- a/include/hw/intc/armv7m_nvic.h
+++ b/include/hw/intc/armv7m_nvic.h
@@ -78,13 +78,15 @@ typedef struct NVICState {
MemoryRegion sysregmem;
MemoryRegion sysreg_ns_mem;
+ MemoryRegion systickmem;
+ MemoryRegion systick_ns_mem;
MemoryRegion container;
uint32_t num_irq;
qemu_irq excpout;
qemu_irq sysresetreq;
- SysTickState systick;
+ SysTickState systick[M_REG_NUM_BANKS];
} NVICState;
#endif
diff --git a/include/hw/isa/i8259_internal.h b/include/hw/isa/i8259_internal.h
index 6954b6ec5f..f742c2a726 100644
--- a/include/hw/isa/i8259_internal.h
+++ b/include/hw/isa/i8259_internal.h
@@ -28,6 +28,7 @@
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
+#include "hw/intc/intc.h"
typedef struct PICCommonState PICCommonState;
@@ -76,8 +77,10 @@ struct PICCommonState {
};
void pic_reset_common(PICCommonState *s);
-
ISADevice *i8259_init_chip(const char *name, ISABus *bus, bool master);
-
+void pic_stat_update_irq(int irq, int level);
+bool pic_get_statistics(InterruptStatsProvider *obj,
+ uint64_t **irq_counts, unsigned int *nb_irqs);
+void pic_print_info(InterruptStatsProvider *obj, Monitor *mon);
#endif /* QEMU_I8259_INTERNAL_H */
diff --git a/include/hw/misc/pvpanic.h b/include/hw/misc/pvpanic.h
new file mode 100644
index 0000000000..36a54e270c
--- /dev/null
+++ b/include/hw/misc/pvpanic.h
@@ -0,0 +1,21 @@
+/*
+ * QEMU simulated pvpanic device.
+ *
+ * Copyright Fujitsu, Corp. 2013
+ *
+ * Authors:
+ * Wen Congyang <wency@cn.fujitsu.com>
+ * Hu Tao <hutao@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef HW_MISC_PVPANIC_H
+#define HW_MISC_PVPANIC_H
+
+#define TYPE_PVPANIC "pvpanic"
+
+uint16_t pvpanic_port(void);
+
+#endif
diff --git a/include/hw/net/ne2000-isa.h b/include/hw/net/ne2000-isa.h
new file mode 100644
index 0000000000..ff2bed9c95
--- /dev/null
+++ b/include/hw/net/ne2000-isa.h
@@ -0,0 +1,33 @@
+/*
+ * QEMU NE2000 emulation -- isa bus windup
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include "hw/hw.h"
+#include "hw/qdev.h"
+#include "hw/isa/isa.h"
+#include "net/net.h"
+
+#define TYPE_ISA_NE2000 "ne2k_isa"
+
+static inline ISADevice *isa_ne2000_init(ISABus *bus, int base, int irq,
+ NICInfo *nd)
+{
+ ISADevice *d;
+
+ qemu_check_nic_model(nd, "ne2k_isa");
+
+ d = isa_try_create(bus, TYPE_ISA_NE2000);
+ if (d) {
+ DeviceState *dev = DEVICE(d);
+
+ qdev_prop_set_uint32(dev, "iobase", base);
+ qdev_prop_set_uint32(dev, "irq", irq);
+ qdev_set_nic_properties(dev, nd);
+ qdev_init_nofail(dev);
+ }
+ return d;
+}
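
The relocated helper now returns the ISADevice (or NULL) rather than a bool, so board code can both test for success and keep a handle on the device. A hedged board-level sketch, assuming an ISA bus and a populated NICInfo already exist (the I/O base and IRQ are just conventional example values):

    static void board_init_ne2000(ISABus *isa_bus, NICInfo *nd)
    {
        ISADevice *ne2000 = isa_ne2000_init(isa_bus, 0x300, 9, nd);

        if (!ne2000) {
            /* "ne2k_isa" not built in; skip or report as the board prefers */
            return;
        }
        /* ne2000 can now be wired further or inspected by the board */
    }
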
diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h
index 38470b2f0e..0fae4fc6a4 100644
--- a/include/hw/pci-host/spapr.h
+++ b/include/hw/pci-host/spapr.h
@@ -108,7 +108,7 @@ static inline qemu_irq spapr_phb_lsi_qirq(struct sPAPRPHBState *phb, int pin)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
- return xics_get_qirq(XICS_FABRIC(spapr), phb->lsi_table[pin].irq);
+ return spapr_qirq(spapr, phb->lsi_table[pin].irq);
}
PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index);
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 9d21ca9bde..14757b805e 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -590,6 +590,16 @@ void spapr_load_rtas(sPAPRMachineState *spapr, void *fdt, hwaddr addr);
#define RTAS_EVENT_SCAN_RATE 1
+/* This helper should be used to encode interrupt specifiers when the related
+ * "interrupt-controller" node has its "#interrupt-cells" property set to 2 (ie,
+ * VIO devices, RTAS event sources and PHBs).
+ */
+static inline void spapr_dt_xics_irq(uint32_t *intspec, int irq, bool is_lsi)
+{
+ intspec[0] = cpu_to_be32(irq);
+ intspec[1] = is_lsi ? cpu_to_be32(1) : 0;
+}
+
typedef struct sPAPRTCETable sPAPRTCETable;
#define TYPE_SPAPR_TCE_TABLE "spapr-tce-table"
@@ -707,4 +717,11 @@ void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg);
int spapr_vcpu_id(PowerPCCPU *cpu);
PowerPCCPU *spapr_find_cpu(int vcpu_id);
+int spapr_irq_alloc(sPAPRMachineState *spapr, int irq_hint, bool lsi,
+ Error **errp);
+int spapr_irq_alloc_block(sPAPRMachineState *spapr, int num, bool lsi,
+ bool align, Error **errp);
+void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num);
+qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq);
+
#endif /* HW_SPAPR_H */
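
The machine-level IRQ API replaces the ICS-level allocators removed from xics.h below: callers ask the machine for an IRQ, turn it into a qemu_irq with spapr_qirq(), and, where a device tree node needs it, encode the number with spapr_dt_xics_irq(). A hedged sketch of how a device model might use it, assuming a negative return from spapr_irq_alloc() signals failure with errp set (MyDevState and its fields are illustrative):

    static void mydev_claim_irq(sPAPRMachineState *spapr, MyDevState *dev,
                                Error **errp)
    {
        uint32_t intspec[2];
        int irq = spapr_irq_alloc(spapr, 0 /* no hint */, false /* not LSI */,
                                  errp);

        if (irq < 0) {
            return;                       /* errp set by the allocator */
        }
        dev->irq = irq;
        dev->qirq = spapr_qirq(spapr, irq);

        /* 2-cell interrupt specifier for the device's dt node */
        spapr_dt_xics_irq(intspec, irq, false);
        /* ... write intspec into the fdt ... */
    }
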
diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
index f2d48d6a67..1129f344aa 100644
--- a/include/hw/ppc/spapr_cpu_core.h
+++ b/include/hw/ppc/spapr_cpu_core.h
@@ -28,7 +28,7 @@ typedef struct sPAPRCPUCore {
CPUCore parent_obj;
/*< public >*/
- void *threads;
+ PowerPCCPU **threads;
int node_id;
} sPAPRCPUCore;
diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h
index 2e9685a5d9..e8b006d18f 100644
--- a/include/hw/ppc/spapr_vio.h
+++ b/include/hw/ppc/spapr_vio.h
@@ -87,7 +87,7 @@ static inline qemu_irq spapr_vio_qirq(VIOsPAPRDevice *dev)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
- return xics_get_qirq(XICS_FABRIC(spapr), dev->irq);
+ return spapr_qirq(spapr, dev->irq);
}
static inline bool spapr_vio_dma_valid(VIOsPAPRDevice *dev, uint64_t taddr,
diff --git a/include/hw/ppc/xics.h b/include/hw/ppc/xics.h
index 2df99be111..6cebff47a7 100644
--- a/include/hw/ppc/xics.h
+++ b/include/hw/ppc/xics.h
@@ -181,13 +181,8 @@ typedef struct XICSFabricClass {
#define XICS_IRQS_SPAPR 1024
-int spapr_ics_alloc(ICSState *ics, int irq_hint, bool lsi, Error **errp);
-int spapr_ics_alloc_block(ICSState *ics, int num, bool lsi, bool align,
- Error **errp);
-void spapr_ics_free(ICSState *ics, int irq, int num);
void spapr_dt_xics(int nr_servers, void *fdt, uint32_t phandle);
-qemu_irq xics_get_qirq(XICSFabric *xi, int irq);
ICPState *xics_icp_get(XICSFabric *xi, int server);
/* Internal XICS interfaces */
@@ -212,4 +207,7 @@ typedef struct sPAPRMachineState sPAPRMachineState;
int xics_kvm_init(sPAPRMachineState *spapr, Error **errp);
void xics_spapr_init(sPAPRMachineState *spapr);
+Object *icp_create(Object *cpu, const char *type, XICSFabric *xi,
+ Error **errp);
+
#endif /* XICS_H */
diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h
index e2321f1cc1..60b42ac561 100644
--- a/include/hw/qdev-properties.h
+++ b/include/hw/qdev-properties.h
@@ -17,6 +17,7 @@ extern const PropertyInfo qdev_prop_int64;
extern const PropertyInfo qdev_prop_size;
extern const PropertyInfo qdev_prop_string;
extern const PropertyInfo qdev_prop_chr;
+extern const PropertyInfo qdev_prop_tpm;
extern const PropertyInfo qdev_prop_ptr;
extern const PropertyInfo qdev_prop_macaddr;
extern const PropertyInfo qdev_prop_on_off_auto;
diff --git a/include/hw/registerfields.h b/include/hw/registerfields.h
index af101d5ae6..44e0b94edf 100644
--- a/include/hw/registerfields.h
+++ b/include/hw/registerfields.h
@@ -11,6 +11,8 @@
#ifndef REGISTERFIELDS_H
#define REGISTERFIELDS_H
+#include <qemu/bitops.h>
+
/* Define constants for a 32 bit register */
/* This macro will define A_FOO, for the byte address of a register
@@ -22,7 +24,7 @@
/* Define SHIFT, LENGTH and MASK constants for a field within a register */
-/* This macro will define FOO_BAR_MASK, FOO_BAR_SHIFT and FOO_BAR_LENGTH
+/* This macro will define R_FOO_BAR_MASK, R_FOO_BAR_SHIFT and R_FOO_BAR_LENGTH
* constants for field BAR in register FOO.
*/
#define FIELD(reg, field, shift, length) \
@@ -35,6 +37,9 @@
#define FIELD_EX32(storage, reg, field) \
extract32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH)
+#define FIELD_EX64(storage, reg, field) \
+ extract64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH)
/* Extract a field from an array of registers */
#define ARRAY_FIELD_EX32(regs, reg, field) \
@@ -52,6 +57,14 @@
d = deposit32((storage), R_ ## reg ## _ ## field ## _SHIFT, \
R_ ## reg ## _ ## field ## _LENGTH, v.v); \
d; })
+#define FIELD_DP64(storage, reg, field, val) ({ \
+ struct { \
+ unsigned int v:R_ ## reg ## _ ## field ## _LENGTH; \
+ } v = { .v = val }; \
+ uint64_t d; \
+ d = deposit64((storage), R_ ## reg ## _ ## field ## _SHIFT, \
+ R_ ## reg ## _ ## field ## _LENGTH, v.v); \
+ d; })
/* Deposit a field to array of registers. */
#define ARRAY_FIELD_DP32(regs, reg, field, val) \
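
The new 64-bit accessors follow the same R_<REG>_<FIELD> naming pattern as their 32-bit counterparts. A small sketch of declaring a register field and doing a read-modify-write on a 64-bit storage word (the register and field names are made up):

    #include "hw/registerfields.h"

    REG32(CTRL, 0x00)                  /* defines A_CTRL and R_CTRL */
        FIELD(CTRL, BURST_LEN, 4, 8)   /* R_CTRL_BURST_LEN_{SHIFT,LENGTH,MASK} */

    static uint64_t ctrl_set_burst(uint64_t val, unsigned burst)
    {
        /* read-modify-write of one field inside a 64-bit value */
        if (FIELD_EX64(val, CTRL, BURST_LEN) != burst) {
            val = FIELD_DP64(val, CTRL, BURST_LEN, burst);
        }
        return val;
    }
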
diff --git a/include/hw/s390x/css.h b/include/hw/s390x/css.h
index ab6ebe66b5..35facb47d2 100644
--- a/include/hw/s390x/css.h
+++ b/include/hw/s390x/css.h
@@ -248,7 +248,6 @@ int css_do_tsch_get_irb(SubchDev *sch, IRB *irb, int *irb_len);
void css_do_tsch_update_subch(SubchDev *sch);
int css_do_stcrw(CRW *crw);
void css_undo_stcrw(CRW *crw);
-int css_do_tpi(IOIntCode *int_code, int lowcore);
int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
int rfmt, void *buf);
void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo);
@@ -272,12 +271,9 @@ extern const PropertyInfo css_devid_ro_propinfo;
* default css image for it.
* If @p bus_id is valid, and @p squash_mcss is false, verify that it is
* not already in use, and find a free devno for it.
- * If @p bus_id is not valid, and if either @p squash_mcss or @p is_virtual
- * is true, find a free subchannel id and device number across all
- * subchannel sets from the default css image.
- * If @p bus_id is not valid, and if both @p squash_mcss and @p is_virtual
- * are false, find a non-full css image and find a free subchannel id and
- * device number across all subchannel sets from it.
+ * If @p bus_id is not valid, find a free subchannel id and device number
+ * across all subchannel sets and all css images starting from the default
+ * css image.
*
* If either of the former actions succeed, allocate a subchannel structure,
* initialise it with the bus id, subchannel id and device number, register
@@ -286,8 +282,7 @@ extern const PropertyInfo css_devid_ro_propinfo;
* The caller becomes owner of the returned subchannel structure and
* is responsible for unregistering and freeing it.
*/
-SubchDev *css_create_sch(CssDevId bus_id, bool is_virtual, bool squash_mcss,
- Error **errp);
+SubchDev *css_create_sch(CssDevId bus_id, bool squash_mcss, Error **errp);
/** Turn on css migration */
void css_register_vmstate(void);
diff --git a/include/hw/ssi/xilinx_spips.h b/include/hw/ssi/xilinx_spips.h
index 06aa09629d..d398a4e81c 100644
--- a/include/hw/ssi/xilinx_spips.h
+++ b/include/hw/ssi/xilinx_spips.h
@@ -26,11 +26,29 @@
#define XILINX_SPIPS_H
#include "hw/ssi/ssi.h"
-#include "qemu/fifo8.h"
+#include "qemu/fifo32.h"
+#include "hw/stream.h"
typedef struct XilinxSPIPS XilinxSPIPS;
#define XLNX_SPIPS_R_MAX (0x100 / 4)
+#define XLNX_ZYNQMP_SPIPS_R_MAX (0x830 / 4)
+
+/* Bite off 4k chunks at a time */
+#define LQSPI_CACHE_SIZE 1024
+
+typedef enum {
+ READ = 0x3, READ_4 = 0x13,
+ FAST_READ = 0xb, FAST_READ_4 = 0x0c,
+ DOR = 0x3b, DOR_4 = 0x3c,
+ QOR = 0x6b, QOR_4 = 0x6c,
+ DIOR = 0xbb, DIOR_4 = 0xbc,
+ QIOR = 0xeb, QIOR_4 = 0xec,
+
+ PP = 0x2, PP_4 = 0x12,
+ DPP = 0xa2,
+ QPP = 0x32, QPP_4 = 0x34,
+} FlashCMD;
struct XilinxSPIPS {
SysBusDevice parent_obj;
@@ -45,19 +63,70 @@ struct XilinxSPIPS {
uint8_t num_busses;
uint8_t snoop_state;
+ int cmd_dummies;
+ uint8_t link_state;
+ uint8_t link_state_next;
+ uint8_t link_state_next_when;
qemu_irq *cs_lines;
+ bool *cs_lines_state;
SSIBus **spi;
Fifo8 rx_fifo;
Fifo8 tx_fifo;
uint8_t num_txrx_bytes;
+ uint32_t rx_discard;
uint32_t regs[XLNX_SPIPS_R_MAX];
+
+ bool man_start_com;
};
+typedef struct {
+ XilinxSPIPS parent_obj;
+
+ uint8_t lqspi_buf[LQSPI_CACHE_SIZE];
+ hwaddr lqspi_cached_addr;
+ Error *migration_blocker;
+ bool mmio_execution_enabled;
+} XilinxQSPIPS;
+
+typedef struct {
+ XilinxQSPIPS parent_obj;
+
+ StreamSlave *dma;
+ uint8_t dma_buf[4];
+ int gqspi_irqline;
+
+ uint32_t regs[XLNX_ZYNQMP_SPIPS_R_MAX];
+
+ /* GQSPI has separate tx/rx fifos */

+ Fifo8 rx_fifo_g;
+ Fifo8 tx_fifo_g;
+ Fifo32 fifo_g;
+ /*
+ * At the end of each generic command, misaligned extra bytes are discarded
+ * or padded to tx and rx respectively to round it out (and avoid the need
+ * for individual byte access). Since we use byte fifos, keep track of the
+ * alignment with respect to word access.
+ */
+ uint8_t rx_fifo_g_align;
+ uint8_t tx_fifo_g_align;
+ bool man_start_com_g;
+} XlnxZynqMPQSPIPS;
+
+typedef struct XilinxSPIPSClass {
+ SysBusDeviceClass parent_class;
+
+ const MemoryRegionOps *reg_ops;
+
+ uint32_t rx_fifo_size;
+ uint32_t tx_fifo_size;
+} XilinxSPIPSClass;
+
#define TYPE_XILINX_SPIPS "xlnx.ps7-spi"
#define TYPE_XILINX_QSPIPS "xlnx.ps7-qspi"
+#define TYPE_XLNX_ZYNQMP_QSPIPS "xlnx.usmp-gqspi"
#define XILINX_SPIPS(obj) \
OBJECT_CHECK(XilinxSPIPS, (obj), TYPE_XILINX_SPIPS)
@@ -69,4 +138,7 @@ struct XilinxSPIPS {
#define XILINX_QSPIPS(obj) \
OBJECT_CHECK(XilinxQSPIPS, (obj), TYPE_XILINX_QSPIPS)
+#define XLNX_ZYNQMP_QSPIPS(obj) \
+ OBJECT_CHECK(XlnxZynqMPQSPIPS, (obj), TYPE_XLNX_ZYNQMP_QSPIPS)
+
#endif /* XILINX_SPIPS_H */
diff --git a/include/hw/timer/i8254.h b/include/hw/timer/i8254.h
index 5adae9fa44..5b12eb918e 100644
--- a/include/hw/timer/i8254.h
+++ b/include/hw/timer/i8254.h
@@ -26,6 +26,7 @@
#define HW_I8254_H
#include "hw/hw.h"
+#include "hw/qdev.h"
#include "hw/isa/isa.h"
#define PIT_FREQ 1193182
@@ -48,8 +49,8 @@ typedef struct PITChannelInfo {
#define TYPE_I8254 "isa-pit"
#define TYPE_KVM_I8254 "kvm-pit"
-static inline ISADevice *pit_init(ISABus *bus, int base, int isa_irq,
- qemu_irq alt_irq)
+static inline ISADevice *i8254_pit_init(ISABus *bus, int base, int isa_irq,
+ qemu_irq alt_irq)
{
DeviceState *dev;
ISADevice *d;
diff --git a/include/hw/timer/i8254_internal.h b/include/hw/timer/i8254_internal.h
index dc09cc0467..c37a438f82 100644
--- a/include/hw/timer/i8254_internal.h
+++ b/include/hw/timer/i8254_internal.h
@@ -26,8 +26,8 @@
#define QEMU_I8254_INTERNAL_H
#include "hw/hw.h"
-#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
+#include "qemu/timer.h"
typedef struct PITChannelState {
int count; /* can be 65536 */
diff --git a/include/hw/timer/mc146818rtc.h b/include/hw/timer/mc146818rtc.h
index 7c8e64b203..fe6ed63f71 100644
--- a/include/hw/timer/mc146818rtc.h
+++ b/include/hw/timer/mc146818rtc.h
@@ -6,7 +6,8 @@
#define TYPE_MC146818_RTC "mc146818rtc"
-ISADevice *rtc_init(ISABus *bus, int base_year, qemu_irq intercept_irq);
+ISADevice *mc146818_rtc_init(ISABus *bus, int base_year,
+ qemu_irq intercept_irq);
void rtc_set_memory(ISADevice *dev, int addr, int val);
int rtc_get_memory(ISADevice *dev, int addr);
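
With the generic-sounding pit_init()/rtc_init() names gone, board code now spells out which device it instantiates. A sketch of PC-style wiring using the renamed helpers (IRQ number, base year and the CMOS write are conventional example values, not prescriptive):

    static void board_wire_legacy_timers(ISABus *isa_bus)
    {
        ISADevice *rtc;

        i8254_pit_init(isa_bus, 0x40, 0 /* IRQ 0 */, NULL /* no alt irq */);
        rtc = mc146818_rtc_init(isa_bus, 2000 /* base year */, NULL);
        rtc_set_memory(rtc, 0x15, 640 & 0xff);   /* illustrative CMOS write */
    }
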
diff --git a/include/hw/unicore32/puv3.h b/include/hw/unicore32/puv3.h
index 5a4839f8df..f587a1f622 100644
--- a/include/hw/unicore32/puv3.h
+++ b/include/hw/unicore32/puv3.h
@@ -14,16 +14,6 @@
#define PUV3_REGS_OFFSET (0x1000) /* 4K is reasonable */
-/* PKUnity System bus (AHB): 0xc0000000 - 0xedffffff (640MB) */
-#define PUV3_DMA_BASE (0xc0200000) /* AHB-4 */
-
-/* PKUnity Peripheral bus (APB): 0xee000000 - 0xefffffff (128MB) */
-#define PUV3_GPIO_BASE (0xee500000) /* APB-5 */
-#define PUV3_INTC_BASE (0xee600000) /* APB-6 */
-#define PUV3_OST_BASE (0xee800000) /* APB-8 */
-#define PUV3_PM_BASE (0xeea00000) /* APB-10 */
-#define PUV3_PS2_BASE (0xeeb00000) /* APB-11 */
-
/* Hardware interrupts */
#define PUV3_IRQS_NR (32)
diff --git a/include/hw/usb.h b/include/hw/usb.h
index eb28655270..9dd9c6f0d9 100644
--- a/include/hw/usb.h
+++ b/include/hw/usb.h
@@ -549,7 +549,6 @@ void usb_claim_port(USBDevice *dev, Error **errp);
void usb_release_port(USBDevice *dev);
void usb_device_attach(USBDevice *dev, Error **errp);
int usb_device_detach(USBDevice *dev);
-int usb_device_delete_addr(int busnr, int addr);
void usb_check_attach(USBDevice *dev, Error **errp);
static inline USBBus *usb_bus_from_device(USBDevice *d)
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index d3c8a6fa8c..5117431d96 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -39,6 +39,7 @@ struct VirtIOBlkConf
uint32_t config_wce;
uint32_t request_merging;
uint16_t num_queues;
+ uint16_t queue_size;
};
struct VirtIOBlockDataPlane;
diff --git a/include/io/net-listener.h b/include/io/net-listener.h
new file mode 100644
index 0000000000..56d6da7a76
--- /dev/null
+++ b/include/io/net-listener.h
@@ -0,0 +1,174 @@
+/*
+ * QEMU network listener
+ *
+ * Copyright (c) 2016-2017 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QIO_NET_LISTENER_H
+#define QIO_NET_LISTENER_H
+
+#include "io/channel-socket.h"
+
+#define TYPE_QIO_NET_LISTENER "qio-net-listener"
+#define QIO_NET_LISTENER(obj) \
+ OBJECT_CHECK(QIONetListener, (obj), TYPE_QIO_NET_LISTENER)
+#define QIO_NET_LISTENER_CLASS(klass) \
+ OBJECT_CLASS_CHECK(QIONetListenerClass, klass, TYPE_QIO_NET_LISTENER)
+#define QIO_NET_LISTENER_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(QIONetListenerClass, obj, TYPE_QIO_NET_LISTENER)
+
+typedef struct QIONetListener QIONetListener;
+typedef struct QIONetListenerClass QIONetListenerClass;
+
+typedef void (*QIONetListenerClientFunc)(QIONetListener *listener,
+ QIOChannelSocket *sioc,
+ gpointer data);
+
+/**
+ * QIONetListener:
+ *
+ * The QIONetListener object encapsulates the management of a
+ * listening socket. It is able to listen on multiple sockets
+ * concurrently, to deal with the scenario where IPv4 / IPv6
+ * needs separate sockets, or there is a need to listen on a
+ * subset of interface IP addresses, instead of the wildcard
+ * address.
+ */
+struct QIONetListener {
+ Object parent;
+
+ char *name;
+ QIOChannelSocket **sioc;
+ gulong *io_tag;
+ size_t nsioc;
+
+ bool connected;
+
+ QIONetListenerClientFunc io_func;
+ gpointer io_data;
+ GDestroyNotify io_notify;
+};
+
+struct QIONetListenerClass {
+ ObjectClass parent;
+};
+
+
+/**
+ * qio_net_listener_new:
+ *
+ * Create a new network listener service, which is not
+ * listening on any sockets initially.
+ *
+ * Returns: the new listener
+ */
+QIONetListener *qio_net_listener_new(void);
+
+
+/**
+ * qio_net_listener_set_name:
+ * @listener: the network listener object
+ * @name: the listener name
+ *
+ * Set the name of the listener. This is used as a debugging
+ * aid, to set names on any GSource instances associated
+ * with the listener
+ */
+void qio_net_listener_set_name(QIONetListener *listener,
+ const char *name);
+
+/**
+ * qio_net_listener_open_sync:
+ * @listener: the network listener object
+ * @addr: the address to listen on
+ * @errp: pointer to a NULL initialized error object
+ *
+ * Synchronously open a listening connection on all
+ * addresses associated with @addr. This method may
+ * also be invoked multiple times, in order to have a
+ * single listener on multiple distinct addresses.
+ */
+int qio_net_listener_open_sync(QIONetListener *listener,
+ SocketAddress *addr,
+ Error **errp);
+
+/**
+ * qio_net_listener_add:
+ * @listener: the network listener object
+ * @sioc: the socket I/O channel
+ *
+ * Associate a listening socket I/O channel with the
+ * listener. The listener will acquire a new reference
+ * on @sioc, so the caller should release its own reference
+ * if it no longer requires the object.
+ */
+void qio_net_listener_add(QIONetListener *listener,
+ QIOChannelSocket *sioc);
+
+/**
+ * qio_net_listener_set_client_func:
+ * @listener: the network listener object
+ * @func: the callback function
+ * @data: opaque data to pass to @func
+ * @notify: callback to free @data
+ *
+ * Register @func to be invoked whenever a new client
+ * connects to the listener. @func will be invoked
+ * passing in the QIOChannelSocket instance for the
+ * client.
+ */
+void qio_net_listener_set_client_func(QIONetListener *listener,
+ QIONetListenerClientFunc func,
+ gpointer data,
+ GDestroyNotify notify);
+
+/**
+ * qio_net_listener_wait_client:
+ * @listener: the network listener object
+ *
+ * Block execution of the caller until a new client arrives
+ * on one of the listening sockets. If there was previously
+ * a callback registered with qio_net_listener_set_client_func
+ * it will be temporarily disabled, and re-enabled afterwards.
+ *
+ * Returns: the new client socket
+ */
+QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener);
+
+
+/**
+ * qio_net_listener_disconnect:
+ * @listener: the network listener object
+ *
+ * Disconnect the listener, removing all I/O callback
+ * watches and closing the socket channels.
+ */
+void qio_net_listener_disconnect(QIONetListener *listener);
+
+
+/**
+ * qio_net_listener_is_connected:
+ * @listener: the network listener object
+ *
+ * Determine if the listener is connected to any socket
+ * channels
+ *
+ * Returns: true if connected, false otherwise
+ */
+bool qio_net_listener_is_connected(QIONetListener *listener);
+
+#endif /* QIO_NET_LISTENER_H */
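
Putting the pieces above together, a server typically creates a listener, opens it on an address, and registers a client callback. A hedged sketch, assuming the usual "negative return means failure" convention for the open call (my_client_connected/start_server are illustrative names):

    static void my_client_connected(QIONetListener *listener,
                                    QIOChannelSocket *sioc,
                                    gpointer opaque)
    {
        /* take a reference if the channel is kept beyond this callback */
        object_ref(OBJECT(sioc));
        /* ... hand sioc off to the protocol handler ... */
    }

    static void start_server(SocketAddress *addr, Error **errp)
    {
        QIONetListener *listener = qio_net_listener_new();

        qio_net_listener_set_name(listener, "my-server");
        if (qio_net_listener_open_sync(listener, addr, errp) < 0) {
            object_unref(OBJECT(listener));
            return;
        }
        qio_net_listener_set_client_func(listener, my_client_connected,
                                         NULL, NULL);
    }
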
diff --git a/include/net/net.h b/include/net/net.h
index 1c55a93588..4afac1a9dd 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -227,7 +227,10 @@ NetClientState *net_hub_port_find(int hub_id);
void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd);
-#define POLYNOMIAL 0x04c11db6
+#define POLYNOMIAL_BE 0x04c11db6
+#define POLYNOMIAL_LE 0xedb88320
+uint32_t net_crc32(const uint8_t *p, int len);
+uint32_t net_crc32_le(const uint8_t *p, int len);
unsigned compute_mcast_idx(const uint8_t *ep);
#define vmstate_offset_macaddr(_state, _field) \
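
With the byte-order of the polynomial spelled out and the CRC helpers exported, NIC models can compute their multicast hash directly instead of open-coding the CRC. A sketch of the common "top six bits of the big-endian CRC" filter-bit computation, shown here only as an illustration of the new helper:

    static unsigned mcast_filter_bit(const uint8_t *macaddr)
    {
        /* big-endian CRC32 over the 6-byte destination MAC */
        uint32_t crc = net_crc32(macaddr, 6);

        /* many NICs index their 64-bit hash filter with the top 6 bits */
        return crc >> 26;
    }
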
diff --git a/include/net/slirp.h b/include/net/slirp.h
index 64b795cda9..0c98e463db 100644
--- a/include/net/slirp.h
+++ b/include/net/slirp.h
@@ -36,8 +36,6 @@ void hmp_hostfwd_remove(Monitor *mon, const QDict *qdict);
int net_slirp_redir(const char *redir_str);
-int net_slirp_parse_legacy(QemuOptsList *opts_list, const char *optarg, int *ret);
-
int net_slirp_smb(const char *exported_dir);
void hmp_info_usernet(Monitor *mon, const QDict *qdict);
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index 9aff9a735e..ce2eb73670 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -261,12 +261,8 @@ void qemu_co_rwlock_unlock(CoRwlock *lock);
/**
* Yield the coroutine for a given duration
- *
- * Behaves similarly to co_sleep_ns(), but the sleeping coroutine will be
- * resumed when using aio_poll().
*/
-void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
- int64_t ns);
+void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns);
/**
* Yield until a file descriptor becomes readable
diff --git a/include/qemu/hbitmap.h b/include/qemu/hbitmap.h
index 81e78043d1..6b6490ecad 100644
--- a/include/qemu/hbitmap.h
+++ b/include/qemu/hbitmap.h
@@ -292,6 +292,14 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
*/
unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);
+/* hbitmap_next_zero:
+ * @hb: The HBitmap to operate on
+ * @start: The bit to start from.
+ *
+ * Find the next clear (not dirty) bit, starting from @start.
+ */
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start);
+
/* hbitmap_create_meta:
* Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
* The caller owns the created bitmap and must call hbitmap_free_meta(hb) to
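
A typical caller of hbitmap_next_zero() walks the bitmap for the first clean bit after a given offset. A small usage sketch, assuming the helper returns -1 when no clear bit exists (the allocation parameters are arbitrary):

    HBitmap *hb = hbitmap_alloc(1 << 20 /* bits */, 0 /* granularity */);

    hbitmap_set(hb, 0, 64);                  /* mark bits 0..63 dirty */

    int64_t next_clean = hbitmap_next_zero(hb, 0);
    /* next_clean is now 64; -1 would mean "no clear bit found" */

    hbitmap_free(hb);
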
diff --git a/include/qemu/option.h b/include/qemu/option.h
index f7338dbe80..a88c5f02b1 100644
--- a/include/qemu/option.h
+++ b/include/qemu/option.h
@@ -31,11 +31,6 @@
const char *get_opt_name(char *buf, int buf_size, const char *p, char delim);
const char *get_opt_value(char *buf, int buf_size, const char *p);
-int get_next_param_value(char *buf, int buf_size,
- const char *tag, const char **pstr);
-int get_param_value(char *buf, int buf_size,
- const char *tag, const char *str);
-
void parse_option_size(const char *name, const char *value,
uint64_t *ret, Error **errp);
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index e8568a0a54..adb3758275 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -365,6 +365,9 @@ void qemu_anon_ram_free(void *ptr, size_t size);
#elif defined(__linux__) && defined(__s390x__)
/* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */
# define QEMU_VMALLOC_ALIGN (256 * 4096)
+#elif defined(__linux__) && defined(__sparc__)
+#include <sys/shm.h>
+# define QEMU_VMALLOC_ALIGN MAX(getpagesize(), SHMLBA)
#else
# define QEMU_VMALLOC_ALIGN getpagesize()
#endif
diff --git a/include/qemu/qht.h b/include/qemu/qht.h
index 56c2c7784c..531aa95325 100644
--- a/include/qemu/qht.h
+++ b/include/qemu/qht.h
@@ -166,7 +166,7 @@ void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp);
/**
* qht_statistics_init - Gather statistics from a QHT
* @ht: QHT to gather statistics from
- * @stats: pointer to a struct qht_stats to be filled in
+ * @stats: pointer to a &struct qht_stats to be filled in
*
* Does NOT need to be called under an RCU read-critical section,
* since it does not dereference any pointers stored in the hash table.
@@ -177,8 +177,8 @@ void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp);
void qht_statistics_init(struct qht *ht, struct qht_stats *stats);
/**
- * qht_statistics_destroy - Destroy a struct qht_stats
- * @stats: stuct qht_stats to be destroyed
+ * qht_statistics_destroy - Destroy a &struct qht_stats
+ * @stats: &struct qht_stats to be destroyed
*
* See also: qht_statistics_init().
*/
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index 35292c3155..aa270d2b38 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -425,6 +425,11 @@ struct { \
(var); \
(var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
+#define QTAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev_var) \
+ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
+ (var) && ((prev_var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)), 1); \
+ (var) = (prev_var))
+
/*
* Tail queue access methods.
*/
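
Like the forward _SAFE variant, the reverse one caches the previous element so the current one may be unlinked or freed inside the loop body. A sketch of draining a tail queue from the back (the element type is illustrative):

    struct Item {
        QTAILQ_ENTRY(Item) next;
    };
    QTAILQ_HEAD(ItemHead, Item) items = QTAILQ_HEAD_INITIALIZER(items);

    static void drain_items_backwards(void)
    {
        struct Item *it, *prev;

        QTAILQ_FOREACH_REVERSE_SAFE(it, &items, ItemHead, next, prev) {
            QTAILQ_REMOVE(&items, it, next);
            g_free(it);
        }
    }
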
diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h
index 4f7311b52a..8889bcb1ec 100644
--- a/include/qemu/sockets.h
+++ b/include/qemu/sockets.h
@@ -35,7 +35,7 @@ int inet_connect_saddr(InetSocketAddress *saddr, Error **errp);
NetworkAddressFamily inet_netfamily(int family);
-int unix_listen(const char *path, char *ostr, int olen, Error **errp);
+int unix_listen(const char *path, Error **errp);
int unix_connect(const char *path, Error **errp);
SocketAddress *socket_parse(const char *str, Error **errp);
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index 3dbc69b1e9..9bd7a834ba 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -36,6 +36,7 @@ typedef struct FWCfgIoState FWCfgIoState;
typedef struct FWCfgMemState FWCfgMemState;
typedef struct FWCfgState FWCfgState;
typedef struct HCIInfo HCIInfo;
+typedef struct HVFX86EmulatorState HVFX86EmulatorState;
typedef struct I2CBus I2CBus;
typedef struct I2SCodec I2SCodec;
typedef struct ISABus ISABus;
diff --git a/include/qemu/uuid.h b/include/qemu/uuid.h
index afe4840296..09489ce5c5 100644
--- a/include/qemu/uuid.h
+++ b/include/qemu/uuid.h
@@ -48,6 +48,8 @@ void qemu_uuid_generate(QemuUUID *out);
int qemu_uuid_is_null(const QemuUUID *uu);
+int qemu_uuid_is_equal(const QemuUUID *lhv, const QemuUUID *rhv);
+
void qemu_uuid_unparse(const QemuUUID *uuid, char *out);
char *qemu_uuid_unparse_strdup(const QemuUUID *uuid);
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index c2fa151228..93bd546879 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -423,6 +423,8 @@ struct CPUState {
* unnecessary flushes.
*/
uint16_t pending_tlb_flush;
+
+ int hvf_fd;
};
QTAILQ_HEAD(CPUTailQ, CPUState);
diff --git a/include/scsi/utils.h b/include/scsi/utils.h
index 00a4bdb080..4b705f5e0f 100644
--- a/include/scsi/utils.h
+++ b/include/scsi/utils.h
@@ -31,6 +31,9 @@ typedef struct SCSISense {
} SCSISense;
int scsi_build_sense(uint8_t *buf, SCSISense sense);
+SCSISense scsi_parse_sense_buf(const uint8_t *in_buf, int in_len);
+int scsi_build_sense_buf(uint8_t *buf, size_t max_size, SCSISense sense,
+ bool fixed_sense);
/*
* Predefined sense codes
@@ -76,7 +79,11 @@ extern const struct SCSISense sense_code_LUN_FAILURE;
extern const struct SCSISense sense_code_LUN_COMM_FAILURE;
/* Command aborted, Overlapped Commands Attempted */
extern const struct SCSISense sense_code_OVERLAPPED_COMMANDS;
-/* LUN not ready, Capacity data has changed */
+/* Medium error, Unrecovered read error */
+extern const struct SCSISense sense_code_READ_ERROR;
+/* LUN not ready, Cause not reportable */
+extern const struct SCSISense sense_code_NOT_READY;
+/* Unit attention, Capacity data has changed */
extern const struct SCSISense sense_code_CAPACITY_CHANGED;
/* Unit attention, SCSI bus reset */
extern const struct SCSISense sense_code_SCSI_BUS_RESET;
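
The new buffer-based helpers take an explicit length and can emit either fixed or descriptor-format sense. A hedged round-trip sketch using one of the predefined codes declared above (the buffer size and the wrapper function are illustrative):

    static void sense_roundtrip_example(void)
    {
        uint8_t sense_buf[32];
        int len;
        SCSISense parsed;

        /* encode "medium error, unrecovered read error" in fixed format */
        len = scsi_build_sense_buf(sense_buf, sizeof(sense_buf),
                                   sense_code_READ_ERROR, true /* fixed */);

        /* decode it (or a buffer returned by a backend) again */
        parsed = scsi_parse_sense_buf(sense_buf, len);
        (void)parsed;   /* the parsed key/asc/ascq describe the condition */
    }
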
diff --git a/include/standard-headers/asm-s390/virtio-ccw.h b/include/standard-headers/asm-s390/virtio-ccw.h
index a9a4ebf79f..967aad3901 100644
--- a/include/standard-headers/asm-s390/virtio-ccw.h
+++ b/include/standard-headers/asm-s390/virtio-ccw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Definitions for virtio-ccw devices.
*
diff --git a/include/standard-headers/asm-x86/hyperv.h b/include/standard-headers/asm-x86/hyperv.h
index 5f95d5ed02..ce87d0c344 100644
--- a/include/standard-headers/asm-x86/hyperv.h
+++ b/include/standard-headers/asm-x86/hyperv.h
@@ -1,393 +1 @@
-#ifndef _ASM_X86_HYPERV_H
-#define _ASM_X86_HYPERV_H
-
-#include "standard-headers/linux/types.h"
-
-/*
- * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
- * is set by CPUID(HvCpuIdFunctionVersionAndFeatures).
- */
-#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
-#define HYPERV_CPUID_INTERFACE 0x40000001
-#define HYPERV_CPUID_VERSION 0x40000002
-#define HYPERV_CPUID_FEATURES 0x40000003
-#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
-#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
-
-#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
-#define HYPERV_CPUID_MIN 0x40000005
-#define HYPERV_CPUID_MAX 0x4000ffff
-
-/*
- * Feature identification. EAX indicates which features are available
- * to the partition based upon the current partition privileges.
- */
-
-/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */
-#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0)
-/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
-#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
-/* Partition reference TSC MSR is available */
-#define HV_X64_MSR_REFERENCE_TSC_AVAILABLE (1 << 9)
-
-/* A partition's reference time stamp counter (TSC) page */
-#define HV_X64_MSR_REFERENCE_TSC 0x40000021
-
-/*
- * There is a single feature flag that signifies if the partition has access
- * to MSRs with local APIC and TSC frequencies.
- */
-#define HV_X64_ACCESS_FREQUENCY_MSRS (1 << 11)
-
-/*
- * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
- * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
- */
-#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2)
-/*
- * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through
- * HV_X64_MSR_STIMER3_COUNT) available
- */
-#define HV_X64_MSR_SYNTIMER_AVAILABLE (1 << 3)
-/*
- * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
- * are available
- */
-#define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4)
-/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/
-#define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5)
-/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/
-#define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6)
-/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/
-#define HV_X64_MSR_RESET_AVAILABLE (1 << 7)
- /*
- * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE,
- * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE,
- * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available
- */
-#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8)
-
-/* Frequency MSRs available */
-#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE (1 << 8)
-
-/* Crash MSR available */
-#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
-
-/*
- * Feature identification: EBX indicates which flags were specified at
- * partition creation. The format is the same as the partition creation
- * flag structure defined in section Partition Creation Flags.
- */
-#define HV_X64_CREATE_PARTITIONS (1 << 0)
-#define HV_X64_ACCESS_PARTITION_ID (1 << 1)
-#define HV_X64_ACCESS_MEMORY_POOL (1 << 2)
-#define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3)
-#define HV_X64_POST_MESSAGES (1 << 4)
-#define HV_X64_SIGNAL_EVENTS (1 << 5)
-#define HV_X64_CREATE_PORT (1 << 6)
-#define HV_X64_CONNECT_PORT (1 << 7)
-#define HV_X64_ACCESS_STATS (1 << 8)
-#define HV_X64_DEBUGGING (1 << 11)
-#define HV_X64_CPU_POWER_MANAGEMENT (1 << 12)
-#define HV_X64_CONFIGURE_PROFILER (1 << 13)
-
-/*
- * Feature identification. EDX indicates which miscellaneous features
- * are available to the partition.
- */
-/* The MWAIT instruction is available (per section MONITOR / MWAIT) */
-#define HV_X64_MWAIT_AVAILABLE (1 << 0)
-/* Guest debugging support is available */
-#define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1)
-/* Performance Monitor support is available*/
-#define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2)
-/* Support for physical CPU dynamic partitioning events is available*/
-#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3)
-/*
- * Support for passing hypercall input parameter block via XMM
- * registers is available
- */
-#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4)
-/* Support for a virtual guest idle state is available */
-#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5)
-/* Guest crash data handler available */
-#define HV_X64_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
-
-/*
- * Implementation recommendations. Indicates which behaviors the hypervisor
- * recommends the OS implement for optimal performance.
- */
- /*
- * Recommend using hypercall for address space switches rather
- * than MOV to CR3 instruction
- */
-#define HV_X64_AS_SWITCH_RECOMMENDED (1 << 0)
-/* Recommend using hypercall for local TLB flushes rather
- * than INVLPG or MOV to CR3 instructions */
-#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1)
-/*
- * Recommend using hypercall for remote TLB flushes rather
- * than inter-processor interrupts
- */
-#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2)
-/*
- * Recommend using MSRs for accessing APIC registers
- * EOI, ICR and TPR rather than their memory-mapped counterparts
- */
-#define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3)
-/* Recommend using the hypervisor-provided MSR to initiate a system RESET */
-#define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4)
-/*
- * Recommend using relaxed timing for this partition. If used,
- * the VM should disable any watchdog timeouts that rely on the
- * timely delivery of external interrupts
- */
-#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5)
-
-/*
- * Virtual APIC support
- */
-#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
-
-/* Recommend using the newer ExProcessorMasks interface */
-#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11)
-
-/*
- * Crash notification flag.
- */
-#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
-
-/* MSR used to identify the guest OS. */
-#define HV_X64_MSR_GUEST_OS_ID 0x40000000
-
-/* MSR used to setup pages used to communicate with the hypervisor. */
-#define HV_X64_MSR_HYPERCALL 0x40000001
-
-/* MSR used to provide vcpu index */
-#define HV_X64_MSR_VP_INDEX 0x40000002
-
-/* MSR used to reset the guest OS. */
-#define HV_X64_MSR_RESET 0x40000003
-
-/* MSR used to provide vcpu runtime in 100ns units */
-#define HV_X64_MSR_VP_RUNTIME 0x40000010
-
-/* MSR used to read the per-partition time reference counter */
-#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
-
-/* MSR used to retrieve the TSC frequency */
-#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
-
-/* MSR used to retrieve the local APIC timer frequency */
-#define HV_X64_MSR_APIC_FREQUENCY 0x40000023
-
-/* Define the virtual APIC registers */
-#define HV_X64_MSR_EOI 0x40000070
-#define HV_X64_MSR_ICR 0x40000071
-#define HV_X64_MSR_TPR 0x40000072
-#define HV_X64_MSR_APIC_ASSIST_PAGE 0x40000073
-
-/* Define synthetic interrupt controller model specific registers. */
-#define HV_X64_MSR_SCONTROL 0x40000080
-#define HV_X64_MSR_SVERSION 0x40000081
-#define HV_X64_MSR_SIEFP 0x40000082
-#define HV_X64_MSR_SIMP 0x40000083
-#define HV_X64_MSR_EOM 0x40000084
-#define HV_X64_MSR_SINT0 0x40000090
-#define HV_X64_MSR_SINT1 0x40000091
-#define HV_X64_MSR_SINT2 0x40000092
-#define HV_X64_MSR_SINT3 0x40000093
-#define HV_X64_MSR_SINT4 0x40000094
-#define HV_X64_MSR_SINT5 0x40000095
-#define HV_X64_MSR_SINT6 0x40000096
-#define HV_X64_MSR_SINT7 0x40000097
-#define HV_X64_MSR_SINT8 0x40000098
-#define HV_X64_MSR_SINT9 0x40000099
-#define HV_X64_MSR_SINT10 0x4000009A
-#define HV_X64_MSR_SINT11 0x4000009B
-#define HV_X64_MSR_SINT12 0x4000009C
-#define HV_X64_MSR_SINT13 0x4000009D
-#define HV_X64_MSR_SINT14 0x4000009E
-#define HV_X64_MSR_SINT15 0x4000009F
-
-/*
- * Synthetic Timer MSRs. Four timers per vcpu.
- */
-#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
-#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
-#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
-#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
-#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
-#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
-#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
-#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
-
-/* Hyper-V guest crash notification MSR's */
-#define HV_X64_MSR_CRASH_P0 0x40000100
-#define HV_X64_MSR_CRASH_P1 0x40000101
-#define HV_X64_MSR_CRASH_P2 0x40000102
-#define HV_X64_MSR_CRASH_P3 0x40000103
-#define HV_X64_MSR_CRASH_P4 0x40000104
-#define HV_X64_MSR_CRASH_CTL 0x40000105
-#define HV_X64_MSR_CRASH_CTL_NOTIFY (1ULL << 63)
-#define HV_X64_MSR_CRASH_PARAMS \
- (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
-
-#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
-#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
-#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
- (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
-
-/* Declare the various hypercall operations. */
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
-#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
-#define HVCALL_POST_MESSAGE 0x005c
-#define HVCALL_SIGNAL_EVENT 0x005d
-
-#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001
-#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12
-#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK \
- (~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
-
-#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001
-#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12
-
-#define HV_PROCESSOR_POWER_STATE_C0 0
-#define HV_PROCESSOR_POWER_STATE_C1 1
-#define HV_PROCESSOR_POWER_STATE_C2 2
-#define HV_PROCESSOR_POWER_STATE_C3 3
-
-#define HV_FLUSH_ALL_PROCESSORS BIT(0)
-#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
-#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
-#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
-
-enum HV_GENERIC_SET_FORMAT {
- HV_GENERIC_SET_SPARCE_4K,
- HV_GENERIC_SET_ALL,
-};
-
-/* hypercall status code */
-#define HV_STATUS_SUCCESS 0
-#define HV_STATUS_INVALID_HYPERCALL_CODE 2
-#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
-#define HV_STATUS_INVALID_ALIGNMENT 4
-#define HV_STATUS_INSUFFICIENT_MEMORY 11
-#define HV_STATUS_INVALID_CONNECTION_ID 18
-#define HV_STATUS_INSUFFICIENT_BUFFERS 19
-
-typedef struct _HV_REFERENCE_TSC_PAGE {
- uint32_t tsc_sequence;
- uint32_t res1;
- uint64_t tsc_scale;
- int64_t tsc_offset;
-} HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE;
-
-/* Define the number of synthetic interrupt sources. */
-#define HV_SYNIC_SINT_COUNT (16)
-/* Define the expected SynIC version. */
-#define HV_SYNIC_VERSION_1 (0x1)
-
-#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
-#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
-#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
-#define HV_SYNIC_SINT_MASKED (1ULL << 16)
-#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
-#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
-
-#define HV_SYNIC_STIMER_COUNT (4)
-
-/* Define synthetic interrupt controller message constants. */
-#define HV_MESSAGE_SIZE (256)
-#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
-#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
-
-/* Define hypervisor message types. */
-enum hv_message_type {
- HVMSG_NONE = 0x00000000,
-
- /* Memory access messages. */
- HVMSG_UNMAPPED_GPA = 0x80000000,
- HVMSG_GPA_INTERCEPT = 0x80000001,
-
- /* Timer notification messages. */
- HVMSG_TIMER_EXPIRED = 0x80000010,
-
- /* Error messages. */
- HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
- HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
- HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
-
- /* Trace buffer complete messages. */
- HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
-
- /* Platform-specific processor intercept messages. */
- HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
- HVMSG_X64_MSR_INTERCEPT = 0x80010001,
- HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
- HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
- HVMSG_X64_APIC_EOI = 0x80010004,
- HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
-};
-
-/* Define synthetic interrupt controller message flags. */
-union hv_message_flags {
- uint8_t asu8;
- struct {
- uint8_t msg_pending:1;
- uint8_t reserved:7;
- };
-};
-
-/* Define port identifier type. */
-union hv_port_id {
- uint32_t asu32;
- struct {
- uint32_t id:24;
- uint32_t reserved:8;
- } u;
-};
-
-/* Define synthetic interrupt controller message header. */
-struct hv_message_header {
- uint32_t message_type;
- uint8_t payload_size;
- union hv_message_flags message_flags;
- uint8_t reserved[2];
- union {
- uint64_t sender;
- union hv_port_id port;
- };
-};
-
-/* Define synthetic interrupt controller message format. */
-struct hv_message {
- struct hv_message_header header;
- union {
- uint64_t payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
- } u;
-};
-
-/* Define the synthetic interrupt message page layout. */
-struct hv_message_page {
- struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
-};
-
-/* Define timer message payload structure. */
-struct hv_timer_message_payload {
- uint32_t timer_index;
- uint32_t reserved;
- uint64_t expiration_time; /* When the timer expired */
- uint64_t delivery_time; /* When the message was delivered */
-};
-
-#define HV_STIMER_ENABLE (1ULL << 0)
-#define HV_STIMER_PERIODIC (1ULL << 1)
-#define HV_STIMER_LAZY (1ULL << 2)
-#define HV_STIMER_AUTOENABLE (1ULL << 3)
-#define HV_STIMER_SINT(config) (uint8_t)(((config) >> 16) & 0x0F)
-
-#endif
+ /* this is a temporary placeholder until kvm_para.h stops including it */
diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h
index 2fa0f4ea6b..79841b543f 100644
--- a/include/standard-headers/linux/input-event-codes.h
+++ b/include/standard-headers/linux/input-event-codes.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Input event codes
*
@@ -406,6 +407,7 @@
#define BTN_TOOL_MOUSE 0x146
#define BTN_TOOL_LENS 0x147
#define BTN_TOOL_QUINTTAP 0x148 /* Five fingers on trackpad */
+#define BTN_STYLUS3 0x149
#define BTN_TOUCH 0x14a
#define BTN_STYLUS 0x14b
#define BTN_STYLUS2 0x14c
diff --git a/include/standard-headers/linux/input.h b/include/standard-headers/linux/input.h
index 666e201ddb..bc3e6d3d5b 100644
--- a/include/standard-headers/linux/input.h
+++ b/include/standard-headers/linux/input.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (c) 1999-2002 Vojtech Pavlik
*
diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h
index f8d5804592..70c2b2ade0 100644
--- a/include/standard-headers/linux/pci_regs.h
+++ b/include/standard-headers/linux/pci_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* pci_regs.h
*
@@ -746,6 +747,7 @@
#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
+#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
/* Virtual Channel */
@@ -939,9 +941,13 @@
#define PCI_SATA_SIZEOF_LONG 16
/* Resizable BARs */
+#define PCI_REBAR_CAP 4 /* capability register */
+#define PCI_REBAR_CAP_SIZES 0x00FFFFF0 /* supported BAR sizes */
#define PCI_REBAR_CTRL 8 /* control register */
-#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */
-#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */
+#define PCI_REBAR_CTRL_BAR_IDX 0x00000007 /* BAR index */
+#define PCI_REBAR_CTRL_NBAR_MASK 0x000000E0 /* # of resizable BARs */
+#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # of BARs */
+#define PCI_REBAR_CTRL_BAR_SIZE 0x00001F00 /* BAR size */
/* Dynamic Power Allocation */
#define PCI_DPA_CAP 4 /* capability register */
@@ -960,6 +966,7 @@
/* Downstream Port Containment */
#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
+#define PCI_EXP_DPC_IRQ 0x1f /* DPC Interrupt Message Number */
#define PCI_EXP_DPC_CAP_RP_EXT 0x20 /* Root Port Extensions for DPC */
#define PCI_EXP_DPC_CAP_POISONED_TLP 0x40 /* Poisoned TLP Egress Blocking Supported */
#define PCI_EXP_DPC_CAP_SW_TRIGGER 0x80 /* Software Triggering Supported */
@@ -995,19 +1002,25 @@
#define PCI_PTM_CTRL_ENABLE 0x00000001 /* PTM enable */
#define PCI_PTM_CTRL_ROOT 0x00000002 /* Root select */
-/* L1 PM Substates */
-#define PCI_L1SS_CAP 4 /* capability register */
-#define PCI_L1SS_CAP_PCIPM_L1_2 1 /* PCI PM L1.2 Support */
-#define PCI_L1SS_CAP_PCIPM_L1_1 2 /* PCI PM L1.1 Support */
-#define PCI_L1SS_CAP_ASPM_L1_2 4 /* ASPM L1.2 Support */
-#define PCI_L1SS_CAP_ASPM_L1_1 8 /* ASPM L1.1 Support */
-#define PCI_L1SS_CAP_L1_PM_SS 16 /* L1 PM Substates Support */
-#define PCI_L1SS_CTL1 8 /* Control Register 1 */
-#define PCI_L1SS_CTL1_PCIPM_L1_2 1 /* PCI PM L1.2 Enable */
-#define PCI_L1SS_CTL1_PCIPM_L1_1 2 /* PCI PM L1.1 Support */
-#define PCI_L1SS_CTL1_ASPM_L1_2 4 /* ASPM L1.2 Support */
-#define PCI_L1SS_CTL1_ASPM_L1_1 8 /* ASPM L1.1 Support */
-#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000F
-#define PCI_L1SS_CTL2 0xC /* Control Register 2 */
+/* ASPM L1 PM Substates */
+#define PCI_L1SS_CAP 0x04 /* Capabilities Register */
+#define PCI_L1SS_CAP_PCIPM_L1_2 0x00000001 /* PCI-PM L1.2 Supported */
+#define PCI_L1SS_CAP_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Supported */
+#define PCI_L1SS_CAP_ASPM_L1_2 0x00000004 /* ASPM L1.2 Supported */
+#define PCI_L1SS_CAP_ASPM_L1_1 0x00000008 /* ASPM L1.1 Supported */
+#define PCI_L1SS_CAP_L1_PM_SS 0x00000010 /* L1 PM Substates Supported */
+#define PCI_L1SS_CAP_CM_RESTORE_TIME 0x0000ff00 /* Port Common_Mode_Restore_Time */
+#define PCI_L1SS_CAP_P_PWR_ON_SCALE 0x00030000 /* Port T_POWER_ON scale */
+#define PCI_L1SS_CAP_P_PWR_ON_VALUE 0x00f80000 /* Port T_POWER_ON value */
+#define PCI_L1SS_CTL1 0x08 /* Control 1 Register */
+#define PCI_L1SS_CTL1_PCIPM_L1_2 0x00000001 /* PCI-PM L1.2 Enable */
+#define PCI_L1SS_CTL1_PCIPM_L1_1 0x00000002 /* PCI-PM L1.1 Enable */
+#define PCI_L1SS_CTL1_ASPM_L1_2 0x00000004 /* ASPM L1.2 Enable */
+#define PCI_L1SS_CTL1_ASPM_L1_1 0x00000008 /* ASPM L1.1 Enable */
+#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000f
+#define PCI_L1SS_CTL1_CM_RESTORE_TIME 0x0000ff00 /* Common_Mode_Restore_Time */
+#define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */
+#define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */
+#define PCI_L1SS_CTL2 0x0c /* Control 2 Register */
#endif /* LINUX_PCI_REGS_H */
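
The updated Resizable BAR constants are full 32-bit field masks rather than pre-shifted values, so consumers combine them with the shift explicitly. A small decode sketch for the control register:

    #include "standard-headers/linux/pci_regs.h"

    static unsigned rebar_nbar_count(uint32_t rebar_ctrl)
    {
        /* number of resizable BARs advertised in the control register */
        return (rebar_ctrl & PCI_REBAR_CTRL_NBAR_MASK)
               >> PCI_REBAR_CTRL_NBAR_SHIFT;
    }
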
diff --git a/include/sysemu/hax.h b/include/sysemu/hax.h
index 232a68ab1b..f252399623 100644
--- a/include/sysemu/hax.h
+++ b/include/sysemu/hax.h
@@ -22,7 +22,6 @@
#ifndef QEMU_HAX_H
#define QEMU_HAX_H
-#include "config-host.h"
#include "qemu-common.h"
int hax_sync_vcpus(void);
diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h
new file mode 100644
index 0000000000..e4e43f6468
--- /dev/null
+++ b/include/sysemu/hvf.h
@@ -0,0 +1,107 @@
+/*
+ * QEMU Hypervisor.framework (HVF) support
+ *
+ * Copyright Google Inc., 2017
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/* header to be included in non-HVF-specific code */
+#ifndef _HVF_H
+#define _HVF_H
+
+#include "config-host.h"
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/bitops.h"
+#include "exec/memory.h"
+#include "sysemu/accel.h"
+
+extern int hvf_disabled;
+#ifdef CONFIG_HVF
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+#include <Hypervisor/hv_error.h>
+#include "target/i386/cpu.h"
+#include "hw/hw.h"
+uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
+ int reg);
+#define hvf_enabled() !hvf_disabled
+#else
+#define hvf_enabled() 0
+#define hvf_get_supported_cpuid(func, idx, reg) 0
+#endif
+
+/* hvf_slot flags */
+#define HVF_SLOT_LOG (1 << 0)
+
+typedef struct hvf_slot {
+ uint64_t start;
+ uint64_t size;
+ uint8_t *mem;
+ int slot_id;
+ uint32_t flags;
+ MemoryRegion *region;
+} hvf_slot;
+
+typedef struct hvf_vcpu_caps {
+ uint64_t vmx_cap_pinbased;
+ uint64_t vmx_cap_procbased;
+ uint64_t vmx_cap_procbased2;
+ uint64_t vmx_cap_entry;
+ uint64_t vmx_cap_exit;
+ uint64_t vmx_cap_preemption_timer;
+} hvf_vcpu_caps;
+
+typedef struct HVFState {
+ AccelState parent;
+ hvf_slot slots[32];
+ int num_slots;
+
+ hvf_vcpu_caps *hvf_caps;
+} HVFState;
+extern HVFState *hvf_state;
+
+void hvf_set_phys_mem(MemoryRegionSection *, bool);
+void hvf_handle_io(CPUArchState *, uint16_t, void *,
+ int, int, int);
+hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
+
+/* Disable HVF if |disable| is 1; otherwise, enable it iff it is supported by
+ * the host CPU. Use hvf_enabled() after this to get the result. */
+void hvf_disable(int disable);
+
+/* Returns non-0 if the host CPU supports the VMX "unrestricted guest" feature
+ * which allows the virtual CPU to directly run in "real mode". If true, this
+ * allows QEMU to run several vCPU threads in parallel (see cpus.c). Otherwise,
+ * only a single TCG thread can run, and it will call HVF to run the current
+ * instructions, except in case of "real mode" (paging disabled, typically at
+ * boot time), or MMIO operations. */
+
+int hvf_sync_vcpus(void);
+
+int hvf_init_vcpu(CPUState *);
+int hvf_vcpu_exec(CPUState *);
+int hvf_smp_cpu_exec(CPUState *);
+void hvf_cpu_synchronize_state(CPUState *);
+void hvf_cpu_synchronize_post_reset(CPUState *);
+void hvf_cpu_synchronize_post_init(CPUState *);
+void _hvf_cpu_synchronize_post_init(CPUState *, run_on_cpu_data);
+
+void hvf_vcpu_destroy(CPUState *);
+void hvf_raise_event(CPUState *);
+/* void hvf_reset_vcpu_state(void *opaque); */
+void hvf_reset_vcpu(CPUState *);
+void vmx_update_tpr(CPUState *);
+void update_apic_tpr(CPUState *);
+int hvf_put_registers(CPUState *);
+void vmx_clear_int_window_exiting(CPUState *cpu);
+
+#define TYPE_HVF_ACCEL ACCEL_CLASS_NAME("hvf")
+
+#define HVF_STATE(obj) \
+ OBJECT_CHECK(HVFState, (obj), TYPE_HVF_ACCEL)
+
+#endif
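
Outside of HVF-specific code, most callers only need the hvf_enabled() predicate before touching the per-vCPU entry points. A hedged sketch of that gating, assuming a negative return from hvf_init_vcpu() indicates failure (the wrapper function is illustrative):

    static void accel_init_vcpu(CPUState *cpu)
    {
        if (hvf_enabled()) {
            int r = hvf_init_vcpu(cpu);
            if (r < 0) {
                fprintf(stderr, "hvf_init_vcpu failed: %d\n", r);
                exit(1);
            }
            hvf_cpu_synchronize_post_init(cpu);
        }
        /* other accelerators (KVM, TCG, ...) take their own paths */
    }
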
diff --git a/include/sysemu/iothread.h b/include/sysemu/iothread.h
index 110329b2b4..799614ffd2 100644
--- a/include/sysemu/iothread.h
+++ b/include/sysemu/iothread.h
@@ -29,7 +29,8 @@ typedef struct {
GOnce once;
QemuMutex init_done_lock;
QemuCond init_done_cond; /* is thread initialization done? */
- bool stopping;
+ bool stopping; /* has iothread_stop() been called? */
+ bool running; /* should iothread_run() continue? */
int thread_id;
/* AioContext poll parameters */
@@ -42,6 +43,7 @@ typedef struct {
OBJECT_CHECK(IOThread, obj, TYPE_IOTHREAD)
char *iothread_get_id(IOThread *iothread);
+IOThread *iothread_by_id(const char *id);
AioContext *iothread_get_aio_context(IOThread *iothread);
void iothread_stop_all(void);
GMainContext *iothread_get_g_main_context(IOThread *iothread);
diff --git a/include/sysemu/numa.h b/include/sysemu/numa.h
index 5c6df2820b..b3545215f6 100644
--- a/include/sysemu/numa.h
+++ b/include/sysemu/numa.h
@@ -10,17 +10,10 @@
extern int nb_numa_nodes; /* Number of NUMA nodes */
extern bool have_numa_distance;
-struct numa_addr_range {
- ram_addr_t mem_start;
- ram_addr_t mem_end;
- QLIST_ENTRY(numa_addr_range) entry;
-};
-
struct node_info {
uint64_t node_mem;
struct HostMemoryBackend *node_memdev;
bool present;
- QLIST_HEAD(, numa_addr_range) addr; /* List to store address ranges */
uint8_t distance[MAX_NODES];
};
@@ -33,9 +26,6 @@ extern NodeInfo numa_info[MAX_NODES];
void parse_numa_opts(MachineState *ms);
void query_numa_node_mem(NumaNodeMem node_mem[]);
extern QemuOptsList qemu_numa_opts;
-void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node);
-void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node);
-uint32_t numa_get_node(ram_addr_t addr, Error **errp);
void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
int nb_nodes, ram_addr_t size);
void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index c083869fcf..31612caf10 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -166,8 +166,6 @@ extern Chardev *serial_hds[MAX_SERIAL_PORTS];
extern Chardev *parallel_hds[MAX_PARALLEL_PORTS];
-void hmp_usb_add(Monitor *mon, const QDict *qdict);
-void hmp_usb_del(Monitor *mon, const QDict *qdict);
void hmp_info_usb(Monitor *mon, const QDict *qdict);
void add_boot_device_path(int32_t bootindex, DeviceState *dev,
diff --git a/include/sysemu/tpm.h b/include/sysemu/tpm.h
index d7a2bd8556..852e02687c 100644
--- a/include/sysemu/tpm.h
+++ b/include/sysemu/tpm.h
@@ -12,35 +12,59 @@
#ifndef QEMU_TPM_H
#define QEMU_TPM_H
-#include "qemu/option.h"
#include "qom/object.h"
-
-typedef struct TPMState TPMState;
+#include "qapi-types.h"
int tpm_config_parse(QemuOptsList *opts_list, const char *optarg);
int tpm_init(void);
void tpm_cleanup(void);
-typedef enum TPMVersion {
+typedef enum TPMVersion {
TPM_VERSION_UNSPEC = 0,
TPM_VERSION_1_2 = 1,
TPM_VERSION_2_0 = 2,
} TPMVersion;
-TPMVersion tpm_tis_get_tpm_version(Object *obj);
+#define TYPE_TPM_IF "tpm-if"
+#define TPM_IF_CLASS(klass) \
+ OBJECT_CLASS_CHECK(TPMIfClass, (klass), TYPE_TPM_IF)
+#define TPM_IF_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(TPMIfClass, (obj), TYPE_TPM_IF)
+#define TPM_IF(obj) \
+ INTERFACE_CHECK(TPMIf, (obj), TYPE_TPM_IF)
+
+typedef struct TPMIf {
+ Object parent_obj;
+} TPMIf;
+
+typedef struct TPMIfClass {
+ InterfaceClass parent_class;
+
+ enum TpmModel model;
+ void (*request_completed)(TPMIf *obj);
+ enum TPMVersion (*get_version)(TPMIf *obj);
+} TPMIfClass;
#define TYPE_TPM_TIS "tpm-tis"
-static inline TPMVersion tpm_get_version(void)
+#define TPM_IS_TIS(chr) \
+ object_dynamic_cast(OBJECT(chr), TYPE_TPM_TIS)
+
+/* returns NULL unless there is exactly one TPM device */
+static inline TPMIf *tpm_find(void)
{
-#ifdef CONFIG_TPM
- Object *obj = object_resolve_path_type("", TYPE_TPM_TIS, NULL);
+ Object *obj = object_resolve_path_type("", TYPE_TPM_IF, NULL);
- if (obj) {
- return tpm_tis_get_tpm_version(obj);
+ return TPM_IF(obj);
+}
+
+static inline TPMVersion tpm_get_version(TPMIf *ti)
+{
+ if (!ti) {
+ return TPM_VERSION_UNSPEC;
}
-#endif
- return TPM_VERSION_UNSPEC;
+
+ return TPM_IF_GET_CLASS(ti)->get_version(ti);
}
#endif /* QEMU_TPM_H */
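For illustration, a minimal sketch of how a caller might use the TPMIf lookup and version query declared above; the function name is invented and the comment about firmware tables is only an example of a possible use:

    #include "sysemu/tpm.h"

    static void example_probe_tpm(void)
    {
        TPMIf *ti = tpm_find();                /* NULL unless exactly one TPM device exists */
        TPMVersion ver = tpm_get_version(ti);  /* NULL-safe: returns TPM_VERSION_UNSPEC */

        if (ver == TPM_VERSION_2_0) {
            /* e.g. expose the TPM 2.0 variant of the firmware tables */
        }
    }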
diff --git a/include/sysemu/tpm_backend.h b/include/sysemu/tpm_backend.h
index 03ea5a3400..0d6c994a62 100644
--- a/include/sysemu/tpm_backend.h
+++ b/include/sysemu/tpm_backend.h
@@ -43,14 +43,14 @@ struct TPMBackend {
Object parent;
/*< protected >*/
+ TPMIf *tpmif;
bool opened;
- TPMState *tpm_state;
GThreadPool *thread_pool;
bool had_startup_error;
+ QEMUBH *bh;
/* <public> */
char *id;
- enum TpmModel fe_model;
QLIST_ENTRY(TPMBackend) list;
};
@@ -63,24 +63,27 @@ struct TPMBackendClass {
/* get a descriptive text of the backend to display to the user */
const char *desc;
- TPMBackend *(*create)(QemuOpts *opts, const char *id);
+ TPMBackend *(*create)(QemuOpts *opts);
- /* start up the TPM on the backend */
- int (*startup_tpm)(TPMBackend *t);
+ /* start up the TPM on the backend - optional */
+ int (*startup_tpm)(TPMBackend *t, size_t buffersize);
+ /* optional */
void (*reset)(TPMBackend *t);
void (*cancel_cmd)(TPMBackend *t);
+ /* optional */
bool (*get_tpm_established_flag)(TPMBackend *t);
+ /* optional */
int (*reset_tpm_established_flag)(TPMBackend *t, uint8_t locty);
TPMVersion (*get_tpm_version)(TPMBackend *t);
- TpmTypeOptions *(*get_tpm_options)(TPMBackend *t);
+ size_t (*get_buffer_size)(TPMBackend *t);
- void (*opened)(TPMBackend *s, Error **errp);
+ TpmTypeOptions *(*get_tpm_options)(TPMBackend *t);
void (*handle_request)(TPMBackend *s, TPMBackendCmd *cmd);
};
@@ -96,22 +99,25 @@ enum TpmType tpm_backend_get_type(TPMBackend *s);
/**
* tpm_backend_init:
* @s: the backend to be initialized
- * @state: TPMState
+ * @tpmif: TPM interface
* @datacb: callback for sending data to frontend
+ * @errp: a pointer to return the #Error object if an error occurs.
*
* Initialize the backend with the given variables.
*
* Returns 0 on success.
*/
-int tpm_backend_init(TPMBackend *s, TPMState *state);
+int tpm_backend_init(TPMBackend *s, TPMIf *tpmif, Error **errp);
/**
* tpm_backend_startup_tpm:
* @s: the backend whose TPM support is to be started
+ * @buffersize: the buffer size the TPM is supposed to use,
+ * 0 to leave it as-is
*
* Returns 0 on success.
*/
-int tpm_backend_startup_tpm(TPMBackend *s);
+int tpm_backend_startup_tpm(TPMBackend *s, size_t buffersize);
/**
* tpm_backend_had_startup_error:
@@ -171,16 +177,6 @@ bool tpm_backend_get_tpm_established_flag(TPMBackend *s);
int tpm_backend_reset_tpm_established_flag(TPMBackend *s, uint8_t locty);
/**
- * tpm_backend_open:
- * @s: the backend to open
- * @errp: a pointer to return the #Error object if an error occurs.
- *
- * This function will open the backend if it is not already open. Calling this
- * function on an already opened backend will not result in an error.
- */
-void tpm_backend_open(TPMBackend *s, Error **errp);
-
-/**
* tpm_backend_get_tpm_version:
* @s: the backend to call into
*
@@ -191,6 +187,16 @@ void tpm_backend_open(TPMBackend *s, Error **errp);
TPMVersion tpm_backend_get_tpm_version(TPMBackend *s);
/**
+ * tpm_backend_get_buffer_size:
+ * @s: the backend to call into
+ *
+ * Get the TPM's buffer size.
+ *
+ * Returns buffer size.
+ */
+size_t tpm_backend_get_buffer_size(TPMBackend *s);
+
+/**
* tpm_backend_query_tpm:
* @s: the backend
*
@@ -200,8 +206,6 @@ TPMVersion tpm_backend_get_tpm_version(TPMBackend *s);
*/
TPMInfo *tpm_backend_query_tpm(TPMBackend *s);
-TPMBackend *qemu_find_tpm(const char *id);
-
-void tpm_register_model(enum TpmModel model);
+TPMBackend *qemu_find_tpm_be(const char *id);
#endif
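As a hedged sketch of the revised backend contract above (not code from this series): a front end could initialize and start a backend roughly as follows. The backend id "tpm0", the function name and the abbreviated error handling are assumptions; the tpm_backend_* calls are the declarations above.

    #include "qemu/osdep.h"
    #include "sysemu/tpm_backend.h"

    static int example_start_tpm_backend(TPMIf *frontend, Error **errp)
    {
        TPMBackend *be = qemu_find_tpm_be("tpm0");     /* assumed backend id */

        if (!be || tpm_backend_init(be, frontend, errp) < 0) {
            return -1;
        }

        /* A buffersize of 0 keeps the backend's current buffer size. */
        if (tpm_backend_startup_tpm(be, 0) < 0 ||
            tpm_backend_had_startup_error(be)) {
            return -1;
        }

        return tpm_backend_get_tpm_version(be) == TPM_VERSION_UNSPEC ? -1 : 0;
    }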
diff --git a/include/ui/input.h b/include/ui/input.h
index f8cee43f65..5cc76d6e41 100644
--- a/include/ui/input.h
+++ b/include/ui/input.h
@@ -77,4 +77,7 @@ extern const guint16 qemu_input_map_qcode_to_qnum[];
extern const guint qemu_input_map_qnum_to_qcode_len;
extern const guint16 qemu_input_map_qnum_to_qcode[];
+extern const guint qemu_input_map_qcode_to_linux_len;
+extern const guint16 qemu_input_map_qcode_to_linux[];
+
#endif /* INPUT_H */
diff --git a/io/Makefile.objs b/io/Makefile.objs
index 12983cca79..9a20fce4ed 100644
--- a/io/Makefile.objs
+++ b/io/Makefile.objs
@@ -8,4 +8,5 @@ io-obj-y += channel-watch.o
io-obj-y += channel-websock.o
io-obj-y += channel-util.o
io-obj-y += dns-resolver.o
+io-obj-y += net-listener.o
io-obj-y += task.o
diff --git a/io/net-listener.c b/io/net-listener.c
new file mode 100644
index 0000000000..77a4e2831c
--- /dev/null
+++ b/io/net-listener.c
@@ -0,0 +1,307 @@
+/*
+ * QEMU network listener
+ *
+ * Copyright (c) 2016-2017 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "io/net-listener.h"
+#include "io/dns-resolver.h"
+#include "qapi/error.h"
+
+QIONetListener *qio_net_listener_new(void)
+{
+ QIONetListener *ret;
+
+ ret = QIO_NET_LISTENER(object_new(TYPE_QIO_NET_LISTENER));
+
+ return ret;
+}
+
+void qio_net_listener_set_name(QIONetListener *listener,
+ const char *name)
+{
+ g_free(listener->name);
+ listener->name = g_strdup(name);
+}
+
+
+static gboolean qio_net_listener_channel_func(QIOChannel *ioc,
+ GIOCondition condition,
+ gpointer opaque)
+{
+ QIONetListener *listener = QIO_NET_LISTENER(opaque);
+ QIOChannelSocket *sioc;
+
+ sioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(ioc),
+ NULL);
+ if (!sioc) {
+ return TRUE;
+ }
+
+ if (listener->io_func) {
+ listener->io_func(listener, sioc, listener->io_data);
+ }
+
+ object_unref(OBJECT(sioc));
+
+ return TRUE;
+}
+
+
+int qio_net_listener_open_sync(QIONetListener *listener,
+ SocketAddress *addr,
+ Error **errp)
+{
+ QIODNSResolver *resolver = qio_dns_resolver_get_instance();
+ SocketAddress **resaddrs;
+ size_t nresaddrs;
+ size_t i;
+ Error *err = NULL;
+ bool success = false;
+
+ if (qio_dns_resolver_lookup_sync(resolver,
+ addr,
+ &nresaddrs,
+ &resaddrs,
+ errp) < 0) {
+ return -1;
+ }
+
+ for (i = 0; i < nresaddrs; i++) {
+ QIOChannelSocket *sioc = qio_channel_socket_new();
+
+ if (qio_channel_socket_listen_sync(sioc, resaddrs[i],
+ err ? NULL : &err) == 0) {
+ success = true;
+
+ qio_net_listener_add(listener, sioc);
+ }
+
+ qapi_free_SocketAddress(resaddrs[i]);
+ object_unref(OBJECT(sioc));
+ }
+ g_free(resaddrs);
+
+ if (success) {
+ error_free(err);
+ return 0;
+ } else {
+ error_propagate(errp, err);
+ return -1;
+ }
+}
+
+
+void qio_net_listener_add(QIONetListener *listener,
+ QIOChannelSocket *sioc)
+{
+ if (listener->name) {
+ char *name = g_strdup_printf("%s-listen", listener->name);
+ qio_channel_set_name(QIO_CHANNEL(sioc), name);
+ g_free(name);
+ }
+
+ listener->sioc = g_renew(QIOChannelSocket *, listener->sioc,
+ listener->nsioc + 1);
+ listener->io_tag = g_renew(gulong, listener->io_tag, listener->nsioc + 1);
+ listener->sioc[listener->nsioc] = sioc;
+ listener->io_tag[listener->nsioc] = 0;
+
+ object_ref(OBJECT(sioc));
+ listener->connected = true;
+
+ if (listener->io_func != NULL) {
+ object_ref(OBJECT(listener));
+ listener->io_tag[listener->nsioc] = qio_channel_add_watch(
+ QIO_CHANNEL(listener->sioc[listener->nsioc]), G_IO_IN,
+ qio_net_listener_channel_func,
+ listener, (GDestroyNotify)object_unref);
+ }
+
+ listener->nsioc++;
+}
+
+
+void qio_net_listener_set_client_func(QIONetListener *listener,
+ QIONetListenerClientFunc func,
+ gpointer data,
+ GDestroyNotify notify)
+{
+ size_t i;
+
+ if (listener->io_notify) {
+ listener->io_notify(listener->io_data);
+ }
+ listener->io_func = func;
+ listener->io_data = data;
+ listener->io_notify = notify;
+
+ for (i = 0; i < listener->nsioc; i++) {
+ if (listener->io_tag[i]) {
+ g_source_remove(listener->io_tag[i]);
+ listener->io_tag[i] = 0;
+ }
+ }
+
+ if (listener->io_func != NULL) {
+ for (i = 0; i < listener->nsioc; i++) {
+ object_ref(OBJECT(listener));
+ listener->io_tag[i] = qio_channel_add_watch(
+ QIO_CHANNEL(listener->sioc[i]), G_IO_IN,
+ qio_net_listener_channel_func,
+ listener, (GDestroyNotify)object_unref);
+ }
+ }
+}
+
+
+struct QIONetListenerClientWaitData {
+ QIOChannelSocket *sioc;
+ GMainLoop *loop;
+};
+
+
+static gboolean qio_net_listener_wait_client_func(QIOChannel *ioc,
+ GIOCondition condition,
+ gpointer opaque)
+{
+ struct QIONetListenerClientWaitData *data = opaque;
+ QIOChannelSocket *sioc;
+
+ sioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(ioc),
+ NULL);
+ if (!sioc) {
+ return TRUE;
+ }
+
+ if (data->sioc) {
+ object_unref(OBJECT(sioc));
+ } else {
+ data->sioc = sioc;
+ g_main_loop_quit(data->loop);
+ }
+
+ return TRUE;
+}
+
+QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener)
+{
+ GMainContext *ctxt = g_main_context_new();
+ GMainLoop *loop = g_main_loop_new(ctxt, TRUE);
+ GSource **sources;
+ struct QIONetListenerClientWaitData data = {
+ .sioc = NULL,
+ .loop = loop
+ };
+ size_t i;
+
+ for (i = 0; i < listener->nsioc; i++) {
+ if (listener->io_tag[i]) {
+ g_source_remove(listener->io_tag[i]);
+ listener->io_tag[i] = 0;
+ }
+ }
+
+ sources = g_new0(GSource *, listener->nsioc);
+ for (i = 0; i < listener->nsioc; i++) {
+ sources[i] = qio_channel_create_watch(QIO_CHANNEL(listener->sioc[i]),
+ G_IO_IN);
+
+ g_source_set_callback(sources[i],
+ (GSourceFunc)qio_net_listener_wait_client_func,
+ &data,
+ NULL);
+ g_source_attach(sources[i], ctxt);
+ }
+
+ g_main_loop_run(loop);
+
+ for (i = 0; i < listener->nsioc; i++) {
+ g_source_unref(sources[i]);
+ }
+ g_main_loop_unref(loop);
+ g_main_context_unref(ctxt);
+
+ if (listener->io_func != NULL) {
+ for (i = 0; i < listener->nsioc; i++) {
+ object_ref(OBJECT(listener));
+ listener->io_tag[i] = qio_channel_add_watch(
+ QIO_CHANNEL(listener->sioc[i]), G_IO_IN,
+ qio_net_listener_channel_func,
+ listener, (GDestroyNotify)object_unref);
+ }
+ }
+
+ return data.sioc;
+}
+
+void qio_net_listener_disconnect(QIONetListener *listener)
+{
+ size_t i;
+
+ if (!listener->connected) {
+ return;
+ }
+
+ for (i = 0; i < listener->nsioc; i++) {
+ if (listener->io_tag[i]) {
+ g_source_remove(listener->io_tag[i]);
+ listener->io_tag[i] = 0;
+ }
+ qio_channel_close(QIO_CHANNEL(listener->sioc[i]), NULL);
+ }
+ listener->connected = false;
+}
+
+
+bool qio_net_listener_is_connected(QIONetListener *listener)
+{
+ return listener->connected;
+}
+
+static void qio_net_listener_finalize(Object *obj)
+{
+ QIONetListener *listener = QIO_NET_LISTENER(obj);
+ size_t i;
+
+ qio_net_listener_disconnect(listener);
+
+ for (i = 0; i < listener->nsioc; i++) {
+ object_unref(OBJECT(listener->sioc[i]));
+ }
+ g_free(listener->io_tag);
+ g_free(listener->sioc);
+ g_free(listener->name);
+}
+
+static const TypeInfo qio_net_listener_info = {
+ .parent = TYPE_OBJECT,
+ .name = TYPE_QIO_NET_LISTENER,
+ .instance_size = sizeof(QIONetListener),
+ .instance_finalize = qio_net_listener_finalize,
+ .class_size = sizeof(QIONetListenerClass),
+};
+
+
+static void qio_net_listener_register_types(void)
+{
+ type_register_static(&qio_net_listener_info);
+}
+
+
+type_init(qio_net_listener_register_types);
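A brief usage sketch of the new listener, for orientation only; the callback body, the listener name and the caller-supplied SocketAddress are placeholders, while the qio_net_listener_* calls are the ones defined above.

    #include "qemu/osdep.h"
    #include "io/net-listener.h"
    #include "qapi/error.h"

    /* Called once per accepted client.  The listener drops its own reference
     * to sioc after this returns, so take one here if it must live longer. */
    static void example_client_func(QIONetListener *listener,
                                    QIOChannelSocket *sioc,
                                    gpointer opaque)
    {
        /* e.g. hand sioc off to a per-connection handler */
    }

    static void example_listen(SocketAddress *addr, Error **errp)
    {
        QIONetListener *listener = qio_net_listener_new();

        qio_net_listener_set_name(listener, "example");
        if (qio_net_listener_open_sync(listener, addr, errp) < 0) {
            object_unref(OBJECT(listener));
            return;
        }
        qio_net_listener_set_client_func(listener, example_client_func,
                                         NULL, NULL);
    }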
diff --git a/iothread.c b/iothread.c
index 27a4288578..d8b6c1fb27 100644
--- a/iothread.c
+++ b/iothread.c
@@ -55,7 +55,7 @@ static void *iothread_run(void *opaque)
qemu_cond_signal(&iothread->init_done_cond);
qemu_mutex_unlock(&iothread->init_done_lock);
- while (!atomic_read(&iothread->stopping)) {
+ while (iothread->running) {
aio_poll(iothread->ctx, true);
if (atomic_read(&iothread->worker_context)) {
@@ -78,16 +78,25 @@ static void *iothread_run(void *opaque)
return NULL;
}
+/* Runs in iothread_run() thread */
+static void iothread_stop_bh(void *opaque)
+{
+ IOThread *iothread = opaque;
+
+ iothread->running = false; /* stop iothread_run() */
+
+ if (iothread->main_loop) {
+ g_main_loop_quit(iothread->main_loop);
+ }
+}
+
void iothread_stop(IOThread *iothread)
{
if (!iothread->ctx || iothread->stopping) {
return;
}
iothread->stopping = true;
- aio_notify(iothread->ctx);
- if (atomic_read(&iothread->main_loop)) {
- g_main_loop_quit(iothread->main_loop);
- }
+ aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
qemu_thread_join(&iothread->thread);
}
@@ -134,6 +143,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
char *name, *thread_name;
iothread->stopping = false;
+ iothread->running = true;
iothread->thread_id = -1;
iothread->ctx = aio_context_new(&local_error);
if (!iothread->ctx) {
@@ -380,3 +390,10 @@ void iothread_destroy(IOThread *iothread)
{
object_unparent(OBJECT(iothread));
}
+
+/* Lookup IOThread by its id. Only finds user-created objects, not internal
+ * iothread_create() objects. */
+IOThread *iothread_by_id(const char *id)
+{
+ return IOTHREAD(object_resolve_path_type(id, TYPE_IOTHREAD, NULL));
+}
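For context, a hedged sketch of how a caller (for instance the x-blockdev-set-iothread command added in qapi/block-core.json below) might resolve an IOThread by id and obtain its AioContext; the error message wording is invented.

    static AioContext *example_get_iothread_ctx(const char *id, Error **errp)
    {
        IOThread *iothread = iothread_by_id(id);

        if (!iothread) {
            error_setg(errp, "IOThread '%s' not found", id);
            return NULL;
        }
        return iothread_get_aio_context(iothread);
    }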
diff --git a/linux-headers/asm-arm/kvm.h b/linux-headers/asm-arm/kvm.h
index fa9fae8dc2..4392955081 100644
--- a/linux-headers/asm-arm/kvm.h
+++ b/linux-headers/asm-arm/kvm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
@@ -151,6 +152,12 @@ struct kvm_arch_memory_slot {
(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
+/* PL1 Physical Timer Registers */
+#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14)
+#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14)
+
+/* Virtual Timer Registers */
#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
@@ -215,6 +222,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define KVM_DEV_ARM_ITS_CTRL_RESET 4
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/linux-headers/asm-arm/kvm_para.h b/linux-headers/asm-arm/kvm_para.h
index 14fab8f0b9..baacc4996d 100644
--- a/linux-headers/asm-arm/kvm_para.h
+++ b/linux-headers/asm-arm/kvm_para.h
@@ -1 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#include <asm-generic/kvm_para.h>
diff --git a/linux-headers/asm-arm/unistd.h b/linux-headers/asm-arm/unistd.h
index 155571b874..18b0825885 100644
--- a/linux-headers/asm-arm/unistd.h
+++ b/linux-headers/asm-arm/unistd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* arch/arm/include/asm/unistd.h
*
@@ -35,5 +36,6 @@
#define __ARM_NR_usr26 (__ARM_NR_BASE+3)
#define __ARM_NR_usr32 (__ARM_NR_BASE+4)
#define __ARM_NR_set_tls (__ARM_NR_BASE+5)
+#define __ARM_NR_get_tls (__ARM_NR_BASE+6)
#endif /* __ASM_ARM_UNISTD_H */
diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h
index d254700b08..4e80651efe 100644
--- a/linux-headers/asm-arm64/kvm.h
+++ b/linux-headers/asm-arm64/kvm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -195,6 +196,12 @@ struct kvm_arch_memory_slot {
#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
+/* Physical Timer EL0 Registers */
+#define KVM_REG_ARM_PTIMER_CTL ARM64_SYS_REG(3, 3, 14, 2, 1)
+#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2)
+#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1)
+
+/* EL0 Virtual Timer Registers */
#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
@@ -227,6 +234,7 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
+#define KVM_DEV_ARM_ITS_CTRL_RESET 4
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
diff --git a/linux-headers/asm-arm64/unistd.h b/linux-headers/asm-arm64/unistd.h
index 043d17a213..5072cbd15c 100644
--- a/linux-headers/asm-arm64/unistd.h
+++ b/linux-headers/asm-arm64/unistd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
*
diff --git a/linux-headers/asm-powerpc/epapr_hcalls.h b/linux-headers/asm-powerpc/epapr_hcalls.h
index 33b3f89f55..6cca559993 100644
--- a/linux-headers/asm-powerpc/epapr_hcalls.h
+++ b/linux-headers/asm-powerpc/epapr_hcalls.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* ePAPR hcall interface
*
diff --git a/linux-headers/asm-powerpc/kvm.h b/linux-headers/asm-powerpc/kvm.h
index 8cf8f0c969..61d6049f4c 100644
--- a/linux-headers/asm-powerpc/kvm.h
+++ b/linux-headers/asm-powerpc/kvm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
diff --git a/linux-headers/asm-powerpc/kvm_para.h b/linux-headers/asm-powerpc/kvm_para.h
index 2abcc46382..9beb49cc10 100644
--- a/linux-headers/asm-powerpc/kvm_para.h
+++ b/linux-headers/asm-powerpc/kvm_para.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
diff --git a/linux-headers/asm-powerpc/unistd.h b/linux-headers/asm-powerpc/unistd.h
index a1786340e9..36abf58582 100644
--- a/linux-headers/asm-powerpc/unistd.h
+++ b/linux-headers/asm-powerpc/unistd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* This file contains the system call numbers.
*
diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h
index 7b750ef7ee..32d372e977 100644
--- a/linux-headers/asm-s390/kvm.h
+++ b/linux-headers/asm-s390/kvm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_KVM_S390_H
#define __LINUX_KVM_S390_H
/*
diff --git a/linux-headers/asm-s390/kvm_para.h b/linux-headers/asm-s390/kvm_para.h
index ff1f4e7b30..0dc86b3a7c 100644
--- a/linux-headers/asm-s390/kvm_para.h
+++ b/linux-headers/asm-s390/kvm_para.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* User API definitions for paravirtual devices on s390
*
diff --git a/linux-headers/asm-s390/unistd.h b/linux-headers/asm-s390/unistd.h
index 65e7e59dbb..99223b874a 100644
--- a/linux-headers/asm-s390/unistd.h
+++ b/linux-headers/asm-s390/unistd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* S390 version
*
@@ -315,7 +316,8 @@
#define __NR_pwritev2 377
#define __NR_s390_guarded_storage 378
#define __NR_statx 379
-#define NR_syscalls 380
+#define __NR_s390_sthyi 380
+#define NR_syscalls 381
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h
index c2824d02ba..f3a960488e 100644
--- a/linux-headers/asm-x86/kvm.h
+++ b/linux-headers/asm-x86/kvm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_KVM_H
#define _ASM_X86_KVM_H
diff --git a/linux-headers/asm-x86/kvm_para.h b/linux-headers/asm-x86/kvm_para.h
index cefa127d84..4c300f6aaa 100644
--- a/linux-headers/asm-x86/kvm_para.h
+++ b/linux-headers/asm-x86/kvm_para.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H
@@ -109,5 +110,4 @@ struct kvm_vcpu_pv_apf_data {
#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
#define KVM_PV_EOI_DISABLED 0x0
-
#endif /* _ASM_X86_KVM_PARA_H */
diff --git a/linux-headers/asm-x86/unistd.h b/linux-headers/asm-x86/unistd.h
index 1f99b12843..c04f638154 100644
--- a/linux-headers/asm-x86/unistd.h
+++ b/linux-headers/asm-x86/unistd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_UNISTD_H
#define _ASM_X86_UNISTD_H
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index dd8a91801e..ce6c2f11f4 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_KVM_H
#define __LINUX_KVM_H
@@ -930,6 +931,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_SMT_POSSIBLE 147
#define KVM_CAP_HYPERV_SYNIC2 148
#define KVM_CAP_HYPERV_VP_INDEX 149
+#define KVM_CAP_S390_AIS_MIGRATION 150
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/linux-headers/linux/kvm_para.h b/linux-headers/linux/kvm_para.h
index 15b24ff6cf..8bcd0aa853 100644
--- a/linux-headers/linux/kvm_para.h
+++ b/linux-headers/linux/kvm_para.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_KVM_PARA_H
#define __LINUX_KVM_PARA_H
diff --git a/linux-headers/linux/psci.h b/linux-headers/linux/psci.h
index 08d443f7cf..ccd17731c6 100644
--- a/linux-headers/linux/psci.h
+++ b/linux-headers/linux/psci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* ARM Power State and Coordination Interface (PSCI) header
*
diff --git a/linux-headers/linux/userfaultfd.h b/linux-headers/linux/userfaultfd.h
index b43cf0d415..ce78878d12 100644
--- a/linux-headers/linux/userfaultfd.h
+++ b/linux-headers/linux/userfaultfd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* include/linux/userfaultfd.h
*
diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h
index 4e7ab4c52a..4312e961ff 100644
--- a/linux-headers/linux/vfio.h
+++ b/linux-headers/linux/vfio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* VFIO API definition
*
diff --git a/linux-headers/linux/vfio_ccw.h b/linux-headers/linux/vfio_ccw.h
index 3a565511ab..5bf96c3812 100644
--- a/linux-headers/linux/vfio_ccw.h
+++ b/linux-headers/linux/vfio_ccw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Interfaces for vfio-ccw
*
diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
index 1e86a3dd0d..e336395d67 100644
--- a/linux-headers/linux/vhost.h
+++ b/linux-headers/linux/vhost.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_VHOST_H
#define _LINUX_VHOST_H
/* Userspace interface for in-kernel virtio accelerators. */
diff --git a/linux-user/main.c b/linux-user/main.c
index 6286661bd3..99a551b04f 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -35,7 +35,6 @@
#include "elf.h"
#include "exec/log.h"
#include "trace/control.h"
-#include "glib-compat.h"
char *exec_path;
@@ -2680,6 +2679,8 @@ void cpu_loop(CPUSH4State *env)
target_siginfo_t info;
while (1) {
+ bool arch_interrupt = true;
+
cpu_exec_start(cs);
trapnr = cpu_exec(cs);
cpu_exec_end(cs);
@@ -2711,13 +2712,14 @@ void cpu_loop(CPUSH4State *env)
int sig;
sig = gdb_handlesig(cs, TARGET_SIGTRAP);
- if (sig)
- {
+ if (sig) {
info.si_signo = sig;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
- }
+ } else {
+ arch_interrupt = false;
+ }
}
break;
case 0xa0:
@@ -2728,9 +2730,9 @@ void cpu_loop(CPUSH4State *env)
info._sifields._sigfault._addr = env->tea;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
break;
-
case EXCP_ATOMIC:
cpu_exec_step_atomic(cs);
+ arch_interrupt = false;
break;
default:
printf ("Unhandled trap: 0x%x\n", trapnr);
@@ -2738,6 +2740,14 @@ void cpu_loop(CPUSH4State *env)
exit(EXIT_FAILURE);
}
process_pending_signals (env);
+
+ /* Most of the traps imply an exception or interrupt, which
+ implies an REI instruction has been executed, which means
+ that LDST (aka LOK_ADDR) should be cleared. But there are
+ a few exceptions for traps internal to QEMU. */
+ if (arch_interrupt) {
+ env->lock_addr = -1;
+ }
}
}
#endif
@@ -2975,6 +2985,13 @@ void cpu_loop(CPUM68KState *env)
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
break;
+ case EXCP_CHK:
+ info.si_signo = TARGET_SIGFPE;
+ info.si_errno = 0;
+ info.si_code = TARGET_FPE_INTOVF;
+ info._sifields._sigfault._addr = env->pc;
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
case EXCP_DIV0:
info.si_signo = TARGET_SIGFPE;
info.si_errno = 0;
diff --git a/linux-user/signal.c b/linux-user/signal.c
index cf35473671..74fa03f96d 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -5612,13 +5612,14 @@ struct target_rt_sigframe
static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
abi_ulong mask)
{
+ uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
__put_user(mask, &sc->sc_mask);
__put_user(env->aregs[7], &sc->sc_usp);
__put_user(env->dregs[0], &sc->sc_d0);
__put_user(env->dregs[1], &sc->sc_d1);
__put_user(env->aregs[0], &sc->sc_a0);
__put_user(env->aregs[1], &sc->sc_a1);
- __put_user(env->sr, &sc->sc_sr);
+ __put_user(sr, &sc->sc_sr);
__put_user(env->pc, &sc->sc_pc);
}
@@ -5634,7 +5635,7 @@ restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
__get_user(env->aregs[1], &sc->sc_a1);
__get_user(env->pc, &sc->sc_pc);
__get_user(temp, &sc->sc_sr);
- env->sr = (env->sr & 0xff00) | (temp & 0xff);
+ cpu_m68k_set_ccr(env, temp);
}
/*
@@ -5726,7 +5727,7 @@ static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
CPUM68KState *env)
{
target_greg_t *gregs = uc->tuc_mcontext.gregs;
- uint32_t sr = cpu_m68k_get_ccr(env);
+ uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
__put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
__put_user(env->dregs[0], &gregs[0]);
@@ -6530,7 +6531,7 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
haddr = dest;
}
env->iaoq_f = haddr;
- env->iaoq_b = haddr + 4;;
+ env->iaoq_b = haddr + 4;
return;
give_sigsegv:
diff --git a/memory.c b/memory.c
index e26e5a3b1d..4b41fb837b 100644
--- a/memory.c
+++ b/memory.c
@@ -2189,11 +2189,6 @@ void memory_region_clear_flush_coalesced(MemoryRegion *mr)
}
}
-void memory_region_set_global_locking(MemoryRegion *mr)
-{
- mr->global_locking = true;
-}
-
void memory_region_clear_global_locking(MemoryRegion *mr)
{
mr->global_locking = false;
diff --git a/migration/block.c b/migration/block.c
index 7147171bb7..e68e090c6f 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -897,7 +897,7 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
int len, flags;
char device_name[256];
int64_t addr;
- BlockBackend *blk, *blk_prev = NULL;;
+ BlockBackend *blk, *blk_prev = NULL;
Error *local_err = NULL;
uint8_t *buf;
int64_t total_sectors = 0;
diff --git a/monitor.c b/monitor.c
index e36fb5308d..d682eee2d8 100644
--- a/monitor.c
+++ b/monitor.c
@@ -28,7 +28,6 @@
#include "hw/hw.h"
#include "monitor/qdev.h"
#include "hw/usb.h"
-#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "sysemu/watchdog.h"
#include "hw/loader.h"
diff --git a/nbd/server.c b/nbd/server.c
index 92c0fdd03b..e443b3cf5c 100644
--- a/nbd/server.c
+++ b/nbd/server.c
@@ -1303,6 +1303,7 @@ static int coroutine_fn nbd_co_send_structured_read(NBDClient *client,
uint64_t offset,
void *data,
size_t size,
+ bool final,
Error **errp)
{
NBDStructuredReadData chunk;
@@ -1313,13 +1314,73 @@ static int coroutine_fn nbd_co_send_structured_read(NBDClient *client,
assert(size);
trace_nbd_co_send_structured_read(handle, offset, data, size);
- set_be_chunk(&chunk.h, NBD_REPLY_FLAG_DONE, NBD_REPLY_TYPE_OFFSET_DATA,
- handle, sizeof(chunk) - sizeof(chunk.h) + size);
+ set_be_chunk(&chunk.h, final ? NBD_REPLY_FLAG_DONE : 0,
+ NBD_REPLY_TYPE_OFFSET_DATA, handle,
+ sizeof(chunk) - sizeof(chunk.h) + size);
stq_be_p(&chunk.offset, offset);
return nbd_co_send_iov(client, iov, 2, errp);
}
+static int coroutine_fn nbd_co_send_sparse_read(NBDClient *client,
+ uint64_t handle,
+ uint64_t offset,
+ uint8_t *data,
+ size_t size,
+ Error **errp)
+{
+ int ret = 0;
+ NBDExport *exp = client->exp;
+ size_t progress = 0;
+
+ while (progress < size) {
+ int64_t pnum;
+ int status = bdrv_block_status_above(blk_bs(exp->blk), NULL,
+ offset + progress,
+ size - progress, &pnum, NULL,
+ NULL);
+ bool final;
+
+ if (status < 0) {
+ error_setg_errno(errp, -status, "unable to check for holes");
+ return status;
+ }
+ assert(pnum && pnum <= size - progress);
+ final = progress + pnum == size;
+ if (status & BDRV_BLOCK_ZERO) {
+ NBDStructuredReadHole chunk;
+ struct iovec iov[] = {
+ {.iov_base = &chunk, .iov_len = sizeof(chunk)},
+ };
+
+ trace_nbd_co_send_structured_read_hole(handle, offset + progress,
+ pnum);
+ set_be_chunk(&chunk.h, final ? NBD_REPLY_FLAG_DONE : 0,
+ NBD_REPLY_TYPE_OFFSET_HOLE,
+ handle, sizeof(chunk) - sizeof(chunk.h));
+ stq_be_p(&chunk.offset, offset + progress);
+ stl_be_p(&chunk.length, pnum);
+ ret = nbd_co_send_iov(client, iov, 1, errp);
+ } else {
+ ret = blk_pread(exp->blk, offset + progress + exp->dev_offset,
+ data + progress, pnum);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "reading from file failed");
+ break;
+ }
+ ret = nbd_co_send_structured_read(client, handle, offset + progress,
+ data + progress, pnum, final,
+ errp);
+ }
+
+ if (ret < 0) {
+ break;
+ }
+ progress += pnum;
+ }
+ return ret;
+}
+
static int coroutine_fn nbd_co_send_structured_error(NBDClient *client,
uint64_t handle,
uint32_t error,
@@ -1481,6 +1542,17 @@ static coroutine_fn void nbd_trip(void *opaque)
}
}
+ if (client->structured_reply && !(request.flags & NBD_CMD_FLAG_DF) &&
+ request.len) {
+ ret = nbd_co_send_sparse_read(req->client, request.handle,
+ request.from, req->data, request.len,
+ &local_err);
+ if (ret < 0) {
+ goto reply;
+ }
+ goto done;
+ }
+
ret = blk_pread(exp->blk, request.from + exp->dev_offset,
req->data, request.len);
if (ret < 0) {
@@ -1561,7 +1633,8 @@ reply:
} else if (reply_data_len) {
ret = nbd_co_send_structured_read(req->client, request.handle,
request.from, req->data,
- reply_data_len, &local_err);
+ reply_data_len, true,
+ &local_err);
} else {
ret = nbd_co_send_structured_done(req->client, request.handle,
&local_err);
diff --git a/nbd/trace-events b/nbd/trace-events
index 92568edce5..2b8268ce8c 100644
--- a/nbd/trace-events
+++ b/nbd/trace-events
@@ -57,6 +57,7 @@ nbd_blk_aio_detach(const char *name, void *ctx) "Export %s: Detaching clients fr
nbd_co_send_simple_reply(uint64_t handle, uint32_t error, const char *errname, int len) "Send simple reply: handle = %" PRIu64 ", error = %" PRIu32 " (%s), len = %d"
nbd_co_send_structured_done(uint64_t handle) "Send structured reply done: handle = %" PRIu64
nbd_co_send_structured_read(uint64_t handle, uint64_t offset, void *data, size_t size) "Send structured read data reply: handle = %" PRIu64 ", offset = %" PRIu64 ", data = %p, len = %zu"
+nbd_co_send_structured_read_hole(uint64_t handle, uint64_t offset, size_t size) "Send structured read hole reply: handle = %" PRIu64 ", offset = %" PRIu64 ", len = %zu"
nbd_co_send_structured_error(uint64_t handle, int err, const char *errname, const char *msg) "Send structured error reply: handle = %" PRIu64 ", error = %d (%s), msg = '%s'"
nbd_co_receive_request_decode_type(uint64_t handle, uint16_t type, const char *name) "Decoding type: handle = %" PRIu64 ", type = %" PRIu16 " (%s)"
nbd_co_receive_request_payload_received(uint64_t handle, uint32_t len) "Payload received: handle = %" PRIu64 ", len = %" PRIu32
diff --git a/net/colo-compare.c b/net/colo-compare.c
index 1ce195f877..0ebdec936c 100644
--- a/net/colo-compare.c
+++ b/net/colo-compare.c
@@ -23,7 +23,6 @@
#include "qom/object_interfaces.h"
#include "qemu/iov.h"
#include "qom/object.h"
-#include "qemu/typedefs.h"
#include "net/queue.h"
#include "chardev/char-fe.h"
#include "qemu/sockets.h"
diff --git a/net/net.c b/net/net.c
index 39ef546708..2b81c93193 100644
--- a/net/net.c
+++ b/net/net.c
@@ -1565,13 +1565,6 @@ int net_init_clients(void)
int net_client_parse(QemuOptsList *opts_list, const char *optarg)
{
-#if defined(CONFIG_SLIRP)
- int ret;
- if (net_slirp_parse_legacy(opts_list, optarg, &ret)) {
- return ret;
- }
-#endif
-
if (!qemu_opts_parse_noisily(opts_list, optarg, true)) {
return -1;
}
@@ -1581,25 +1574,48 @@ int net_client_parse(QemuOptsList *opts_list, const char *optarg)
/* From FreeBSD */
/* XXX: optimize */
-unsigned compute_mcast_idx(const uint8_t *ep)
+uint32_t net_crc32(const uint8_t *p, int len)
{
uint32_t crc;
int carry, i, j;
uint8_t b;
crc = 0xffffffff;
- for (i = 0; i < 6; i++) {
- b = *ep++;
+ for (i = 0; i < len; i++) {
+ b = *p++;
for (j = 0; j < 8; j++) {
carry = ((crc & 0x80000000L) ? 1 : 0) ^ (b & 0x01);
crc <<= 1;
b >>= 1;
if (carry) {
- crc = ((crc ^ POLYNOMIAL) | carry);
+ crc = ((crc ^ POLYNOMIAL_BE) | carry);
}
}
}
- return crc >> 26;
+
+ return crc;
+}
+
+uint32_t net_crc32_le(const uint8_t *p, int len)
+{
+ uint32_t crc;
+ int carry, i, j;
+ uint8_t b;
+
+ crc = 0xffffffff;
+ for (i = 0; i < len; i++) {
+ b = *p++;
+ for (j = 0; j < 8; j++) {
+ carry = (crc & 0x1) ^ (b & 0x01);
+ crc >>= 1;
+ b >>= 1;
+ if (carry) {
+ crc ^= POLYNOMIAL_LE;
+ }
+ }
+ }
+
+ return crc;
}
QemuOptsList qemu_netdev_opts = {
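To show the relationship with the removed helper: the old compute_mcast_idx() value can still be derived from the more general net_crc32() by hashing the 6-byte MAC address and keeping the top six bits of the big-endian CRC (a sketch, assuming the matching prototype in the net headers).

    static unsigned example_mcast_idx(const uint8_t *mac)
    {
        /* same value the former compute_mcast_idx() returned */
        return net_crc32(mac, 6) >> 26;
    }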
diff --git a/net/slirp.c b/net/slirp.c
index 318a26e892..cb8ca2312f 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -956,37 +956,3 @@ int net_init_slirp(const Netdev *netdev, const char *name,
return ret;
}
-
-int net_slirp_parse_legacy(QemuOptsList *opts_list, const char *optarg, int *ret)
-{
- if (strcmp(opts_list->name, "net") != 0 ||
- strncmp(optarg, "channel,", strlen("channel,")) != 0) {
- return 0;
- }
-
- error_report("The '-net channel' option is deprecated. "
- "Please use '-netdev user,guestfwd=...' instead.");
-
- /* handle legacy -net channel,port:chr */
- optarg += strlen("channel,");
-
- if (QTAILQ_EMPTY(&slirp_stacks)) {
- struct slirp_config_str *config;
-
- config = g_malloc(sizeof(*config));
- pstrcpy(config->str, sizeof(config->str), optarg);
- config->flags = SLIRP_CFG_LEGACY;
- config->next = slirp_configs;
- slirp_configs = config;
- *ret = 0;
- } else {
- Error *err = NULL;
- *ret = slirp_guestfwd(QTAILQ_FIRST(&slirp_stacks), optarg, 1, &err);
- if (*ret < 0) {
- error_report_err(err);
- }
- }
-
- return 1;
-}
-
diff --git a/numa.c b/numa.c
index 7151b24d1c..7b9c33ad12 100644
--- a/numa.c
+++ b/numa.c
@@ -29,7 +29,6 @@
#include "qemu/bitmap.h"
#include "qom/cpu.h"
#include "qemu/error-report.h"
-#include "include/exec/cpu-common.h" /* for RAM_ADDR_FMT */
#include "qapi-visit.h"
#include "qapi/opts-visitor.h"
#include "hw/boards.h"
@@ -55,92 +54,6 @@ int nb_numa_nodes;
bool have_numa_distance;
NodeInfo numa_info[MAX_NODES];
-void numa_set_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
-{
- struct numa_addr_range *range;
-
- /*
- * Memory-less nodes can come here with 0 size in which case,
- * there is nothing to do.
- */
- if (!size) {
- return;
- }
-
- range = g_malloc0(sizeof(*range));
- range->mem_start = addr;
- range->mem_end = addr + size - 1;
- QLIST_INSERT_HEAD(&numa_info[node].addr, range, entry);
-}
-
-void numa_unset_mem_node_id(ram_addr_t addr, uint64_t size, uint32_t node)
-{
- struct numa_addr_range *range, *next;
-
- QLIST_FOREACH_SAFE(range, &numa_info[node].addr, entry, next) {
- if (addr == range->mem_start && (addr + size - 1) == range->mem_end) {
- QLIST_REMOVE(range, entry);
- g_free(range);
- return;
- }
- }
-}
-
-static void numa_set_mem_ranges(void)
-{
- int i;
- ram_addr_t mem_start = 0;
-
- /*
- * Deduce start address of each node and use it to store
- * the address range info in numa_info address range list
- */
- for (i = 0; i < nb_numa_nodes; i++) {
- numa_set_mem_node_id(mem_start, numa_info[i].node_mem, i);
- mem_start += numa_info[i].node_mem;
- }
-}
-
-/*
- * Check if @addr falls under NUMA @node.
- */
-static bool numa_addr_belongs_to_node(ram_addr_t addr, uint32_t node)
-{
- struct numa_addr_range *range;
-
- QLIST_FOREACH(range, &numa_info[node].addr, entry) {
- if (addr >= range->mem_start && addr <= range->mem_end) {
- return true;
- }
- }
- return false;
-}
-
-/*
- * Given an address, return the index of the NUMA node to which the
- * address belongs to.
- */
-uint32_t numa_get_node(ram_addr_t addr, Error **errp)
-{
- uint32_t i;
-
- /* For non NUMA configurations, check if the addr falls under node 0 */
- if (!nb_numa_nodes) {
- if (numa_addr_belongs_to_node(addr, 0)) {
- return 0;
- }
- }
-
- for (i = 0; i < nb_numa_nodes; i++) {
- if (numa_addr_belongs_to_node(addr, i)) {
- return i;
- }
- }
-
- error_setg(errp, "Address 0x" RAM_ADDR_FMT " doesn't belong to any "
- "NUMA node", addr);
- return -1;
-}
static void parse_numa_node(MachineState *ms, NumaNodeOptions *node,
Error **errp)
@@ -497,12 +410,6 @@ void parse_numa_opts(MachineState *ms)
exit(1);
}
- for (i = 0; i < nb_numa_nodes; i++) {
- QLIST_INIT(&numa_info[i].addr);
- }
-
- numa_set_mem_ranges();
-
/* QEMU needs at least all unique node pair distances to build
* the whole NUMA distance table. QEMU treats the distance table
* as symmetric by default, i.e. distance A->B == distance B->A.
@@ -522,8 +429,6 @@ void parse_numa_opts(MachineState *ms)
/* Validation succeeded, now fill in any missing distances. */
complete_init_numa_distance();
}
- } else {
- numa_set_mem_node_id(0, ram_size, 0);
}
}
diff --git a/pc-bios/s390-ccw.img b/pc-bios/s390-ccw.img
index 7415f1a3e7..97155d2638 100644
--- a/pc-bios/s390-ccw.img
+++ b/pc-bios/s390-ccw.img
Binary files differ
diff --git a/pc-bios/s390-ccw/start.S b/pc-bios/s390-ccw/start.S
index 43f9bd243e..eb8d024dbb 100644
--- a/pc-bios/s390-ccw/start.S
+++ b/pc-bios/s390-ccw/start.S
@@ -3,7 +3,7 @@
* into the pc-bios directory of qemu.
*
* Copyright (c) 2013 Alexander Graf <agraf@suse.de>
- * Copyright 2013 IBM Corp.
+ * Copyright IBM Corp. 2013, 2017
*
* This work is licensed under the terms of the GNU GPL, version 2 or (at
* your option) any later version. See the COPYING file in the top-level
@@ -13,8 +13,32 @@
.globl _start
_start:
-larl %r15, stack + 0x8000 /* Set up stack */
-j main /* And call C */
+ larl %r15, stack + 0x8000 /* Set up stack */
+
+ /* clear bss */
+ larl %r2, __bss_start
+ larl %r3, _end
+ slgr %r3, %r2 /* get sizeof bss */
+ ltgr %r3,%r3 /* bss empty? */
+ jz done
+ aghi %r3,-1
+ srlg %r4,%r3,8 /* how many 256 byte chunks? */
+ ltgr %r4,%r4
+ lgr %r1,%r2
+ jz remainder
+loop:
+ xc 0(256,%r1),0(%r1)
+ la %r1,256(%r1)
+ brctg %r4,loop
+remainder:
+ larl %r2,memsetxc
+ ex %r3,0(%r2)
+done:
+ j main /* And call C */
+
+memsetxc:
+ xc 0(1,%r1),0(%r1)
+
/*
* void disabled_wait(void)
diff --git a/qapi-schema.json b/qapi-schema.json
index 18457954a8..5c06745c79 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -1046,17 +1046,6 @@
{ 'command': 'system_powerdown' }
##
-# @cpu:
-#
-# This command is a nop that is only provided for the purposes of compatibility.
-#
-# Since: 0.14.0
-#
-# Notes: Do not use this command.
-##
-{ 'command': 'cpu', 'data': {'index': 'int'} }
-
-##
# @cpu-add:
#
# Adds CPU with specified ID
@@ -3188,7 +3177,7 @@
#
# Show Virtual Machine Generation ID
#
-# Since 2.9
+# Since: 2.9
##
{ 'command': 'query-vm-generation-id', 'returns': 'GuidInfo' }
diff --git a/qapi/block-core.json b/qapi/block-core.json
index dd763dcf87..e94a6881b2 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -3918,6 +3918,10 @@
# does not support all kinds of operations, all kinds of children, nor
# all block drivers.
#
+# FIXME Removing children from a quorum node means introducing gaps in the
+# child indices. This cannot be represented in the 'children' list of
+# BlockdevOptionsQuorum, as returned by .bdrv_refresh_filename().
+#
# Warning: The data in a new quorum child MUST be consistent with that of
# the rest of the array.
#
@@ -3949,3 +3953,43 @@
'data' : { 'parent': 'str',
'*child': 'str',
'*node': 'str' } }
+
+##
+# @x-blockdev-set-iothread:
+#
+# Move @node and its children into the @iothread. If @iothread is null then
+# move @node and its children into the main loop.
+#
+# The node must not be attached to a BlockBackend.
+#
+# @node-name: the name of the block driver node
+#
+# @iothread: the name of the IOThread object or null for the main loop
+#
+# @force: true if the node and its children should be moved when a BlockBackend
+# is already attached
+#
+# Note: this command is experimental and intended for test cases that need
+# control over IOThreads only.
+#
+# Since: 2.12
+#
+# Example:
+#
+# 1. Move a node into an IOThread
+# -> { "execute": "x-blockdev-set-iothread",
+# "arguments": { "node-name": "disk1",
+# "iothread": "iothread0" } }
+# <- { "return": {} }
+#
+# 2. Move a node into the main loop
+# -> { "execute": "x-blockdev-set-iothread",
+# "arguments": { "node-name": "disk1",
+# "iothread": null } }
+# <- { "return": {} }
+#
+##
+{ 'command': 'x-blockdev-set-iothread',
+ 'data' : { 'node-name': 'str',
+ 'iothread': 'StrOrNull',
+ '*force': 'bool' } }
diff --git a/qemu-doc.texi b/qemu-doc.texi
index db2351c746..9d0159832e 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -245,6 +245,222 @@ targets do not need a disk image.
@c man end
+@subsection Device URL Syntax
+@c TODO merge this with section Disk Images
+
+@c man begin NOTES
+
+In addition to using normal file images for the emulated storage devices,
+QEMU can also use networked resources such as iSCSI devices. These are
+specified using a special URL syntax.
+
+@table @option
+@item iSCSI
+iSCSI support allows QEMU to access iSCSI resources directly and use them as
+images for the guest storage. Both disk and cdrom images are supported.
+
+Syntax for specifying iSCSI LUNs is
+``iscsi://<target-ip>[:<port>]/<target-iqn>/<lun>''
+
+By default, QEMU will use the iSCSI initiator-name
+'iqn.2008-11.org.linux-kvm[:<name>]', but this can also be set from the command
+line or a configuration file.
+
+Since QEMU 2.4 it is possible to specify an iSCSI request timeout to detect
+stalled requests and force a reestablishment of the session. The timeout
+is specified in seconds. The default is 0, which means no timeout. Libiscsi
+1.15.0 or greater is required for this feature.
+
+Example (without authentication):
+@example
+qemu-system-i386 -iscsi initiator-name=iqn.2001-04.com.example:my-initiator \
+ -cdrom iscsi://192.0.2.1/iqn.2001-04.com.example/2 \
+ -drive file=iscsi://192.0.2.1/iqn.2001-04.com.example/1
+@end example
+
+Example (CHAP username/password via URL):
+@example
+qemu-system-i386 -drive file=iscsi://user%password@@192.0.2.1/iqn.2001-04.com.example/1
+@end example
+
+Example (CHAP username/password via environment variables):
+@example
+LIBISCSI_CHAP_USERNAME="user" \
+LIBISCSI_CHAP_PASSWORD="password" \
+qemu-system-i386 -drive file=iscsi://192.0.2.1/iqn.2001-04.com.example/1
+@end example
+
+@item NBD
+QEMU supports NBD (Network Block Devices) over both the TCP protocol and
+Unix Domain Sockets.
+
+Syntax for specifying an NBD device using TCP
+``nbd:<server-ip>:<port>[:exportname=<export>]''
+
+Syntax for specifying an NBD device using Unix Domain Sockets
+``nbd:unix:<domain-socket>[:exportname=<export>]''
+
+Example for TCP
+@example
+qemu-system-i386 --drive file=nbd:192.0.2.1:30000
+@end example
+
+Example for Unix Domain Sockets
+@example
+qemu-system-i386 --drive file=nbd:unix:/tmp/nbd-socket
+@end example
+
+@item SSH
+QEMU supports SSH (Secure Shell) access to remote disks.
+
+Examples:
+@example
+qemu-system-i386 -drive file=ssh://user@@host/path/to/disk.img
+qemu-system-i386 -drive file.driver=ssh,file.user=user,file.host=host,file.port=22,file.path=/path/to/disk.img
+@end example
+
+Currently authentication must be done using ssh-agent. Other
+authentication methods may be supported in the future.
+
+@item Sheepdog
+Sheepdog is a distributed storage system for QEMU.
+QEMU supports using either local sheepdog devices or remote networked
+devices.
+
+Syntax for specifying a sheepdog device
+@example
+sheepdog[+tcp|+unix]://[host:port]/vdiname[?socket=path][#snapid|#tag]
+@end example
+
+Example
+@example
+qemu-system-i386 --drive file=sheepdog://192.0.2.1:30000/MyVirtualMachine
+@end example
+
+See also @url{https://sheepdog.github.io/sheepdog/}.
+
+@item GlusterFS
+GlusterFS is a user space distributed file system.
+QEMU supports the use of GlusterFS volumes for hosting VM disk images using
+TCP, Unix Domain Sockets and RDMA transport protocols.
+
+Syntax for specifying a VM disk image on a GlusterFS volume is
+@example
+
+URI:
+gluster[+type]://[host[:port]]/volume/path[?socket=...][,debug=N][,logfile=...]
+
+JSON:
+'json:@{"driver":"qcow2","file":@{"driver":"gluster","volume":"testvol","path":"a.img","debug":N,"logfile":"...",
+@ "server":[@{"type":"tcp","host":"...","port":"..."@},
+@ @{"type":"unix","socket":"..."@}]@}@}'
+@end example
+
+
+Example
+@example
+URI:
+qemu-system-x86_64 --drive file=gluster://192.0.2.1/testvol/a.img,
+@ file.debug=9,file.logfile=/var/log/qemu-gluster.log
+
+JSON:
+qemu-system-x86_64 'json:@{"driver":"qcow2",
+@ "file":@{"driver":"gluster",
+@ "volume":"testvol","path":"a.img",
+@ "debug":9,"logfile":"/var/log/qemu-gluster.log",
+@ "server":[@{"type":"tcp","host":"1.2.3.4","port":24007@},
+@ @{"type":"unix","socket":"/var/run/glusterd.socket"@}]@}@}'
+qemu-system-x86_64 -drive driver=qcow2,file.driver=gluster,file.volume=testvol,file.path=/path/a.img,
+@ file.debug=9,file.logfile=/var/log/qemu-gluster.log,
+@ file.server.0.type=tcp,file.server.0.host=1.2.3.4,file.server.0.port=24007,
+@ file.server.1.type=unix,file.server.1.socket=/var/run/glusterd.socket
+@end example
+
+See also @url{http://www.gluster.org}.
+
+@item HTTP/HTTPS/FTP/FTPS
+QEMU supports read-only access to files accessed over http(s) and ftp(s).
+
+Syntax using a single filename:
+@example
+<protocol>://[<username>[:<password>]@@]<host>/<path>
+@end example
+
+where:
+@table @option
+@item protocol
+'http', 'https', 'ftp', or 'ftps'.
+
+@item username
+Optional username for authentication to the remote server.
+
+@item password
+Optional password for authentication to the remote server.
+
+@item host
+Address of the remote server.
+
+@item path
+Path on the remote server, including any query string.
+@end table
+
+The following options are also supported:
+@table @option
+@item url
+The full URL when passing options to the driver explicitly.
+
+@item readahead
+The amount of data to read ahead with each range request to the remote server.
+This value may optionally have the suffix 'T', 'G', 'M', 'K', 'k' or 'b'. If it
+does not have a suffix, it will be assumed to be in bytes. The value must be a
+multiple of 512 bytes. It defaults to 256k.
+
+@item sslverify
+Whether to verify the remote server's certificate when connecting over SSL. It
+can have the value 'on' or 'off'. It defaults to 'on'.
+
+@item cookie
+Send this cookie (it can also be a list of cookies separated by ';') with
+each outgoing request. Only supported when using protocols such as HTTP
+which support cookies, otherwise ignored.
+
+@item timeout
+Set the timeout in seconds of the CURL connection. This timeout is the time
+that CURL waits for a response from the remote server to get the size of the
+image to be downloaded. If not set, the default timeout of 5 seconds is used.
+@end table
+
+Note that when passing options to qemu explicitly, @option{driver} is the value
+of <protocol>.
+
+Example: boot from a remote Fedora 20 live ISO image
+@example
+qemu-system-x86_64 --drive media=cdrom,file=http://dl.fedoraproject.org/pub/fedora/linux/releases/20/Live/x86_64/Fedora-Live-Desktop-x86_64-20-1.iso,readonly
+
+qemu-system-x86_64 --drive media=cdrom,file.driver=http,file.url=http://dl.fedoraproject.org/pub/fedora/linux/releases/20/Live/x86_64/Fedora-Live-Desktop-x86_64-20-1.iso,readonly
+@end example
+
+Example: boot from a remote Fedora 20 cloud image using a local overlay for
+writes, copy-on-read, and a readahead of 64k
+@example
+qemu-img create -f qcow2 -o backing_file='json:@{"file.driver":"http",, "file.url":"https://dl.fedoraproject.org/pub/fedora/linux/releases/20/Images/x86_64/Fedora-x86_64-20-20131211.1-sda.qcow2",, "file.readahead":"64k"@}' /tmp/Fedora-x86_64-20-20131211.1-sda.qcow2
+
+qemu-system-x86_64 -drive file=/tmp/Fedora-x86_64-20-20131211.1-sda.qcow2,copy-on-read=on
+@end example
+
+Example: boot from an image stored on a VMware vSphere server with a self-signed
+certificate using a local overlay for writes, a readahead of 64k and a timeout
+of 10 seconds.
+@example
+qemu-img create -f qcow2 -o backing_file='json:@{"file.driver":"https",, "file.url":"https://user:password@@vsphere.example.com/folder/test/test-flat.vmdk?dcPath=Datacenter&dsName=datastore1",, "file.sslverify":"off",, "file.readahead":"64k",, "file.timeout":10@}' /tmp/test.qcow2
+
+qemu-system-x86_64 -drive file=/tmp/test.qcow2
+@end example
+
+@end table
+
+@c man end
+
@node pcsys_keys
@section Keys in the graphical frontends
@@ -2373,12 +2589,6 @@ deprecated.
@section System emulator command line arguments
-@subsection -drive boot=on|off (since 1.3.0)
-
-The ``boot=on|off'' option to the ``-drive'' argument is
-ignored. Applications should use the ``bootindex=N'' parameter
-to set an absolute ordering between devices instead.
-
@subsection -tdf (since 1.3.0)
The ``-tdf'' argument is ignored. The behaviour implemented
@@ -2396,11 +2606,6 @@ synonym for setting ``-global kvm-pit.lost_tick_policy=discard''.
The ``-no-kvm-irqchip'' argument is now a synonym for
setting ``-machine kernel_irqchip=off''.
-@subsection -no-kvm-pit (since 1.3.0)
-
-The ``-no-kvm-pit'' argument is ignored. It is no longer
-possible to disable the KVM PIT directly.
-
@subsection -no-kvm (since 1.3.0)
The ``-no-kvm'' argument is now a synonym for setting
@@ -2437,32 +2642,36 @@ combined with ``-vnc tls-creds=tls0'
@subsection -tftp (since 2.6.0)
-The ``-tftp /some/dir'' argument is now a synonym for setting
-the ``-netdev user,tftp=/some/dir' argument. The new syntax
-allows different settings to be provided per NIC.
+The ``-tftp /some/dir'' argument is replaced by
+``-netdev user,id=x,tftp=/some/dir'', either accompanied with
+``-device ...,netdev=x'' (for pluggable NICs) or ``-net nic,netdev=x''
+(for embedded NICs). The new syntax allows different settings to be
+provided per NIC.
@subsection -bootp (since 2.6.0)
-The ``-bootp /some/file'' argument is now a synonym for setting
-the ``-netdev user,bootp=/some/file' argument. The new syntax
-allows different settings to be provided per NIC.
+The ``-bootp /some/file'' argument is replaced by
+``-netdev user,id=x,bootp=/some/file'', either accompanied with
+``-device ...,netdev=x'' (for pluggable NICs) or ``-net nic,netdev=x''
+(for embedded NICs). The new syntax allows different settings to be
+provided per NIC.
@subsection -redir (since 2.6.0)
-The ``-redir ARGS'' argument is now a synonym for setting
-the ``-netdev user,hostfwd=ARGS'' argument instead. The new
-syntax allows different settings to be provided per NIC.
+The ``-redir [tcp|udp]:hostport:[guestaddr]:guestport'' argument is
+replaced by ``-netdev
+user,id=x,hostfwd=[tcp|udp]:[hostaddr]:hostport-[guestaddr]:guestport'',
+either accompanied with ``-device ...,netdev=x'' (for pluggable NICs) or
+``-net nic,netdev=x'' (for embedded NICs). The new syntax allows different
+settings to be provided per NIC.
@subsection -smb (since 2.6.0)
-The ``-smb /some/dir'' argument is now a synonym for setting
-the ``-netdev user,smb=/some/dir'' argument instead. The new
-syntax allows different settings to be provided per NIC.
-
-@subsection -net channel (since 2.6.0)
-
-The ``--net channel,ARGS'' argument is now a synonym for setting
-the ``-netdev user,guestfwd=ARGS'' argument instead.
+The ``-smb /some/dir'' argument is replaced by
+``-netdev user,id=x,smb=/some/dir'', either accompanied with
+``-device ...,netdev=x'' (for pluggable NICs) or ``-net nic,netdev=x''
+(for embedded NICs). The new syntax allows different settings to be
+provided per NIC.
@subsection -net vlan (since 2.9.0)
@@ -2475,20 +2684,27 @@ longer be directly supported in QEMU.
The ``-drive if=scsi'' argument is replaced by the
``-device BUS-TYPE'' argument combined with ``-drive if=none''.
+@subsection -drive cyls=...,heads=...,secs=...,trans=... (since 2.10.0)
+
+The drive geometry arguments are replaced by the geometry arguments
+that can be specified with the ``-device'' parameter.
+
+@subsection -drive serial=... (since 2.10.0)
+
+The drive serial argument is replaced by the serial argument
+that can be specified with the ``-device'' parameter.
+
+@subsection -drive addr=... (since 2.10.0)
+
+The drive addr argument is replaced by the addr argument
+that can be specified with the ``-device'' parameter.
+
@subsection -net dump (since 2.10.0)
The ``--net dump'' argument is now replaced with the
``-object filter-dump'' argument which works in combination
with the modern ``-netdev`` backends instead.
-@subsection -hdachs (since 2.10.0)
-
-The ``-hdachs'' argument is now a synonym for setting
-the ``cyls'', ``heads'', ``secs'', and ``trans'' properties
-on the ``ide-hd'' device using the ``-device'' argument.
-The new syntax allows different settings to be provided
-per disk.
-
@subsection -usbdevice (since 2.10.0)
The ``-usbdevice DEV'' argument is now a synonym for setting
@@ -2501,6 +2717,22 @@ enabled via the ``-machine usb=on'' argument.
The ``-nodefconfig`` argument is a synonym for ``-no-user-config``.
+@subsection -machine s390-squash-mcss=on|off (since 2.12.0)
+
+The ``s390-squash-mcss=on'' property has been obsoleted by allowing the
+cssid to be chosen freely. Instead of squashing subchannels into the
+default channel subsystem image for guests that do not support multiple
+channel subsystems, all devices can be put into the default channel
+subsystem image.
+
+@subsection -fsdev handle (since 2.12.0)
+
+The ``handle'' fsdev backend does not support symlinks and causes the 9p
+filesystem in the guest to fail a fair number of tests from the PJD POSIX
+filesystem test suite. It also requires the CAP_DAC_READ_SEARCH capability;
+running QEMU with that capability is not recommended. This backend should
+not be used, and it will be removed with no replacement.
+
@section qemu-img command line arguments
@subsection convert -s (since 2.0.0)
@@ -2518,14 +2750,6 @@ The ``host_net_add'' command is replaced by the ``netdev_add'' command.
The ``host_net_remove'' command is replaced by the ``netdev_del'' command.
-@subsection usb_add (since 2.10.0)
-
-The ``usb_add'' command is replaced by the ``device_add'' command.
-
-@subsection usb_del (since 2.10.0)
-
-The ``usb_del'' command is replaced by the ``device_del'' command.
-
@section System emulator devices
@subsection ivshmem (since 2.6.0)
diff --git a/qemu-io-cmds.c b/qemu-io-cmds.c
index de8e3de726..a6a70fc3dc 100644
--- a/qemu-io-cmds.c
+++ b/qemu-io-cmds.c
@@ -2013,8 +2013,11 @@ static int reopen_f(BlockBackend *blk, int argc, char **argv)
opts = qopts ? qemu_opts_to_qdict(qopts, NULL) : NULL;
qemu_opts_reset(&reopen_opts);
+ bdrv_subtree_drained_begin(bs);
brq = bdrv_reopen_queue(NULL, bs, opts, flags);
bdrv_reopen_multiple(bdrv_get_aio_context(bs), brq, &local_err);
+ bdrv_subtree_drained_end(bs);
+
if (local_err) {
error_report_err(local_err);
} else {
diff --git a/qemu-nbd.c b/qemu-nbd.c
index d75ca51482..3723493be1 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -37,6 +37,7 @@
#include "qapi/qmp/qstring.h"
#include "qom/object_interfaces.h"
#include "io/channel-socket.h"
+#include "io/net-listener.h"
#include "crypto/init.h"
#include "trace/control.h"
#include "qemu-version.h"
@@ -62,8 +63,7 @@ static int persistent = 0;
static enum { RUNNING, TERMINATE, TERMINATING, TERMINATED } state;
static int shared = 1;
static int nb_fds;
-static QIOChannelSocket *server_ioc;
-static int server_watch = -1;
+static QIONetListener *server;
static QCryptoTLSCreds *tlscreds;
static void usage(const char *name)
@@ -344,44 +344,25 @@ static void nbd_client_closed(NBDClient *client, bool negotiated)
nbd_client_put(client);
}
-static gboolean nbd_accept(QIOChannel *ioc, GIOCondition cond, gpointer opaque)
+static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc,
+ gpointer opaque)
{
- QIOChannelSocket *cioc;
-
- cioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(ioc),
- NULL);
- if (!cioc) {
- return TRUE;
- }
-
if (state >= TERMINATE) {
- object_unref(OBJECT(cioc));
- return TRUE;
+ return;
}
nb_fds++;
nbd_update_server_watch();
nbd_client_new(newproto ? NULL : exp, cioc,
tlscreds, NULL, nbd_client_closed);
- object_unref(OBJECT(cioc));
-
- return TRUE;
}
static void nbd_update_server_watch(void)
{
if (nbd_can_accept()) {
- if (server_watch == -1) {
- server_watch = qio_channel_add_watch(QIO_CHANNEL(server_ioc),
- G_IO_IN,
- nbd_accept,
- NULL, NULL);
- }
+ qio_net_listener_set_client_func(server, nbd_accept, NULL, NULL);
} else {
- if (server_watch != -1) {
- g_source_remove(server_watch);
- server_watch = -1;
- }
+ qio_net_listener_set_client_func(server, NULL, NULL, NULL);
}
}
@@ -915,23 +896,29 @@ int main(int argc, char **argv)
snprintf(sockpath, 128, SOCKET_PATH, basename(device));
}
+ server = qio_net_listener_new();
if (socket_activation == 0) {
- server_ioc = qio_channel_socket_new();
saddr = nbd_build_socket_address(sockpath, bindto, port);
- if (qio_channel_socket_listen_sync(server_ioc, saddr, &local_err) < 0) {
- object_unref(OBJECT(server_ioc));
+ if (qio_net_listener_open_sync(server, saddr, &local_err) < 0) {
+ object_unref(OBJECT(server));
error_report_err(local_err);
- return 1;
+ exit(EXIT_FAILURE);
}
} else {
+ size_t i;
/* See comment in check_socket_activation above. */
- assert(socket_activation == 1);
- server_ioc = qio_channel_socket_new_fd(FIRST_SOCKET_ACTIVATION_FD,
- &local_err);
- if (server_ioc == NULL) {
- error_report("Failed to use socket activation: %s",
- error_get_pretty(local_err));
- exit(EXIT_FAILURE);
+ for (i = 0; i < socket_activation; i++) {
+ QIOChannelSocket *sioc;
+ sioc = qio_channel_socket_new_fd(FIRST_SOCKET_ACTIVATION_FD + i,
+ &local_err);
+ if (sioc == NULL) {
+ object_unref(OBJECT(server));
+ error_report("Failed to use socket activation: %s",
+ error_get_pretty(local_err));
+ exit(EXIT_FAILURE);
+ }
+ qio_net_listener_add(server, sioc);
+ object_unref(OBJECT(sioc));
}
}
diff --git a/qemu-options-wrapper.h b/qemu-options-wrapper.h
index 4d7aeb1352..13bfea0294 100644
--- a/qemu-options-wrapper.h
+++ b/qemu-options-wrapper.h
@@ -14,7 +14,7 @@
#define ARCHHEADING(text, arch_mask) \
if ((arch_mask) & arch_type) \
- puts(stringify(text) ":");
+ puts(stringify(text));
#define DEFHEADING(text) ARCHHEADING(text, QEMU_ARCH_ALL)
diff --git a/qemu-options.hx b/qemu-options.hx
index f11c4ac960..678181c599 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -6,7 +6,7 @@ HXCOMM construct option structures, enums and help message for specified
HXCOMM architectures.
HXCOMM HXCOMM can be used for comments, discarded from both texi and C
-DEFHEADING(Standard options)
+DEFHEADING(Standard options:)
STEXI
@table @option
ETEXI
@@ -31,7 +31,7 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
"-machine [type=]name[,prop[=value][,...]]\n"
" selects emulated machine ('-machine help' for list)\n"
" property accel=accel1[:accel2[:...]] selects accelerator\n"
- " supported accelerators are kvm, xen, hax or tcg (default: tcg)\n"
+ " supported accelerators are kvm, xen, hax, hvf or tcg (default: tcg)\n"
" kernel_irqchip=on|off|split controls accelerated irqchip support (default=off)\n"
" vmport=on|off|auto controls emulation of vmport (default: auto)\n"
" kvm_shadow_mem=size of KVM shadow MMU in bytes\n"
@@ -43,7 +43,7 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
" suppress-vmdesc=on|off disables self-describing migration (default=off)\n"
" nvdimm=on|off controls NVDIMM support (default=off)\n"
" enforce-config-section=on|off enforce configuration section migration (default=off)\n"
- " s390-squash-mcss=on|off controls support for squashing into default css (default=off)\n",
+ " s390-squash-mcss=on|off (deprecated) controls support for squashing into default css (default=off)\n",
QEMU_ARCH_ALL)
STEXI
@item -machine [type=]@var{name}[,prop=@var{value}[,...]]
@@ -66,7 +66,7 @@ Supported machine properties are:
@table @option
@item accel=@var{accels1}[:@var{accels2}[:...]]
This is used to enable an accelerator. Depending on the target architecture,
-kvm, xen, hax or tcg can be available. By default, tcg is used. If there is
+kvm, xen, hax, hvf or tcg can be available. By default, tcg is used. If there is
more than one accelerator specified, the next one is used if the previous one
fails to initialize.
@item kernel_irqchip=on|off
@@ -98,6 +98,12 @@ Enables or disables NVDIMM support. The default is off.
@item s390-squash-mcss=on|off
Enables or disables squashing subchannels into the default css.
The default is off.
+NOTE: This property is deprecated and will be removed in future releases.
+The ``s390-squash-mcss=on`` property has been obsoleted by allowing the
+cssid to be chosen freely. Instead of squashing subchannels into the
+default channel subsystem image for guests that do not support multiple
+channel subsystems, all devices can be put into the default channel
+subsystem image.
@item enforce-config-section=on|off
If @option{enforce-config-section} is set to @var{on}, force migration
code to send configuration section even if the machine-type sets the
@@ -120,13 +126,13 @@ ETEXI
DEF("accel", HAS_ARG, QEMU_OPTION_accel,
"-accel [accel=]accelerator[,thread=single|multi]\n"
- " select accelerator (kvm, xen, hax or tcg; use 'help' for a list)\n"
- " thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL)
+ " select accelerator (kvm, xen, hax, hvf or tcg; use 'help' for a list)\n"
+ " thread=single|multi (enable multi-threaded TCG)", QEMU_ARCH_ALL)
STEXI
@item -accel @var{name}[,prop=@var{value}[,...]]
@findex -accel
This is used to enable an accelerator. Depending on the target architecture,
-kvm, xen, hax or tcg can be available. By default, tcg is used. If there is
+kvm, xen, hax, hvf or tcg can be available. By default, tcg is used. If there is
more than one accelerator specified, the next one is used if the previous one
fails to initialize.
@table @option
@@ -578,7 +584,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Block device options)
+DEFHEADING(Block device options:)
STEXI
@table @option
ETEXI
@@ -840,8 +846,8 @@ of available connectors of a given interface type.
@item media=@var{media}
This option defines the type of the media: disk or cdrom.
@item cyls=@var{c},heads=@var{h},secs=@var{s}[,trans=@var{t}]
-These options have the same definition as they have in @option{-hdachs}.
-These parameters are deprecated, use the corresponding parameters
+Force disk physical geometry and the optional BIOS translation (trans=none or
+lba). These parameters are deprecated; use the corresponding parameters
of @code{-device} instead.
@item snapshot=@var{snapshot}
@var{snapshot} is "on" or "off" and controls snapshot mode for the given drive
@@ -1021,21 +1027,6 @@ the raw disk image you use is not written back. You can however force
the write back by pressing @key{C-a s} (@pxref{disk_images}).
ETEXI
-DEF("hdachs", HAS_ARG, QEMU_OPTION_hdachs, \
- "-hdachs c,h,s[,t]\n" \
- " force hard disk 0 physical geometry and the optional BIOS\n" \
- " translation (t=none or lba) (usually QEMU can guess them)\n",
- QEMU_ARCH_ALL)
-STEXI
-@item -hdachs @var{c},@var{h},@var{s},[,@var{t}]
-@findex -hdachs
-Force hard disk 0 physical geometry (1 <= @var{c} <= 16383, 1 <=
-@var{h} <= 16, 1 <= @var{s} <= 63) and optionally force the BIOS
-translation mode (@var{t}=none, lba or auto). Usually QEMU can guess
-all those parameters. This option is deprecated, please use
-@code{-device ide-hd,cyls=c,heads=h,secs=s,...} instead.
-ETEXI
-
DEF("fsdev", HAS_ARG, QEMU_OPTION_fsdev,
"-fsdev fsdriver,id=id[,path=path,][security_model={mapped-xattr|mapped-file|passthrough|none}]\n"
" [,writeout=immediate][,readonly][,socket=socket|sock_fd=sock_fd][,fmode=fmode][,dmode=dmode]\n"
@@ -1176,12 +1167,25 @@ STEXI
Create synthetic file system image
ETEXI
+DEF("iscsi", HAS_ARG, QEMU_OPTION_iscsi,
+ "-iscsi [user=user][,password=password]\n"
+ " [,header-digest=CRC32C|CR32C-NONE|NONE-CRC32C|NONE\n"
+ " [,initiator-name=initiator-iqn][,id=target-iqn]\n"
+ " [,timeout=timeout]\n"
+ " iSCSI session parameters\n", QEMU_ARCH_ALL)
+
+STEXI
+@item -iscsi
+@findex -iscsi
+Configure iSCSI session parameters.
+ETEXI
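A typical invocation (the initiator name, address and IQN below are examples) combines @option{-iscsi} with an iSCSI URL on @option{-drive}:

@example
qemu-system-i386 -iscsi initiator-name=iqn.2001-04.com.example:my-initiator \
  -drive file=iscsi://192.0.2.1/iqn.2001-04.com.example/1
@end example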
+
STEXI
@end table
ETEXI
DEFHEADING()
-DEFHEADING(USB options)
+DEFHEADING(USB options:)
STEXI
@table @option
ETEXI
@@ -1246,7 +1250,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Display options)
+DEFHEADING(Display options:)
STEXI
@table @option
ETEXI
@@ -1783,7 +1787,7 @@ STEXI
ETEXI
ARCHHEADING(, QEMU_ARCH_I386)
-ARCHHEADING(i386 target only, QEMU_ARCH_I386)
+ARCHHEADING(i386 target only:, QEMU_ARCH_I386)
STEXI
@table @option
ETEXI
@@ -1899,7 +1903,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Network options)
+DEFHEADING(Network options:)
STEXI
@table @option
ETEXI
@@ -2016,9 +2020,10 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev,
"-netdev hubport,id=str,hubid=n\n"
" configure a hub port on QEMU VLAN 'n'\n", QEMU_ARCH_ALL)
DEF("net", HAS_ARG, QEMU_OPTION_net,
- "-net nic[,vlan=n][,macaddr=mac][,model=type][,name=str][,addr=str][,vectors=v]\n"
- " old way to create a new NIC and connect it to VLAN 'n'\n"
- " (use the '-device devtype,netdev=str' option if possible instead)\n"
+ "-net nic[,vlan=n][,netdev=nd][,macaddr=mac][,model=type][,name=str][,addr=str][,vectors=v]\n"
+ " configure or create an on-board (or machine default) NIC and\n"
+ " connect it either to VLAN 'n' or the netdev 'nd' (for pluggable\n"
+ " NICs please use '-device devtype,netdev=nd' instead)\n"
"-net dump[,vlan=n][,file=f][,len=n]\n"
" dump traffic on vlan 'n' to file 'f' (max n bytes per packet)\n"
"-net none use it alone to have zero network devices. If no -net option\n"
@@ -2039,10 +2044,11 @@ DEF("net", HAS_ARG, QEMU_OPTION_net,
" old way to initialize a host network interface\n"
" (use the -netdev option if possible instead)\n", QEMU_ARCH_ALL)
STEXI
-@item -net nic[,vlan=@var{n}][,macaddr=@var{mac}][,model=@var{type}] [,name=@var{name}][,addr=@var{addr}][,vectors=@var{v}]
+@item -net nic[,vlan=@var{n}][,netdev=@var{nd}][,macaddr=@var{mac}][,model=@var{type}] [,name=@var{name}][,addr=@var{addr}][,vectors=@var{v}]
@findex -net
-Create a new Network Interface Card and connect it to VLAN @var{n} (@var{n}
-= 0 is the default). The NIC is an e1000 by default on the PC
+Configure or create an on-board (or machine default) Network Interface Card
+(NIC) and connect it either to VLAN @var{n} (@var{n} = 0 is the default), or
+to the netdev @var{nd}. The NIC is an e1000 by default on the PC
target. Optionally, the MAC address can be changed to @var{mac}, the
device address set to @var{addr} (PCI cards only),
and a @var{name} can be assigned for use in monitor commands.
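For example (the id and NIC model are illustrative), an on-board NIC can be bound to a user-mode backend with:

@example
qemu-system-i386 -netdev user,id=n1 -net nic,netdev=n1,model=e1000
@end example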
@@ -2371,6 +2377,7 @@ two systems. It is present in routers, firewalls and the Linux kernel
This transport allows a VM to communicate to another VM, router or firewall directly.
+@table @option
@item src=@var{srcaddr}
source address (mandatory)
@item dst=@var{dstaddr}
@@ -2398,6 +2405,7 @@ draft-mkonstan-l2tpext-keyed-ipv6-tunnel-00
networks which have packet reorder.
@item offset=@var{offset}
Add an extra offset between header and data
+@end table
For example, to attach a VM running on host 4.3.2.1 via L2TPv3 to the bridge br-lan
on the remote Linux host 1.2.3.4:
@@ -2480,12 +2488,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Character device options)
-STEXI
-
-The general form of a character device option is:
-@table @option
-ETEXI
+DEFHEADING(Character device options:)
DEF("chardev", HAS_ARG, QEMU_OPTION_chardev,
"-chardev help\n"
@@ -2531,6 +2534,9 @@ DEF("chardev", HAS_ARG, QEMU_OPTION_chardev,
)
STEXI
+
+The general form of a character device option is:
+@table @option
@item -chardev @var{backend} ,id=@var{id} [,mux=on|off] [,@var{options}]
@findex -chardev
Backend is one of:
@@ -2554,7 +2560,7 @@ Backend is one of:
@option{spiceport}.
The specific backend will determine the applicable options.
-Use "-chardev help" to print all available chardev backend types.
+Use @code{-chardev help} to print all available chardev backend types.
All devices must have an id, which can be any string up to 127 characters long.
It is used to uniquely identify this device in other command line directives.
@@ -2609,8 +2615,11 @@ to a file to record all data transmitted via the backend. The @option{logappend}
option controls whether the log file will be truncated or appended to when
opened.
-Further options to each backend are described below.
+@end table
+
+The available backends are:
+@table @option
@item -chardev null ,id=@var{id}
A void device. This device will not emit any data, and will drop any data it
receives. The null backend does not take any options.
@@ -2813,237 +2822,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Device URL Syntax)
-STEXI
-
-In addition to using normal file images for the emulated storage devices,
-QEMU can also use networked resources such as iSCSI devices. These are
-specified using a special URL syntax.
-
-@table @option
-@item iSCSI
-iSCSI support allows QEMU to access iSCSI resources directly and use as
-images for the guest storage. Both disk and cdrom images are supported.
-
-Syntax for specifying iSCSI LUNs is
-``iscsi://<target-ip>[:<port>]/<target-iqn>/<lun>''
-
-By default qemu will use the iSCSI initiator-name
-'iqn.2008-11.org.linux-kvm[:<name>]' but this can also be set from the command
-line or a configuration file.
-
-Since version Qemu 2.4 it is possible to specify a iSCSI request timeout to detect
-stalled requests and force a reestablishment of the session. The timeout
-is specified in seconds. The default is 0 which means no timeout. Libiscsi
-1.15.0 or greater is required for this feature.
-
-Example (without authentication):
-@example
-qemu-system-i386 -iscsi initiator-name=iqn.2001-04.com.example:my-initiator \
- -cdrom iscsi://192.0.2.1/iqn.2001-04.com.example/2 \
- -drive file=iscsi://192.0.2.1/iqn.2001-04.com.example/1
-@end example
-
-Example (CHAP username/password via URL):
-@example
-qemu-system-i386 -drive file=iscsi://user%password@@192.0.2.1/iqn.2001-04.com.example/1
-@end example
-
-Example (CHAP username/password via environment variables):
-@example
-LIBISCSI_CHAP_USERNAME="user" \
-LIBISCSI_CHAP_PASSWORD="password" \
-qemu-system-i386 -drive file=iscsi://192.0.2.1/iqn.2001-04.com.example/1
-@end example
-
-iSCSI support is an optional feature of QEMU and only available when
-compiled and linked against libiscsi.
-ETEXI
-DEF("iscsi", HAS_ARG, QEMU_OPTION_iscsi,
- "-iscsi [user=user][,password=password]\n"
- " [,header-digest=CRC32C|CR32C-NONE|NONE-CRC32C|NONE\n"
- " [,initiator-name=initiator-iqn][,id=target-iqn]\n"
- " [,timeout=timeout]\n"
- " iSCSI session parameters\n", QEMU_ARCH_ALL)
-STEXI
-
-iSCSI parameters such as username and password can also be specified via
-a configuration file. See qemu-doc for more information and examples.
-
-@item NBD
-QEMU supports NBD (Network Block Devices) both using TCP protocol as well
-as Unix Domain Sockets.
-
-Syntax for specifying a NBD device using TCP
-``nbd:<server-ip>:<port>[:exportname=<export>]''
-
-Syntax for specifying a NBD device using Unix Domain Sockets
-``nbd:unix:<domain-socket>[:exportname=<export>]''
-
-
-Example for TCP
-@example
-qemu-system-i386 --drive file=nbd:192.0.2.1:30000
-@end example
-
-Example for Unix Domain Sockets
-@example
-qemu-system-i386 --drive file=nbd:unix:/tmp/nbd-socket
-@end example
-
-@item SSH
-QEMU supports SSH (Secure Shell) access to remote disks.
-
-Examples:
-@example
-qemu-system-i386 -drive file=ssh://user@@host/path/to/disk.img
-qemu-system-i386 -drive file.driver=ssh,file.user=user,file.host=host,file.port=22,file.path=/path/to/disk.img
-@end example
-
-Currently authentication must be done using ssh-agent. Other
-authentication methods may be supported in future.
-
-@item Sheepdog
-Sheepdog is a distributed storage system for QEMU.
-QEMU supports using either local sheepdog devices or remote networked
-devices.
-
-Syntax for specifying a sheepdog device
-@example
-sheepdog[+tcp|+unix]://[host:port]/vdiname[?socket=path][#snapid|#tag]
-@end example
-
-Example
-@example
-qemu-system-i386 --drive file=sheepdog://192.0.2.1:30000/MyVirtualMachine
-@end example
-
-See also @url{https://sheepdog.github.io/sheepdog/}.
-
-@item GlusterFS
-GlusterFS is a user space distributed file system.
-QEMU supports the use of GlusterFS volumes for hosting VM disk images using
-TCP, Unix Domain Sockets and RDMA transport protocols.
-
-Syntax for specifying a VM disk image on GlusterFS volume is
-@example
-
-URI:
-gluster[+type]://[host[:port]]/volume/path[?socket=...][,debug=N][,logfile=...]
-
-JSON:
-'json:@{"driver":"qcow2","file":@{"driver":"gluster","volume":"testvol","path":"a.img","debug":N,"logfile":"...",
-@ "server":[@{"type":"tcp","host":"...","port":"..."@},
-@ @{"type":"unix","socket":"..."@}]@}@}'
-@end example
-
-
-Example
-@example
-URI:
-qemu-system-x86_64 --drive file=gluster://192.0.2.1/testvol/a.img,
-@ file.debug=9,file.logfile=/var/log/qemu-gluster.log
-
-JSON:
-qemu-system-x86_64 'json:@{"driver":"qcow2",
-@ "file":@{"driver":"gluster",
-@ "volume":"testvol","path":"a.img",
-@ "debug":9,"logfile":"/var/log/qemu-gluster.log",
-@ "server":[@{"type":"tcp","host":"1.2.3.4","port":24007@},
-@ @{"type":"unix","socket":"/var/run/glusterd.socket"@}]@}@}'
-qemu-system-x86_64 -drive driver=qcow2,file.driver=gluster,file.volume=testvol,file.path=/path/a.img,
-@ file.debug=9,file.logfile=/var/log/qemu-gluster.log,
-@ file.server.0.type=tcp,file.server.0.host=1.2.3.4,file.server.0.port=24007,
-@ file.server.1.type=unix,file.server.1.socket=/var/run/glusterd.socket
-@end example
-
-See also @url{http://www.gluster.org}.
-
-@item HTTP/HTTPS/FTP/FTPS
-QEMU supports read-only access to files accessed over http(s) and ftp(s).
-
-Syntax using a single filename:
-@example
-<protocol>://[<username>[:<password>]@@]<host>/<path>
-@end example
-
-where:
-@table @option
-@item protocol
-'http', 'https', 'ftp', or 'ftps'.
-
-@item username
-Optional username for authentication to the remote server.
-
-@item password
-Optional password for authentication to the remote server.
-
-@item host
-Address of the remote server.
-
-@item path
-Path on the remote server, including any query string.
-@end table
-
-The following options are also supported:
-@table @option
-@item url
-The full URL when passing options to the driver explicitly.
-
-@item readahead
-The amount of data to read ahead with each range request to the remote server.
-This value may optionally have the suffix 'T', 'G', 'M', 'K', 'k' or 'b'. If it
-does not have a suffix, it will be assumed to be in bytes. The value must be a
-multiple of 512 bytes. It defaults to 256k.
-
-@item sslverify
-Whether to verify the remote server's certificate when connecting over SSL. It
-can have the value 'on' or 'off'. It defaults to 'on'.
-
-@item cookie
-Send this cookie (it can also be a list of cookies separated by ';') with
-each outgoing request. Only supported when using protocols such as HTTP
-which support cookies, otherwise ignored.
-
-@item timeout
-Set the timeout in seconds of the CURL connection. This timeout is the time
-that CURL waits for a response from the remote server to get the size of the
-image to be downloaded. If not set, the default timeout of 5 seconds is used.
-@end table
-
-Note that when passing options to qemu explicitly, @option{driver} is the value
-of <protocol>.
-
-Example: boot from a remote Fedora 20 live ISO image
-@example
-qemu-system-x86_64 --drive media=cdrom,file=http://dl.fedoraproject.org/pub/fedora/linux/releases/20/Live/x86_64/Fedora-Live-Desktop-x86_64-20-1.iso,readonly
-
-qemu-system-x86_64 --drive media=cdrom,file.driver=http,file.url=http://dl.fedoraproject.org/pub/fedora/linux/releases/20/Live/x86_64/Fedora-Live-Desktop-x86_64-20-1.iso,readonly
-@end example
-
-Example: boot from a remote Fedora 20 cloud image using a local overlay for
-writes, copy-on-read, and a readahead of 64k
-@example
-qemu-img create -f qcow2 -o backing_file='json:@{"file.driver":"http",, "file.url":"https://dl.fedoraproject.org/pub/fedora/linux/releases/20/Images/x86_64/Fedora-x86_64-20-20131211.1-sda.qcow2",, "file.readahead":"64k"@}' /tmp/Fedora-x86_64-20-20131211.1-sda.qcow2
-
-qemu-system-x86_64 -drive file=/tmp/Fedora-x86_64-20-20131211.1-sda.qcow2,copy-on-read=on
-@end example
-
-Example: boot from an image stored on a VMware vSphere server with a self-signed
-certificate using a local overlay for writes, a readahead of 64k and a timeout
-of 10 seconds.
-@example
-qemu-img create -f qcow2 -o backing_file='json:@{"file.driver":"https",, "file.url":"https://user:password@@vsphere.example.com/folder/test/test-flat.vmdk?dcPath=Datacenter&dsName=datastore1",, "file.sslverify":"off",, "file.readahead":"64k",, "file.timeout":10@}' /tmp/test.qcow2
-
-qemu-system-x86_64 -drive file=/tmp/test.qcow2
-@end example
-ETEXI
-
-STEXI
-@end table
-ETEXI
-
-DEFHEADING(Bluetooth(R) options)
+DEFHEADING(Bluetooth(R) options:)
STEXI
@table @option
ETEXI
@@ -3119,7 +2898,7 @@ ETEXI
DEFHEADING()
#ifdef CONFIG_TPM
-DEFHEADING(TPM device options)
+DEFHEADING(TPM device options:)
DEF("tpmdev", HAS_ARG, QEMU_OPTION_tpmdev, \
"-tpmdev passthrough,id=id[,path=path][,cancel-path=path]\n"
@@ -3136,19 +2915,18 @@ The general form of a TPM device option is:
@item -tpmdev @var{backend} ,id=@var{id} [,@var{options}]
@findex -tpmdev
-Backend type must be either one of the following:
-@option{passthrough}, @option{emulator}.
The specific backend type will determine the applicable options.
The @code{-tpmdev} option creates the TPM backend and requires a
@code{-device} option that specifies the TPM frontend interface model.
-Options to each backend are described below.
+Use @code{-tpmdev help} to print all available TPM backend types.
-Use 'help' to print all available TPM backend types.
-@example
-qemu -tpmdev help
-@end example
+@end table
+
+The available backends are:
+
+@table @option
@item -tpmdev passthrough, id=@var{id}, path=@var{path}, cancel-path=@var{cancel-path}
@@ -3201,15 +2979,16 @@ To create a TPM emulator backend device with chardev socket backend:
@end example
-@end table
-
ETEXI
+STEXI
+@end table
+ETEXI
DEFHEADING()
#endif
-DEFHEADING(Linux/Multiboot boot specific)
+DEFHEADING(Linux/Multiboot boot specific:)
STEXI
When using these options, you can use a given Linux or Multiboot
@@ -3265,7 +3044,7 @@ STEXI
ETEXI
DEFHEADING()
-DEFHEADING(Debug/Expert options)
+DEFHEADING(Debug/Expert options:)
STEXI
@table @option
ETEXI
@@ -3481,11 +3260,12 @@ Like -qmp but uses pretty JSON formatting.
ETEXI
DEF("mon", HAS_ARG, QEMU_OPTION_mon, \
- "-mon [chardev=]name[,mode=readline|control]\n", QEMU_ARCH_ALL)
+ "-mon [chardev=]name[,mode=readline|control][,pretty[=on|off]]\n", QEMU_ARCH_ALL)
STEXI
-@item -mon [chardev=]name[,mode=readline|control]
+@item -mon [chardev=]name[,mode=readline|control][,pretty[=on|off]]
@findex -mon
-Setup monitor on chardev @var{name}.
+Set up monitor on chardev @var{name}. @code{pretty} turns on JSON pretty printing,
+easing human reading and debugging.
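For instance, a sketch of a pretty-printed QMP monitor on stdio (the chardev id is arbitrary):

@example
qemu-system-i386 -chardev stdio,id=mon0 -mon chardev=mon0,mode=control,pretty=on
@end example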
ETEXI
DEF("debugcon", HAS_ARG, QEMU_OPTION_debugcon, \
@@ -3830,7 +3610,7 @@ A virtual watchdog for s390x backed by the diagnose 288 hypercall
ETEXI
DEF("watchdog-action", HAS_ARG, QEMU_OPTION_watchdog_action, \
- "-watchdog-action reset|shutdown|poweroff|pause|debug|none\n" \
+ "-watchdog-action reset|shutdown|poweroff|inject-nmi|pause|debug|none\n" \
" action when watchdog fires [default=reset]\n",
QEMU_ARCH_ALL)
STEXI
@@ -3844,6 +3624,7 @@ The default is
Other possible actions are:
@code{shutdown} (attempt to gracefully shutdown the guest),
@code{poweroff} (forcefully poweroff the guest),
+@code{inject-nmi} (inject an NMI into the guest),
@code{pause} (pause the guest),
@code{debug} (print a debug message and continue), or
@code{none} (do nothing).
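As an illustration (the i6300esb watchdog model is just an example), the new action can be requested with:

@example
qemu-system-i386 -watchdog i6300esb -watchdog-action inject-nmi
@end example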
@@ -4133,9 +3914,6 @@ HXCOMM Deprecated by kvm-pit driver properties
DEF("no-kvm-pit-reinjection", 0, QEMU_OPTION_no_kvm_pit_reinjection,
"", QEMU_ARCH_I386)
-HXCOMM Deprecated (ignored)
-DEF("no-kvm-pit", 0, QEMU_OPTION_no_kvm_pit, "", QEMU_ARCH_I386)
-
HXCOMM Deprecated by -machine kernel_irqchip=on|off property
DEF("no-kvm-irqchip", 0, QEMU_OPTION_no_kvm_irqchip, "", QEMU_ARCH_I386)
@@ -4171,7 +3949,8 @@ STEXI
@end table
ETEXI
DEFHEADING()
-DEFHEADING(Generic object creation)
+
+DEFHEADING(Generic object creation:)
STEXI
@table @option
ETEXI
diff --git a/qga/channel-posix.c b/qga/channel-posix.c
index 3f34465159..b812bf4d51 100644
--- a/qga/channel-posix.c
+++ b/qga/channel-posix.c
@@ -190,7 +190,7 @@ static gboolean ga_channel_open(GAChannel *c, const gchar *path,
if (fd < 0) {
Error *local_err = NULL;
- fd = unix_listen(path, NULL, strlen(path), &local_err);
+ fd = unix_listen(path, &local_err);
if (local_err != NULL) {
g_critical("%s", error_get_pretty(local_err));
error_free(local_err);
diff --git a/qmp.c b/qmp.c
index e8c303116a..52cfd2d81c 100644
--- a/qmp.c
+++ b/qmp.c
@@ -113,11 +113,6 @@ void qmp_system_powerdown(Error **erp)
qemu_system_powerdown_request();
}
-void qmp_cpu(int64_t index, Error **errp)
-{
- /* Just do nothing */
-}
-
void qmp_cpu_add(int64_t id, Error **errp)
{
MachineClass *mc;
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 34df753571..3dc27d9656 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2475,8 +2475,11 @@ sub process {
# no volatiles please
my $asm_volatile = qr{\b(__asm__|asm)\s+(__volatile__|volatile)\b};
- if ($line =~ /\bvolatile\b/ && $line !~ /$asm_volatile/) {
- ERROR("Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\n" . $herecurr);
+ if ($line =~ /\bvolatile\b/ && $line !~ /$asm_volatile/ &&
+ $line !~ /sig_atomic_t/ &&
+ !ctx_has_comment($first_line, $linenr)) {
+ my $msg = "Use of volatile is usually wrong, please add a comment\n" . $herecurr;
+ ERROR($msg);
}
# warn about #if 0
diff --git a/scripts/coccinelle/cpu_restore_state.cocci b/scripts/coccinelle/cpu_restore_state.cocci
new file mode 100644
index 0000000000..61bc749d14
--- /dev/null
+++ b/scripts/coccinelle/cpu_restore_state.cocci
@@ -0,0 +1,19 @@
+// Remove unneeded tests before calling cpu_restore_state
+//
+// spatch --macro-file scripts/cocci-macro-file.h \
+// --sp-file ./scripts/coccinelle/cpu_restore_state.cocci \
+// --keep-comments --in-place --use-gitgrep --dir target
+@@
+expression A;
+expression C;
+@@
+-if (A) {
+ cpu_restore_state(C, A);
+-}
+@@
+expression A;
+expression C;
+@@
+- cpu_restore_state(C, A);
+- cpu_loop_exit(C);
++ cpu_loop_exit_restore(C, A);
diff --git a/scripts/dump-guest-memory.py b/scripts/dump-guest-memory.py
index 1af26c1a45..09bec92b50 100644
--- a/scripts/dump-guest-memory.py
+++ b/scripts/dump-guest-memory.py
@@ -546,8 +546,7 @@ shape and this command should mostly work."""
return None
def add_vmcoreinfo(self):
- vmci = '(VMCoreInfoState *)' + \
- 'object_resolve_path_type("", "vmcoreinfo", 0)'
+ vmci = 'vmcoreinfo_realize::vmcoreinfo_state'
if not gdb.parse_and_eval("%s" % vmci) \
or not gdb.parse_and_eval("(%s)->has_vmcoreinfo" % vmci):
return
diff --git a/scripts/git-submodule.sh b/scripts/git-submodule.sh
index 030617b4ac..bc7224a27f 100755
--- a/scripts/git-submodule.sh
+++ b/scripts/git-submodule.sh
@@ -24,7 +24,7 @@ error() {
echo "Alternatively you may disable automatic GIT submodule checkout"
echo "with:"
echo
- echo " $ ./configure --disable-git-update'"
+ echo " $ ./configure --disable-git-update"
echo
echo "and then manually update submodules prior to running make, with:"
echo
diff --git a/scripts/hxtool b/scripts/hxtool
index 1e2c97c5e6..7d7c4289e3 100644
--- a/scripts/hxtool
+++ b/scripts/hxtool
@@ -19,7 +19,8 @@ hxtoh()
print_texi_heading()
{
if test "$*" != ""; then
- printf "@subsection %s\n" "$*"
+ title="$*"
+ printf "@subsection %s\n" "${title%:}"
fi
}
diff --git a/scripts/qapi.py b/scripts/qapi.py
index 62dc52ed6e..43a54bf40f 100644
--- a/scripts/qapi.py
+++ b/scripts/qapi.py
@@ -106,13 +106,10 @@ class QAPIDoc(object):
# optional section name (argument/member or section name)
self.name = name
# the list of lines for this section
- self.content = []
+ self.text = ''
def append(self, line):
- self.content.append(line)
-
- def __repr__(self):
- return '\n'.join(self.content).strip()
+ self.text += line.rstrip() + '\n'
class ArgSection(Section):
def __init__(self, name):
@@ -123,11 +120,11 @@ class QAPIDoc(object):
self.member = member
def __init__(self, parser, info):
- # self.parser is used to report errors with QAPIParseError. The
+ # self._parser is used to report errors with QAPIParseError. The
# resulting error position depends on the state of the parser.
# It happens to be the beginning of the comment. More or less
# servicable, but action at a distance.
- self.parser = parser
+ self._parser = parser
self.info = info
self.symbol = None
self.body = QAPIDoc.Section()
@@ -136,7 +133,7 @@ class QAPIDoc(object):
# a list of Section
self.sections = []
# the current section
- self.section = self.body
+ self._section = self.body
def has_section(self, name):
"""Return True if we have a section with this name."""
@@ -153,20 +150,20 @@ class QAPIDoc(object):
return
if line[0] != ' ':
- raise QAPIParseError(self.parser, "Missing space after #")
+ raise QAPIParseError(self._parser, "Missing space after #")
line = line[1:]
# FIXME not nice: things like '# @foo:' and '# @foo: ' aren't
# recognized, and get silently treated as ordinary text
if self.symbol:
self._append_symbol_line(line)
- elif not self.body.content and line.startswith('@'):
+ elif not self.body.text and line.startswith('@'):
if not line.endswith(':'):
- raise QAPIParseError(self.parser, "Line should end with :")
+ raise QAPIParseError(self._parser, "Line should end with :")
self.symbol = line[1:-1]
# FIXME invalid names other than the empty string aren't flagged
if not self.symbol:
- raise QAPIParseError(self.parser, "Invalid name")
+ raise QAPIParseError(self._parser, "Invalid name")
else:
self._append_freeform(line)
@@ -192,53 +189,48 @@ class QAPIDoc(object):
def _start_args_section(self, name):
# FIXME invalid names other than the empty string aren't flagged
if not name:
- raise QAPIParseError(self.parser, "Invalid parameter name")
+ raise QAPIParseError(self._parser, "Invalid parameter name")
if name in self.args:
- raise QAPIParseError(self.parser,
+ raise QAPIParseError(self._parser,
"'%s' parameter name duplicated" % name)
if self.sections:
- raise QAPIParseError(self.parser,
+ raise QAPIParseError(self._parser,
"'@%s:' can't follow '%s' section"
% (name, self.sections[0].name))
self._end_section()
- self.section = QAPIDoc.ArgSection(name)
- self.args[name] = self.section
+ self._section = QAPIDoc.ArgSection(name)
+ self.args[name] = self._section
- def _start_section(self, name=''):
+ def _start_section(self, name=None):
if name in ('Returns', 'Since') and self.has_section(name):
- raise QAPIParseError(self.parser,
+ raise QAPIParseError(self._parser,
"Duplicated '%s' section" % name)
self._end_section()
- self.section = QAPIDoc.Section(name)
- self.sections.append(self.section)
+ self._section = QAPIDoc.Section(name)
+ self.sections.append(self._section)
def _end_section(self):
- if self.section:
- contents = str(self.section)
- if self.section.name and (not contents or contents.isspace()):
- raise QAPIParseError(self.parser, "Empty doc section '%s'"
- % self.section.name)
- self.section = None
+ if self._section:
+ text = self._section.text = self._section.text.strip()
+ if self._section.name and (not text or text.isspace()):
+ raise QAPIParseError(self._parser, "Empty doc section '%s'"
+ % self._section.name)
+ self._section = None
def _append_freeform(self, line):
- in_arg = isinstance(self.section, QAPIDoc.ArgSection)
- if (in_arg and self.section.content
- and not self.section.content[-1]
+ in_arg = isinstance(self._section, QAPIDoc.ArgSection)
+ if (in_arg and self._section.text.endswith('\n\n')
and line and not line[0].isspace()):
self._start_section()
- if (in_arg or not self.section.name
- or not self.section.name.startswith('Example')):
+ if (in_arg or not self._section.name
+ or not self._section.name.startswith('Example')):
line = line.strip()
match = re.match(r'(@\S+:)', line)
if match:
- raise QAPIParseError(self.parser,
+ raise QAPIParseError(self._parser,
"'%s' not allowed in free-form documentation"
% match.group(1))
- # TODO Drop this once the dust has settled
- if (isinstance(self.section, QAPIDoc.ArgSection)
- and '#optional' in line):
- raise QAPISemError(self.info, "Please drop the #optional tag")
- self.section.append(line)
+ self._section.append(line)
def connect_member(self, member):
if member.name not in self.args:
@@ -265,8 +257,7 @@ class QAPISchemaParser(object):
def __init__(self, fp, previously_included=[], incl_info=None):
abs_fname = os.path.abspath(fp.name)
- fname = fp.name
- self.fname = fname
+ self.fname = fp.name
previously_included.append(abs_fname)
self.incl_info = incl_info
self.src = fp.read()
@@ -277,21 +268,21 @@ class QAPISchemaParser(object):
self.line_pos = 0
self.exprs = []
self.docs = []
- self.cur_doc = None
self.accept()
+ cur_doc = None
while self.tok is not None:
- info = {'file': fname, 'line': self.line,
+ info = {'file': self.fname, 'line': self.line,
'parent': self.incl_info}
if self.tok == '#':
- self.reject_expr_doc()
- self.cur_doc = self.get_doc(info)
- self.docs.append(self.cur_doc)
+ self.reject_expr_doc(cur_doc)
+ cur_doc = self.get_doc(info)
+ self.docs.append(cur_doc)
continue
expr = self.get_expr(False)
if 'include' in expr:
- self.reject_expr_doc()
+ self.reject_expr_doc(cur_doc)
if len(expr) != 1:
raise QAPISemError(info, "Invalid 'include' directive")
include = expr['include']
@@ -301,7 +292,7 @@ class QAPISchemaParser(object):
self._include(include, info, os.path.dirname(abs_fname),
previously_included)
elif "pragma" in expr:
- self.reject_expr_doc()
+ self.reject_expr_doc(cur_doc)
if len(expr) != 1:
raise QAPISemError(info, "Invalid 'pragma' directive")
pragma = expr['pragma']
@@ -313,22 +304,22 @@ class QAPISchemaParser(object):
else:
expr_elem = {'expr': expr,
'info': info}
- if self.cur_doc:
- if not self.cur_doc.symbol:
+ if cur_doc:
+ if not cur_doc.symbol:
raise QAPISemError(
- self.cur_doc.info,
- "Expression documentation required")
- expr_elem['doc'] = self.cur_doc
+ cur_doc.info, "Expression documentation required")
+ expr_elem['doc'] = cur_doc
self.exprs.append(expr_elem)
- self.cur_doc = None
- self.reject_expr_doc()
+ cur_doc = None
+ self.reject_expr_doc(cur_doc)
- def reject_expr_doc(self):
- if self.cur_doc and self.cur_doc.symbol:
+ @staticmethod
+ def reject_expr_doc(doc):
+ if doc and doc.symbol:
raise QAPISemError(
- self.cur_doc.info,
+ doc.info,
"Documentation for '%s' is not followed by the definition"
- % self.cur_doc.symbol)
+ % doc.symbol)
def _include(self, include, info, base_dir, previously_included):
incl_abs_fname = os.path.join(base_dir, include)
diff --git a/scripts/qapi2texi.py b/scripts/qapi2texi.py
index a317526e51..92e2af2cd6 100755
--- a/scripts/qapi2texi.py
+++ b/scripts/qapi2texi.py
@@ -13,7 +13,6 @@ MSG_FMT = """
@deftypefn {type} {{}} {name}
{body}
-
@end deftypefn
""".format
@@ -22,7 +21,6 @@ TYPE_FMT = """
@deftp {{{type}}} {name}
{body}
-
@end deftp
""".format
@@ -74,7 +72,7 @@ def texi_format(doc):
- 1. or 1): generates an @enumerate @item
- */-: generates an @itemize list
"""
- lines = []
+ ret = ''
doc = subst_braces(doc)
doc = subst_vars(doc)
doc = subst_emph(doc)
@@ -100,32 +98,32 @@ def texi_format(doc):
line = '@subsection ' + line[3:]
elif re.match(r'^([0-9]*\.) ', line):
if not inlist:
- lines.append('@enumerate')
+ ret += '@enumerate\n'
inlist = 'enumerate'
+ ret += '@item\n'
line = line[line.find(' ')+1:]
- lines.append('@item')
elif re.match(r'^[*-] ', line):
if not inlist:
- lines.append('@itemize %s' % {'*': '@bullet',
- '-': '@minus'}[line[0]])
+ ret += '@itemize %s\n' % {'*': '@bullet',
+ '-': '@minus'}[line[0]]
inlist = 'itemize'
- lines.append('@item')
+ ret += '@item\n'
line = line[2:]
elif lastempty and inlist:
- lines.append('@end %s\n' % inlist)
+ ret += '@end %s\n\n' % inlist
inlist = ''
lastempty = empty
- lines.append(line)
+ ret += line + '\n'
if inlist:
- lines.append('@end %s\n' % inlist)
- return '\n'.join(lines)
+ ret += '@end %s\n\n' % inlist
+ return ret
def texi_body(doc):
"""Format the main documentation body"""
- return texi_format(str(doc.body)) + '\n'
+ return texi_format(doc.body.text)
def texi_enum_value(value):
@@ -149,15 +147,16 @@ def texi_members(doc, what, base, variants, member_func):
items = ''
for section in doc.args.itervalues():
# TODO Drop fallbacks when undocumented members are outlawed
- if section.content:
- desc = texi_format(str(section))
+ if section.text:
+ desc = texi_format(section.text)
elif (variants and variants.tag_member == section.member
and not section.member.type.doc_type()):
values = section.member.type.member_names()
- desc = 'One of ' + ', '.join(['@t{"%s"}' % v for v in values])
+ members_text = ', '.join(['@t{"%s"}' % v for v in values])
+ desc = 'One of ' + members_text + '\n'
else:
- desc = 'Not documented'
- items += member_func(section.member) + desc + '\n'
+ desc = 'Not documented\n'
+ items += member_func(section.member) + desc
if base:
items += '@item The members of @code{%s}\n' % base.doc_type()
if variants:
@@ -180,16 +179,13 @@ def texi_sections(doc):
"""Format additional sections following arguments"""
body = ''
for section in doc.sections:
- name, doc = (section.name, str(section))
- func = texi_format
- if name.startswith('Example'):
- func = texi_example
-
- if name:
+ if section.name:
# prefer @b over @strong, so txt doesn't translate it to *Foo:*
- body += '\n\n@b{%s:}\n' % name
-
- body += func(doc)
+ body += '\n@b{%s:}\n' % section.name
+ if section.name and section.name.startswith('Example'):
+ body += texi_example(section.text)
+ else:
+ body += texi_format(section.text)
return body
@@ -210,8 +206,6 @@ class QAPISchemaGenDocVisitor(qapi.QAPISchemaVisitor):
def visit_enum_type(self, name, info, values, prefix):
doc = self.cur_doc
- if self.out:
- self.out += '\n'
self.out += TYPE_FMT(type='Enum',
name=doc.symbol,
body=texi_entity(doc, 'Values',
@@ -221,16 +215,12 @@ class QAPISchemaGenDocVisitor(qapi.QAPISchemaVisitor):
doc = self.cur_doc
if base and base.is_implicit():
base = None
- if self.out:
- self.out += '\n'
self.out += TYPE_FMT(type='Object',
name=doc.symbol,
body=texi_entity(doc, 'Members', base, variants))
def visit_alternate_type(self, name, info, variants):
doc = self.cur_doc
- if self.out:
- self.out += '\n'
self.out += TYPE_FMT(type='Alternate',
name=doc.symbol,
body=texi_entity(doc, 'Members'))
@@ -238,11 +228,10 @@ class QAPISchemaGenDocVisitor(qapi.QAPISchemaVisitor):
def visit_command(self, name, info, arg_type, ret_type,
gen, success_response, boxed):
doc = self.cur_doc
- if self.out:
- self.out += '\n'
if boxed:
body = texi_body(doc)
- body += '\n@b{Arguments:} the members of @code{%s}' % arg_type.name
+ body += ('\n@b{Arguments:} the members of @code{%s}\n'
+ % arg_type.name)
body += texi_sections(doc)
else:
body = texi_entity(doc, 'Arguments')
@@ -252,13 +241,13 @@ class QAPISchemaGenDocVisitor(qapi.QAPISchemaVisitor):
def visit_event(self, name, info, arg_type, boxed):
doc = self.cur_doc
- if self.out:
- self.out += '\n'
self.out += MSG_FMT(type='Event',
name=doc.symbol,
body=texi_entity(doc, 'Arguments'))
def symbol(self, doc, entity):
+ if self.out:
+ self.out += '\n'
self.cur_doc = doc
entity.visit(self)
self.cur_doc = None
diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c
index dd9785143b..9fe615c73c 100644
--- a/scsi/qemu-pr-helper.c
+++ b/scsi/qemu-pr-helper.c
@@ -314,6 +314,22 @@ static int is_mpath(int fd)
return !strncmp(tgt->target_type, "multipath", DM_MAX_TYPE_NAME);
}
+static SCSISense mpath_generic_sense(int r)
+{
+ switch (r) {
+ case MPATH_PR_SENSE_NOT_READY:
+ return SENSE_CODE(NOT_READY);
+ case MPATH_PR_SENSE_MEDIUM_ERROR:
+ return SENSE_CODE(READ_ERROR);
+ case MPATH_PR_SENSE_HARDWARE_ERROR:
+ return SENSE_CODE(TARGET_FAILURE);
+ case MPATH_PR_SENSE_ABORTED_COMMAND:
+ return SENSE_CODE(IO_ERROR);
+ default:
+ abort();
+ }
+}
+
static int mpath_reconstruct_sense(int fd, int r, uint8_t *sense)
{
switch (r) {
@@ -329,7 +345,13 @@ static int mpath_reconstruct_sense(int fd, int r, uint8_t *sense)
*/
uint8_t cdb[6] = { TEST_UNIT_READY };
int sz = 0;
- return do_sgio(fd, cdb, sense, NULL, &sz, SG_DXFER_NONE);
+ int ret = do_sgio(fd, cdb, sense, NULL, &sz, SG_DXFER_NONE);
+
+ if (ret != GOOD) {
+ return ret;
+ }
+ scsi_build_sense(sense, mpath_generic_sense(r));
+ return CHECK_CONDITION;
}
case MPATH_PR_SENSE_UNIT_ATTENTION:
@@ -449,7 +471,7 @@ static int multipath_pr_out(int fd, const uint8_t *cdb, uint8_t *sense,
memset(&paramp, 0, sizeof(paramp));
memcpy(&paramp.key, &param[0], 8);
memcpy(&paramp.sa_key, &param[8], 8);
- paramp.sa_flags = param[10];
+ paramp.sa_flags = param[20];
if (sz > PR_OUT_FIXED_PARAM_SIZE) {
size_t transportid_len;
int i, j;
@@ -478,8 +500,8 @@ static int multipath_pr_out(int fd, const uint8_t *cdb, uint8_t *sense,
j += offsetof(struct transportid, n_port_name[8]);
i += 24;
break;
- case 3:
- case 0x43:
+ case 5:
+ case 0x45:
/* iSCSI transport. */
len = lduw_be_p(&param[i + 2]);
if (len > 252 || (len & 3) || i + len + 4 > transportid_len) {
diff --git a/scsi/utils.c b/scsi/utils.c
index 5684951b12..ddae650a99 100644
--- a/scsi/utils.c
+++ b/scsi/utils.c
@@ -96,15 +96,60 @@ int scsi_cdb_length(uint8_t *buf)
return cdb_len;
}
+SCSISense scsi_parse_sense_buf(const uint8_t *in_buf, int in_len)
+{
+ bool fixed_in;
+ SCSISense sense;
+
+ assert(in_len > 0);
+ fixed_in = (in_buf[0] & 2) == 0;
+ if (fixed_in) {
+ if (in_len < 14) {
+ return SENSE_CODE(IO_ERROR);
+ }
+ sense.key = in_buf[2];
+ sense.asc = in_buf[12];
+ sense.ascq = in_buf[13];
+ } else {
+ if (in_len < 4) {
+ return SENSE_CODE(IO_ERROR);
+ }
+ sense.key = in_buf[1];
+ sense.asc = in_buf[2];
+ sense.ascq = in_buf[3];
+ }
+
+ return sense;
+}
+
+int scsi_build_sense_buf(uint8_t *out_buf, size_t size, SCSISense sense,
+ bool fixed_sense)
+{
+ int len;
+ uint8_t buf[SCSI_SENSE_LEN] = { 0 };
+
+ if (fixed_sense) {
+ buf[0] = 0x70;
+ buf[2] = sense.key;
+ buf[7] = 10;
+ buf[12] = sense.asc;
+ buf[13] = sense.ascq;
+ len = 18;
+ } else {
+ buf[0] = 0x72;
+ buf[1] = sense.key;
+ buf[2] = sense.asc;
+ buf[3] = sense.ascq;
+ len = 8;
+ }
+ len = MIN(len, size);
+ memcpy(out_buf, buf, len);
+ return len;
+}
+
int scsi_build_sense(uint8_t *buf, SCSISense sense)
{
- memset(buf, 0, 18);
- buf[0] = 0x70;
- buf[2] = sense.key;
- buf[7] = 10;
- buf[12] = sense.asc;
- buf[13] = sense.ascq;
- return 18;
+ return scsi_build_sense_buf(buf, SCSI_SENSE_LEN, sense, true);
}
/*
@@ -211,6 +256,16 @@ const struct SCSISense sense_code_LUN_COMM_FAILURE = {
.key = ABORTED_COMMAND, .asc = 0x08, .ascq = 0x00
};
+/* Medium Error, Unrecovered read error */
+const struct SCSISense sense_code_READ_ERROR = {
+ .key = MEDIUM_ERROR, .asc = 0x11, .ascq = 0x00
+};
+
+/* Not ready, Cause not reportable */
+const struct SCSISense sense_code_NOT_READY = {
+ .key = NOT_READY, .asc = 0x04, .ascq = 0x00
+};
+
/* Unit attention, Capacity data has changed */
const struct SCSISense sense_code_CAPACITY_CHANGED = {
.key = UNIT_ATTENTION, .asc = 0x2a, .ascq = 0x09
@@ -264,67 +319,36 @@ const struct SCSISense sense_code_SPACE_ALLOC_FAILED = {
int scsi_convert_sense(uint8_t *in_buf, int in_len,
uint8_t *buf, int len, bool fixed)
{
- bool fixed_in;
SCSISense sense;
- if (!fixed && len < 8) {
- return 0;
- }
-
- if (in_len == 0) {
- sense.key = NO_SENSE;
- sense.asc = 0;
- sense.ascq = 0;
- } else {
- fixed_in = (in_buf[0] & 2) == 0;
-
- if (fixed == fixed_in) {
- memcpy(buf, in_buf, MIN(len, in_len));
- return MIN(len, in_len);
- }
+ bool fixed_in;
- if (fixed_in) {
- sense.key = in_buf[2];
- sense.asc = in_buf[12];
- sense.ascq = in_buf[13];
- } else {
- sense.key = in_buf[1];
- sense.asc = in_buf[2];
- sense.ascq = in_buf[3];
- }
+ fixed_in = (in_buf[0] & 2) == 0;
+ if (in_len && fixed == fixed_in) {
+ memcpy(buf, in_buf, MIN(len, in_len));
+ return MIN(len, in_len);
}
- memset(buf, 0, len);
- if (fixed) {
- /* Return fixed format sense buffer */
- buf[0] = 0x70;
- buf[2] = sense.key;
- buf[7] = 10;
- buf[12] = sense.asc;
- buf[13] = sense.ascq;
- return MIN(len, SCSI_SENSE_LEN);
+ if (in_len == 0) {
+ sense = SENSE_CODE(NO_SENSE);
} else {
- /* Return descriptor format sense buffer */
- buf[0] = 0x72;
- buf[1] = sense.key;
- buf[2] = sense.asc;
- buf[3] = sense.ascq;
- return 8;
+ sense = scsi_parse_sense_buf(in_buf, in_len);
}
+ return scsi_build_sense_buf(buf, len, sense, fixed);
}
int scsi_sense_to_errno(int key, int asc, int ascq)
{
switch (key) {
- case 0x00: /* NO SENSE */
- case 0x01: /* RECOVERED ERROR */
- case 0x06: /* UNIT ATTENTION */
+ case NO_SENSE:
+ case RECOVERED_ERROR:
+ case UNIT_ATTENTION:
/* These sense keys are not errors */
return 0;
- case 0x0b: /* COMMAND ABORTED */
+ case ABORTED_COMMAND: /* COMMAND ABORTED */
return ECANCELED;
- case 0x02: /* NOT READY */
- case 0x05: /* ILLEGAL REQUEST */
- case 0x07: /* DATA PROTECTION */
+ case NOT_READY:
+ case ILLEGAL_REQUEST:
+ case DATA_PROTECT:
/* Parse ASCQ */
break;
default:
@@ -356,34 +380,15 @@ int scsi_sense_to_errno(int key, int asc, int ascq)
}
}
-int scsi_sense_buf_to_errno(const uint8_t *sense, size_t sense_size)
+int scsi_sense_buf_to_errno(const uint8_t *in_buf, size_t in_len)
{
- int key, asc, ascq;
- if (sense_size < 1) {
- return EIO;
- }
- switch (sense[0]) {
- case 0x70: /* Fixed format sense data. */
- if (sense_size < 14) {
- return EIO;
- }
- key = sense[2] & 0xF;
- asc = sense[12];
- ascq = sense[13];
- break;
- case 0x72: /* Descriptor format sense data. */
- if (sense_size < 4) {
- return EIO;
- }
- key = sense[1] & 0xF;
- asc = sense[2];
- ascq = sense[3];
- break;
- default:
+ SCSISense sense;
+ if (in_len < 1) {
return EIO;
- break;
}
- return scsi_sense_to_errno(key, asc, ascq);
+
+ sense = scsi_parse_sense_buf(in_buf, in_len);
+ return scsi_sense_to_errno(sense.key, sense.asc, sense.ascq);
}
const char *scsi_command_name(uint8_t cmd)
diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c
index 3c06baa93a..430eea470b 100644
--- a/target/alpha/mem_helper.c
+++ b/target/alpha/mem_helper.c
@@ -34,9 +34,7 @@ void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
uint64_t pc;
uint32_t insn;
- if (retaddr) {
- cpu_restore_state(cs, retaddr);
- }
+ cpu_restore_state(cs, retaddr);
pc = env->pc;
insn = cpu_ldl_code(env, pc);
@@ -58,9 +56,7 @@ void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
AlphaCPU *cpu = ALPHA_CPU(cs);
CPUAlphaState *env = &cpu->env;
- if (retaddr) {
- cpu_restore_state(cs, retaddr);
- }
+ cpu_restore_state(cs, retaddr);
env->trap_arg0 = addr;
env->trap_arg1 = access_type == MMU_DATA_STORE ? 1 : 0;
@@ -80,11 +76,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
ret = alpha_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (unlikely(ret != 0)) {
- if (retaddr) {
- cpu_restore_state(cs, retaddr);
- }
/* Exception index and error code are already set */
- cpu_loop_exit(cs);
+ cpu_loop_exit_restore(cs, retaddr);
}
}
#endif /* CONFIG_USER_ONLY */
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index 629f35ec8e..73a1b5e63e 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -156,7 +156,7 @@ void alpha_translate_init(void)
static TCGv load_zero(DisasContext *ctx)
{
- if (TCGV_IS_UNUSED_I64(ctx->zero)) {
+ if (!ctx->zero) {
ctx->zero = tcg_const_i64(0);
}
return ctx->zero;
@@ -164,7 +164,7 @@ static TCGv load_zero(DisasContext *ctx)
static TCGv dest_sink(DisasContext *ctx)
{
- if (TCGV_IS_UNUSED_I64(ctx->sink)) {
+ if (!ctx->sink) {
ctx->sink = tcg_temp_new();
}
return ctx->sink;
@@ -172,18 +172,18 @@ static TCGv dest_sink(DisasContext *ctx)
static void free_context_temps(DisasContext *ctx)
{
- if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
+ if (ctx->sink) {
tcg_gen_discard_i64(ctx->sink);
tcg_temp_free(ctx->sink);
- TCGV_UNUSED_I64(ctx->sink);
+ ctx->sink = NULL;
}
- if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
+ if (ctx->zero) {
tcg_temp_free(ctx->zero);
- TCGV_UNUSED_I64(ctx->zero);
+ ctx->zero = NULL;
}
- if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
+ if (ctx->lit) {
tcg_temp_free(ctx->lit);
- TCGV_UNUSED_I64(ctx->lit);
+ ctx->lit = NULL;
}
}
@@ -2948,9 +2948,9 @@ static int alpha_tr_init_disas_context(DisasContextBase *dcbase,
/* Similarly for flush-to-zero. */
ctx->tb_ftz = -1;
- TCGV_UNUSED_I64(ctx->zero);
- TCGV_UNUSED_I64(ctx->sink);
- TCGV_UNUSED_I64(ctx->lit);
+ ctx->zero = NULL;
+ ctx->sink = NULL;
+ ctx->lit = NULL;
/* Bound the number of insns to execute to those left on the page. */
if (in_superpage(ctx, ctx->base.pc_first)) {
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 7f7a3d1e32..cc1856c32b 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -705,9 +705,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
CPUARMState *env = &cpu->env;
int pagebits;
Error *local_err = NULL;
-#ifndef CONFIG_USER_ONLY
- AddressSpace *as;
-#endif
cpu_exec_realizefn(cs, &local_err);
if (local_err != NULL) {
@@ -912,21 +909,17 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- as = g_new0(AddressSpace, 1);
-
cs->num_ases = 2;
if (!cpu->secure_memory) {
cpu->secure_memory = cs->memory;
}
- address_space_init(as, cpu->secure_memory, "cpu-secure-memory");
- cpu_address_space_init(cs, as, ARMASIdx_S);
+ cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
+ cpu->secure_memory);
} else {
cs->num_ases = 1;
}
- as = g_new0(AddressSpace, 1);
- address_space_init(as, cs->memory, "cpu-memory");
- cpu_address_space_init(cs, as, ARMASIdx_NS);
+ cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);
#endif
qemu_init_vcpu(cs);
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 89d49cdcb2..96316700dd 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -112,7 +112,7 @@ enum {
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3
-#define NB_MMU_MODES 7
+#define NB_MMU_MODES 8
/* ARM-specific extra insn start words:
* 1: Conditional execution bits
* 2: Partial exception syndrome for data aborts
@@ -2226,13 +2226,13 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* They have the following different MMU indexes:
* User
* Privileged
- * Execution priority negative (this is like privileged, but the
- * MPU HFNMIENA bit means that it may have different access permission
- * check results to normal privileged code, so can't share a TLB).
+ * User, execution priority negative (ie the MPU HFNMIENA bit may apply)
+ * Privileged, execution priority negative (ditto)
* If the CPU supports the v8M Security Extension then there are also:
* Secure User
* Secure Privileged
- * Secure, execution priority negative
+ * Secure User, execution priority negative
+ * Secure Privileged, execution priority negative
*
* The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
* are not quite the same -- different CPU types (most notably M profile
@@ -2251,11 +2251,18 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* The constant names here are patterned after the general style of the names
* of the AT/ATS operations.
* The values used are carefully arranged to make mmu_idx => EL lookup easy.
+ * For M profile we arrange them to have a bit for priv, a bit for negpri
+ * and a bit for secure.
*/
#define ARM_MMU_IDX_A 0x10 /* A profile */
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
#define ARM_MMU_IDX_M 0x40 /* M profile */
+/* meanings of the bits for M profile mmu idx values */
+#define ARM_MMU_IDX_M_PRIV 0x1
+#define ARM_MMU_IDX_M_NEGPRI 0x2
+#define ARM_MMU_IDX_M_S 0x4
+
#define ARM_MMU_IDX_TYPE_MASK (~0x7)
#define ARM_MMU_IDX_COREIDX_MASK 0x7
@@ -2269,10 +2276,12 @@ typedef enum ARMMMUIdx {
ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A,
ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
- ARMMMUIdx_MNegPri = 2 | ARM_MMU_IDX_M,
- ARMMMUIdx_MSUser = 3 | ARM_MMU_IDX_M,
- ARMMMUIdx_MSPriv = 4 | ARM_MMU_IDX_M,
- ARMMMUIdx_MSNegPri = 5 | ARM_MMU_IDX_M,
+ ARMMMUIdx_MUserNegPri = 2 | ARM_MMU_IDX_M,
+ ARMMMUIdx_MPrivNegPri = 3 | ARM_MMU_IDX_M,
+ ARMMMUIdx_MSUser = 4 | ARM_MMU_IDX_M,
+ ARMMMUIdx_MSPriv = 5 | ARM_MMU_IDX_M,
+ ARMMMUIdx_MSUserNegPri = 6 | ARM_MMU_IDX_M,
+ ARMMMUIdx_MSPrivNegPri = 7 | ARM_MMU_IDX_M,
/* Indexes below here don't have TLBs and are used only for AT system
* instructions or for the first stage of an S12 page table walk.
*/
@@ -2293,10 +2302,12 @@ typedef enum ARMMMUIdxBit {
ARMMMUIdxBit_S2NS = 1 << 6,
ARMMMUIdxBit_MUser = 1 << 0,
ARMMMUIdxBit_MPriv = 1 << 1,
- ARMMMUIdxBit_MNegPri = 1 << 2,
- ARMMMUIdxBit_MSUser = 1 << 3,
- ARMMMUIdxBit_MSPriv = 1 << 4,
- ARMMMUIdxBit_MSNegPri = 1 << 5,
+ ARMMMUIdxBit_MUserNegPri = 1 << 2,
+ ARMMMUIdxBit_MPrivNegPri = 1 << 3,
+ ARMMMUIdxBit_MSUser = 1 << 4,
+ ARMMMUIdxBit_MSPriv = 1 << 5,
+ ARMMMUIdxBit_MSUserNegPri = 1 << 6,
+ ARMMMUIdxBit_MSPrivNegPri = 1 << 7,
} ARMMMUIdxBit;
#define MMU_USER_IDX 0
@@ -2322,33 +2333,45 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
case ARM_MMU_IDX_A:
return mmu_idx & 3;
case ARM_MMU_IDX_M:
- return (mmu_idx == ARMMMUIdx_MUser || mmu_idx == ARMMMUIdx_MSUser)
- ? 0 : 1;
+ return mmu_idx & ARM_MMU_IDX_M_PRIV;
default:
g_assert_not_reached();
}
}
-/* Return the MMU index for a v7M CPU in the specified security state */
-static inline ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env,
- bool secstate)
+/* Return the MMU index for a v7M CPU in the specified security and
+ * privilege state
+ */
+static inline ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
+ bool secstate,
+ bool priv)
{
- int el = arm_current_el(env);
- ARMMMUIdx mmu_idx;
+ ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
- if (el == 0) {
- mmu_idx = secstate ? ARMMMUIdx_MSUser : ARMMMUIdx_MUser;
- } else {
- mmu_idx = secstate ? ARMMMUIdx_MSPriv : ARMMMUIdx_MPriv;
+ if (priv) {
+ mmu_idx |= ARM_MMU_IDX_M_PRIV;
}
if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
- mmu_idx = secstate ? ARMMMUIdx_MSNegPri : ARMMMUIdx_MNegPri;
+ mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
+ }
+
+ if (secstate) {
+ mmu_idx |= ARM_MMU_IDX_M_S;
}
return mmu_idx;
}
+/* Return the MMU index for a v7M CPU in the specified security state */
+static inline ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env,
+ bool secstate)
+{
+ bool priv = arm_current_el(env) != 0;
+
+ return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
+}
+
/* Determine the current mmu_idx to use for normal loads/stores */
static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 91a9300f11..d1395f9b73 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -28,13 +28,13 @@ typedef struct ARMCacheAttrs {
static bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size, uint32_t *fsr,
+ target_ulong *page_size,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
- target_ulong *page_size_ptr, uint32_t *fsr,
+ target_ulong *page_size_ptr,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
/* Security attributes for an address, as returned by v8m_security_lookup. */
@@ -2160,20 +2160,44 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
hwaddr phys_addr;
target_ulong page_size;
int prot;
- uint32_t fsr;
bool ret;
uint64_t par64;
+ bool format64 = false;
MemTxAttrs attrs = {};
ARMMMUFaultInfo fi = {};
ARMCacheAttrs cacheattrs = {};
ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
- &prot, &page_size, &fsr, &fi, &cacheattrs);
- if (arm_s1_regime_using_lpae_format(env, mmu_idx)) {
- /* fsr is a DFSR/IFSR value for the long descriptor
- * translation table format, but with WnR always clear.
- * Convert it to a 64-bit PAR.
+ &prot, &page_size, &fi, &cacheattrs);
+
+ if (is_a64(env)) {
+ format64 = true;
+ } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ /*
+ * ATS1Cxx:
+ * * TTBCR.EAE determines whether the result is returned using the
+ * 32-bit or the 64-bit PAR format
+ * * Instructions executed in Hyp mode always use the 64bit format
+ *
+ * ATS1S2NSOxx uses the 64bit format if any of the following is true:
+ * * The Non-secure TTBCR.EAE bit is set to 1
+ * * The implementation includes EL2, and the value of HCR.VM is 1
+ *
+ * ATS1Hx always uses the 64bit format (not supported yet).
*/
+ format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
+
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+ format64 |= env->cp15.hcr_el2 & HCR_VM;
+ } else {
+ format64 |= arm_current_el(env) == 2;
+ }
+ }
+ }
+
+ if (format64) {
+ /* Create a 64-bit PAR */
par64 = (1 << 11); /* LPAE bit always set */
if (!ret) {
par64 |= phys_addr & ~0xfffULL;
@@ -2183,6 +2207,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
par64 |= cacheattrs.shareability << 7; /* SH */
} else {
+ uint32_t fsr = arm_fi_to_lfsc(&fi);
+
par64 |= 1; /* F */
par64 |= (fsr & 0x3f) << 1; /* FS */
/* Note that S2WLK and FSTAGE are always zero, because we don't
@@ -2207,6 +2233,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
par64 |= (1 << 9); /* NS */
}
} else {
+ uint32_t fsr = arm_fi_to_sfsc(&fi);
+
par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
((fsr & 0xf) << 1) | 1;
}
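
For the failure paths above, a worked example, with assumed fault values rather than anything taken from the patch, of what do_ats_write() would report in each PAR format for a stage-1 level-2 permission fault:

    /* Sketch only; assumes target/arm/internals.h for the types and converters. */
    #include <assert.h>

    static void example_ats_fault_par(void)
    {
        ARMMMUFaultInfo fi = { .type = ARMFault_Permission, .level = 2 };

        /* 64-bit branch: LPAE bit 9 | FSC 0b001110 */
        assert(arm_fi_to_lfsc(&fi) == 0x20e);
        /* par64 = (1 << 11) | ((0x20e & 0x3f) << 1) | 1 */
        assert(((1 << 11) | ((0x20e & 0x3f) << 1) | 1) == 0x81d);

        /* 32-bit branch: short-format FSR with FS = 0b01111 */
        assert(arm_fi_to_sfsc(&fi) == 0x00f);
    }
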
@@ -5947,6 +5975,28 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
g_assert_not_reached();
}
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+ /* The TT instructions can be used by unprivileged code, but in
+ * user-only emulation we don't have the MPU.
+ * Luckily since we know we are NonSecure unprivileged (and that in
+ * turn means that the A flag wasn't specified), all the bits in the
+ * register must be zero:
+ * IREGION: 0 because IRVALID is 0
+ * IRVALID: 0 because NS
+ * S: 0 because NS
+ * NSRW: 0 because NS
+ * NSR: 0 because NS
+ * RW: 0 because unpriv and A flag not set
+ * R: 0 because unpriv and A flag not set
+ * SRVALID: 0 because NS
+ * MRVALID: 0 because unpriv and A flag not set
+ * SREGION: 0 because SRVALID is 0
+ * MREGION: 0 because MRVALID is 0
+ */
+ return 0;
+}
+
void switch_mode(CPUARMState *env, int mode)
{
ARMCPU *cpu = arm_env_get_cpu(env);
@@ -6955,7 +7005,6 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
target_ulong page_size;
hwaddr physaddr;
int prot;
- uint32_t fsr;
v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
if (!sattrs.nsc || sattrs.ns) {
@@ -6969,7 +7018,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
return false;
}
if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
- &physaddr, &attrs, &prot, &page_size, &fsr, &fi, NULL)) {
+ &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
/* the MPU lookup failed */
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
@@ -7856,11 +7905,13 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_S1SE1:
case ARMMMUIdx_S1NSE0:
case ARMMMUIdx_S1NSE1:
+ case ARMMMUIdx_MPrivNegPri:
+ case ARMMMUIdx_MUserNegPri:
case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MNegPri:
case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSPrivNegPri:
+ case ARMMMUIdx_MSUserNegPri:
case ARMMMUIdx_MSPriv:
- case ARMMMUIdx_MSNegPri:
case ARMMMUIdx_MSUser:
return 1;
default:
@@ -7883,8 +7934,7 @@ static inline bool regime_translation_disabled(CPUARMState *env,
(R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
case R_V7M_MPU_CTRL_ENABLE_MASK:
/* Enabled, but not for HardFault and NMI */
- return mmu_idx == ARMMMUIdx_MNegPri ||
- mmu_idx == ARMMMUIdx_MSNegPri;
+ return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
/* Enabled for all cases */
return false;
@@ -8016,6 +8066,9 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_S1SE0:
case ARMMMUIdx_S1NSE0:
case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSUser:
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MSUserNegPri:
return true;
default:
return false;
@@ -8240,7 +8293,6 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
/* Translate a S1 pagetable walk through S2 if needed. */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
hwaddr addr, MemTxAttrs txattrs,
- uint32_t *fsr,
ARMMMUFaultInfo *fi)
{
if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
@@ -8251,7 +8303,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
int ret;
ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
- &txattrs, &s2prot, &s2size, fsr, fi, NULL);
+ &txattrs, &s2prot, &s2size, fi, NULL);
if (ret) {
fi->s2addr = addr;
fi->stage2 = true;
@@ -8271,8 +8323,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
* (but not if it was for a debug access).
*/
static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
- ARMMMUIdx mmu_idx, uint32_t *fsr,
- ARMMMUFaultInfo *fi)
+ ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
@@ -8281,7 +8332,7 @@ static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
attrs.secure = is_secure;
as = arm_addressspace(cs, attrs);
- addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
+ addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
if (fi->s1ptw) {
return 0;
}
@@ -8293,8 +8344,7 @@ static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
}
static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
- ARMMMUIdx mmu_idx, uint32_t *fsr,
- ARMMMUFaultInfo *fi)
+ ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
@@ -8303,7 +8353,7 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
attrs.secure = is_secure;
as = arm_addressspace(cs, attrs);
- addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
+ addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
if (fi->s1ptw) {
return 0;
}
@@ -8317,11 +8367,11 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot,
- target_ulong *page_size, uint32_t *fsr,
+ target_ulong *page_size,
ARMMMUFaultInfo *fi)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
- int code;
+ int level = 1;
uint32_t table;
uint32_t desc;
int type;
@@ -8335,11 +8385,11 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
/* Lookup l1 descriptor. */
if (!get_level1_table_address(env, mmu_idx, &table, address)) {
/* Section translation fault if page walk is disabled by PD0 or PD1 */
- code = 5;
+ fi->type = ARMFault_Translation;
goto do_fault;
}
desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
- mmu_idx, fsr, fi);
+ mmu_idx, fi);
type = (desc & 3);
domain = (desc >> 5) & 0x0f;
if (regime_el(env, mmu_idx) == 1) {
@@ -8350,21 +8400,20 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
domain_prot = (dacr >> (domain * 2)) & 3;
if (type == 0) {
/* Section translation fault. */
- code = 5;
+ fi->type = ARMFault_Translation;
goto do_fault;
}
+ if (type != 2) {
+ level = 2;
+ }
if (domain_prot == 0 || domain_prot == 2) {
- if (type == 2)
- code = 9; /* Section domain fault. */
- else
- code = 11; /* Page domain fault. */
+ fi->type = ARMFault_Domain;
goto do_fault;
}
if (type == 2) {
/* 1Mb section. */
phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
ap = (desc >> 10) & 3;
- code = 13;
*page_size = 1024 * 1024;
} else {
/* Lookup l2 entry. */
@@ -8376,10 +8425,10 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
}
desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
- mmu_idx, fsr, fi);
+ mmu_idx, fi);
switch (desc & 3) {
case 0: /* Page translation fault. */
- code = 7;
+ fi->type = ARMFault_Translation;
goto do_fault;
case 1: /* 64k page. */
phys_addr = (desc & 0xffff0000) | (address & 0xffff);
@@ -8402,7 +8451,7 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
/* UNPREDICTABLE in ARMv5; we choose to take a
* page translation fault.
*/
- code = 7;
+ fi->type = ARMFault_Translation;
goto do_fault;
}
} else {
@@ -8415,29 +8464,29 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
/* Never happens, but compiler isn't smart enough to tell. */
abort();
}
- code = 15;
}
*prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
*prot |= *prot ? PAGE_EXEC : 0;
if (!(*prot & (1 << access_type))) {
/* Access permission fault. */
+ fi->type = ARMFault_Permission;
goto do_fault;
}
*phys_ptr = phys_addr;
return false;
do_fault:
- *fsr = code | (domain << 4);
+ fi->domain = domain;
+ fi->level = level;
return true;
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size, uint32_t *fsr,
- ARMMMUFaultInfo *fi)
+ target_ulong *page_size, ARMMMUFaultInfo *fi)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
- int code;
+ int level = 1;
uint32_t table;
uint32_t desc;
uint32_t xn;
@@ -8454,17 +8503,17 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
/* Lookup l1 descriptor. */
if (!get_level1_table_address(env, mmu_idx, &table, address)) {
/* Section translation fault if page walk is disabled by PD0 or PD1 */
- code = 5;
+ fi->type = ARMFault_Translation;
goto do_fault;
}
desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
- mmu_idx, fsr, fi);
+ mmu_idx, fi);
type = (desc & 3);
if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
/* Section translation fault, or attempt to use the encoding
* which is Reserved on implementations without PXN.
*/
- code = 5;
+ fi->type = ARMFault_Translation;
goto do_fault;
}
if ((type == 1) || !(desc & (1 << 18))) {
@@ -8476,13 +8525,13 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
} else {
dacr = env->cp15.dacr_s;
}
+ if (type == 1) {
+ level = 2;
+ }
domain_prot = (dacr >> (domain * 2)) & 3;
if (domain_prot == 0 || domain_prot == 2) {
- if (type != 1) {
- code = 9; /* Section domain fault. */
- } else {
- code = 11; /* Page domain fault. */
- }
+ /* Section or Page domain fault */
+ fi->type = ARMFault_Domain;
goto do_fault;
}
if (type != 1) {
@@ -8500,7 +8549,6 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
xn = desc & (1 << 4);
pxn = desc & 1;
- code = 13;
ns = extract32(desc, 19, 1);
} else {
if (arm_feature(env, ARM_FEATURE_PXN)) {
@@ -8510,11 +8558,11 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
/* Lookup l2 entry. */
table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
- mmu_idx, fsr, fi);
+ mmu_idx, fi);
ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
switch (desc & 3) {
case 0: /* Page translation fault. */
- code = 7;
+ fi->type = ARMFault_Translation;
goto do_fault;
case 1: /* 64k page. */
phys_addr = (desc & 0xffff0000) | (address & 0xffff);
@@ -8530,7 +8578,6 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
/* Never happens, but compiler isn't smart enough to tell. */
abort();
}
- code = 15;
}
if (domain_prot == 3) {
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
@@ -8538,15 +8585,17 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
if (pxn && !regime_is_user(env, mmu_idx)) {
xn = 1;
}
- if (xn && access_type == MMU_INST_FETCH)
+ if (xn && access_type == MMU_INST_FETCH) {
+ fi->type = ARMFault_Permission;
goto do_fault;
+ }
if (arm_feature(env, ARM_FEATURE_V6K) &&
(regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
/* The simplified model uses AP[0] as an access control bit. */
if ((ap & 1) == 0) {
/* Access flag fault. */
- code = (code == 15) ? 6 : 3;
+ fi->type = ARMFault_AccessFlag;
goto do_fault;
}
*prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
@@ -8558,6 +8607,7 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
}
if (!(*prot & (1 << access_type))) {
/* Access permission fault. */
+ fi->type = ARMFault_Permission;
goto do_fault;
}
}
@@ -8571,19 +8621,11 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
*phys_ptr = phys_addr;
return false;
do_fault:
- *fsr = code | (domain << 4);
+ fi->domain = domain;
+ fi->level = level;
return true;
}
-/* Fault type for long-descriptor MMU fault reporting; this corresponds
- * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
- */
-typedef enum {
- translation_fault = 1,
- access_fault = 2,
- permission_fault = 3,
-} MMUFaultType;
-
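
The hand-built numeric FSR codes removed from get_phys_addr_v5()/get_phys_addr_v6() are not lost: recording the fault type, level and domain lets arm_fi_to_sfsc() (added in internals.h further down) rebuild the same short-format value. A quick check with assumed values, not part of the patch:

    /* Sketch only; assumes target/arm/internals.h is in scope. */
    #include <assert.h>

    static void example_v5_translation_fault_fsr(void)
    {
        /* Old path: code = 7 (page translation fault), *fsr = code | (domain << 4) */
        ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 2, .domain = 3 };

        assert(arm_fi_to_sfsc(&fi) == (0x7 | (3 << 4)));    /* 0x37 */
    }
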
/*
* check_s2_mmu_setup
* @cpu: ARMCPU
@@ -8685,13 +8727,13 @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
- target_ulong *page_size_ptr, uint32_t *fsr,
+ target_ulong *page_size_ptr,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
/* Read an LPAE long-descriptor translation table. */
- MMUFaultType fault_type = translation_fault;
+ ARMFaultType fault_type = ARMFault_Translation;
uint32_t level;
uint32_t epd = 0;
int32_t t0sz, t1sz;
@@ -8801,7 +8843,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
ttbr_select = 1;
} else {
/* in the gap between the two regions, this is a Translation fault */
- fault_type = translation_fault;
+ fault_type = ARMFault_Translation;
goto do_fault;
}
@@ -8887,7 +8929,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
inputsize, stride);
if (!ok) {
- fault_type = translation_fault;
+ fault_type = ARMFault_Translation;
goto do_fault;
}
level = startlevel;
@@ -8921,7 +8963,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
descaddr |= (address >> (stride * (4 - level))) & indexmask;
descaddr &= ~7ULL;
nstable = extract32(tableattrs, 4, 1);
- descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi);
+ descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
if (fi->s1ptw) {
goto do_fault;
}
@@ -8973,7 +9015,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
/* Here descaddr is the final physical address, and attributes
* are all in attrs.
*/
- fault_type = access_fault;
+ fault_type = ARMFault_AccessFlag;
if ((attrs & (1 << 8)) == 0) {
/* Access flag */
goto do_fault;
@@ -8991,7 +9033,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
*prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
}
- fault_type = permission_fault;
+ fault_type = ARMFault_Permission;
if (!(*prot & (1 << access_type))) {
goto do_fault;
}
@@ -9023,8 +9065,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
return false;
do_fault:
- /* Long-descriptor format IFSR/DFSR value */
- *fsr = (1 << 9) | (fault_type << 2) | level;
+ fi->type = fault_type;
+ fi->level = level;
/* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
return true;
@@ -9108,7 +9150,8 @@ static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, int *prot, uint32_t *fsr)
+ hwaddr *phys_ptr, int *prot,
+ ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = arm_env_get_cpu(env);
int n;
@@ -9203,7 +9246,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
if (n == -1) { /* no hits */
if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
/* background fault */
- *fsr = 0;
+ fi->type = ARMFault_Background;
return true;
}
get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
@@ -9261,7 +9304,8 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
}
}
- *fsr = 0x00d; /* Permission fault */
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
return !(*prot & (1 << access_type));
}
@@ -9344,67 +9388,28 @@ static void v8m_security_lookup(CPUARMState *env, uint32_t address,
}
}
-static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *txattrs,
- int *prot, uint32_t *fsr)
+static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, ARMMMUFaultInfo *fi, uint32_t *mregion)
{
+ /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
+ * that a full phys-to-virt translation does).
+ * mregion is (if not NULL) set to the region number which matched,
+ * or -1 if no region number is returned (MPU off, address did not
+ * hit a region, address hit in multiple regions).
+ */
ARMCPU *cpu = arm_env_get_cpu(env);
bool is_user = regime_is_user(env, mmu_idx);
uint32_t secure = regime_is_secure(env, mmu_idx);
int n;
int matchregion = -1;
bool hit = false;
- V8M_SAttributes sattrs = {};
*phys_ptr = address;
*prot = 0;
-
- if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
- if (access_type == MMU_INST_FETCH) {
- /* Instruction fetches always use the MMU bank and the
- * transaction attribute determined by the fetch address,
- * regardless of CPU state. This is painful for QEMU
- * to handle, because it would mean we need to encode
- * into the mmu_idx not just the (user, negpri) information
- * for the current security state but also that for the
- * other security state, which would balloon the number
- * of mmu_idx values needed alarmingly.
- * Fortunately we can avoid this because it's not actually
- * possible to arbitrarily execute code from memory with
- * the wrong security attribute: it will always generate
- * an exception of some kind or another, apart from the
- * special case of an NS CPU executing an SG instruction
- * in S&NSC memory. So we always just fail the translation
- * here and sort things out in the exception handler
- * (including possibly emulating an SG instruction).
- */
- if (sattrs.ns != !secure) {
- *fsr = sattrs.nsc ? M_FAKE_FSR_NSC_EXEC : M_FAKE_FSR_SFAULT;
- return true;
- }
- } else {
- /* For data accesses we always use the MMU bank indicated
- * by the current CPU state, but the security attributes
- * might downgrade a secure access to nonsecure.
- */
- if (sattrs.ns) {
- txattrs->secure = false;
- } else if (!secure) {
- /* NS access to S memory must fault.
- * Architecturally we should first check whether the
- * MPU information for this address indicates that we
- * are doing an unaligned access to Device memory, which
- * should generate a UsageFault instead. QEMU does not
- * currently check for that kind of unaligned access though.
- * If we added it we would need to do so as a special case
- * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
- */
- *fsr = M_FAKE_FSR_SFAULT;
- return true;
- }
- }
+ if (mregion) {
+ *mregion = -1;
}
/* Unlike the ARM ARM pseudocode, we don't need to check whether this
@@ -9442,7 +9447,8 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
/* Multiple regions match -- always a failure (unlike
* PMSAv7 where highest-numbered-region wins)
*/
- *fsr = 0x00d; /* permission fault */
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
return true;
}
@@ -9470,7 +9476,7 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
if (!hit) {
/* background fault */
- *fsr = 0;
+ fi->type = ARMFault_Background;
return true;
}
@@ -9493,15 +9499,88 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
/* We don't need to look the attribute up in the MAIR0/MAIR1
* registers because that only tells us about cacheability.
*/
+ if (mregion) {
+ *mregion = matchregion;
+ }
}
- *fsr = 0x00d; /* Permission fault */
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
return !(*prot & (1 << access_type));
}
+
+static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, ARMMMUFaultInfo *fi)
+{
+ uint32_t secure = regime_is_secure(env, mmu_idx);
+ V8M_SAttributes sattrs = {};
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
+ if (access_type == MMU_INST_FETCH) {
+ /* Instruction fetches always use the MMU bank and the
+ * transaction attribute determined by the fetch address,
+ * regardless of CPU state. This is painful for QEMU
+ * to handle, because it would mean we need to encode
+ * into the mmu_idx not just the (user, negpri) information
+ * for the current security state but also that for the
+ * other security state, which would balloon the number
+ * of mmu_idx values needed alarmingly.
+ * Fortunately we can avoid this because it's not actually
+ * possible to arbitrarily execute code from memory with
+ * the wrong security attribute: it will always generate
+ * an exception of some kind or another, apart from the
+ * special case of an NS CPU executing an SG instruction
+ * in S&NSC memory. So we always just fail the translation
+ * here and sort things out in the exception handler
+ * (including possibly emulating an SG instruction).
+ */
+ if (sattrs.ns != !secure) {
+ if (sattrs.nsc) {
+ fi->type = ARMFault_QEMU_NSCExec;
+ } else {
+ fi->type = ARMFault_QEMU_SFault;
+ }
+ *phys_ptr = address;
+ *prot = 0;
+ return true;
+ }
+ } else {
+ /* For data accesses we always use the MMU bank indicated
+ * by the current CPU state, but the security attributes
+ * might downgrade a secure access to nonsecure.
+ */
+ if (sattrs.ns) {
+ txattrs->secure = false;
+ } else if (!secure) {
+ /* NS access to S memory must fault.
+ * Architecturally we should first check whether the
+ * MPU information for this address indicates that we
+ * are doing an unaligned access to Device memory, which
+ * should generate a UsageFault instead. QEMU does not
+ * currently check for that kind of unaligned access though.
+ * If we added it we would need to do so as a special case
+ * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
+ */
+ fi->type = ARMFault_QEMU_SFault;
+ *phys_ptr = address;
+ *prot = 0;
+ return true;
+ }
+ }
+ }
+
+ return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
+ txattrs, prot, fi, NULL);
+}
+
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, int *prot, uint32_t *fsr)
+ hwaddr *phys_ptr, int *prot,
+ ARMMMUFaultInfo *fi)
{
int n;
uint32_t mask;
@@ -9530,7 +9609,7 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
}
}
if (n < 0) {
- *fsr = 2;
+ fi->type = ARMFault_Background;
return true;
}
@@ -9542,11 +9621,13 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
mask = (mask >> (n * 4)) & 0xf;
switch (mask) {
case 0:
- *fsr = 1;
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
return true;
case 1:
if (is_user) {
- *fsr = 1;
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
return true;
}
*prot = PAGE_READ | PAGE_WRITE;
@@ -9562,7 +9643,8 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
break;
case 5:
if (is_user) {
- *fsr = 1;
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
return true;
}
*prot = PAGE_READ;
@@ -9572,7 +9654,8 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
break;
default:
/* Bad permission. */
- *fsr = 1;
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
return true;
}
*prot |= PAGE_EXEC;
@@ -9689,14 +9772,13 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
* @attrs: set to the memory transaction attributes to use
* @prot: set to the permissions for the page containing phys_ptr
* @page_size: set to the size of the page containing phys_ptr
- * @fsr: set to the DFSR/IFSR value on failure
* @fi: set to fault info if the translation fails
* @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
*/
static bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size, uint32_t *fsr,
+ target_ulong *page_size,
ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
@@ -9711,7 +9793,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
ret = get_phys_addr(env, address, access_type,
stage_1_mmu_idx(mmu_idx), &ipa, attrs,
- prot, page_size, fsr, fi, cacheattrs);
+ prot, page_size, fi, cacheattrs);
/* If S1 fails or S2 is disabled, return early. */
if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
@@ -9722,7 +9804,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
/* S1 is done. Now do S2 translation. */
ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
phys_ptr, attrs, &s2_prot,
- page_size, fsr, fi,
+ page_size, fi,
cacheattrs != NULL ? &cacheattrs2 : NULL);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
@@ -9768,15 +9850,15 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
if (arm_feature(env, ARM_FEATURE_V8)) {
/* PMSAv8 */
ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
- phys_ptr, attrs, prot, fsr);
+ phys_ptr, attrs, prot, fi);
} else if (arm_feature(env, ARM_FEATURE_V7)) {
/* PMSAv7 */
ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
- phys_ptr, prot, fsr);
+ phys_ptr, prot, fi);
} else {
/* Pre-v7 MPU */
ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
- phys_ptr, prot, fsr);
+ phys_ptr, prot, fi);
}
qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
" mmu_idx %u -> %s (prot %c%c%c)\n",
@@ -9802,14 +9884,15 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
}
if (regime_using_lpae_format(env, mmu_idx)) {
- return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
- attrs, prot, page_size, fsr, fi, cacheattrs);
+ return get_phys_addr_lpae(env, address, access_type, mmu_idx,
+ phys_ptr, attrs, prot, page_size,
+ fi, cacheattrs);
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
- return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
- attrs, prot, page_size, fsr, fi);
+ return get_phys_addr_v6(env, address, access_type, mmu_idx,
+ phys_ptr, attrs, prot, page_size, fi);
} else {
- return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
- prot, page_size, fsr, fi);
+ return get_phys_addr_v5(env, address, access_type, mmu_idx,
+ phys_ptr, prot, page_size, fi);
}
}
@@ -9818,7 +9901,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
* fsr with ARM DFSR/IFSR fault register format value on failure.
*/
bool arm_tlb_fill(CPUState *cs, vaddr address,
- MMUAccessType access_type, int mmu_idx, uint32_t *fsr,
+ MMUAccessType access_type, int mmu_idx,
ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -9831,7 +9914,7 @@ bool arm_tlb_fill(CPUState *cs, vaddr address,
ret = get_phys_addr(env, address, access_type,
core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
- &attrs, &prot, &page_size, fsr, fi, NULL);
+ &attrs, &prot, &page_size, fi, NULL);
if (!ret) {
/* Map a single [sub]page. */
phys_addr &= TARGET_PAGE_MASK;
@@ -9853,14 +9936,13 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
target_ulong page_size;
int prot;
bool ret;
- uint32_t fsr;
ARMMMUFaultInfo fi = {};
ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
*attrs = (MemTxAttrs) {};
ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
- attrs, &prot, &page_size, &fsr, &fi, NULL);
+ attrs, &prot, &page_size, &fi, NULL);
if (ret) {
return -1;
@@ -9953,11 +10035,9 @@ uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
switch (reg) {
case 8: /* MSP */
- return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
- env->v7m.other_sp : env->regs[13];
+ return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
case 9: /* PSP */
- return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
- env->regs[13] : env->v7m.other_sp;
+ return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
case 16: /* PRIMASK */
return env->v7m.primask[env->v7m.secure];
case 17: /* BASEPRI */
@@ -10059,14 +10139,14 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
}
break;
case 8: /* MSP */
- if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
+ if (v7m_using_psp(env)) {
env->v7m.other_sp = val;
} else {
env->regs[13] = val;
}
break;
case 9: /* PSP */
- if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
+ if (v7m_using_psp(env)) {
env->regs[13] = val;
} else {
env->v7m.other_sp = val;
@@ -10093,8 +10173,11 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
* thread mode; other bits can be updated by any privileged code.
* write_v7m_control_spsel() deals with updating the SPSEL bit in
* env->v7m.control, so we only need update the others.
+ * For v7M, we must just ignore explicit writes to SPSEL in handler
+ * mode; for v8M the write is permitted but will have no effect.
*/
- if (!arm_v7m_is_handler_mode(env)) {
+ if (arm_feature(env, ARM_FEATURE_V8) ||
+ !arm_v7m_is_handler_mode(env)) {
write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
}
env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
@@ -10107,6 +10190,92 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
}
}
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+ /* Implement the TT instruction. op is bits [7:6] of the insn. */
+ bool forceunpriv = op & 1;
+ bool alt = op & 2;
+ V8M_SAttributes sattrs = {};
+ uint32_t tt_resp;
+ bool r, rw, nsr, nsrw, mrvalid;
+ int prot;
+ ARMMMUFaultInfo fi = {};
+ MemTxAttrs attrs = {};
+ hwaddr phys_addr;
+ ARMMMUIdx mmu_idx;
+ uint32_t mregion;
+ bool targetpriv;
+ bool targetsec = env->v7m.secure;
+
+ /* Work out what the security state and privilege level we're
+ * interested in is...
+ */
+ if (alt) {
+ targetsec = !targetsec;
+ }
+
+ if (forceunpriv) {
+ targetpriv = false;
+ } else {
+ targetpriv = arm_v7m_is_handler_mode(env) ||
+ !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
+ }
+
+ /* ...and then figure out which MMU index this is */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
+
+ /* We know that the MPU and SAU don't care about the access type
+ * for our purposes beyond that we don't want to claim to be
+ * an insn fetch, so we arbitrarily call this a read.
+ */
+
+ /* MPU region info only available for privileged or if
+ * inspecting the other MPU state.
+ */
+ if (arm_current_el(env) != 0 || alt) {
+ /* We can ignore the return value as prot is always set */
+ pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
+ &phys_addr, &attrs, &prot, &fi, &mregion);
+ if (mregion == -1) {
+ mrvalid = false;
+ mregion = 0;
+ } else {
+ mrvalid = true;
+ }
+ r = prot & PAGE_READ;
+ rw = prot & PAGE_WRITE;
+ } else {
+ r = false;
+ rw = false;
+ mrvalid = false;
+ mregion = 0;
+ }
+
+ if (env->v7m.secure) {
+ v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
+ nsr = sattrs.ns && r;
+ nsrw = sattrs.ns && rw;
+ } else {
+ sattrs.ns = true;
+ nsr = false;
+ nsrw = false;
+ }
+
+ tt_resp = (sattrs.iregion << 24) |
+ (sattrs.irvalid << 23) |
+ ((!sattrs.ns) << 22) |
+ (nsrw << 21) |
+ (nsr << 20) |
+ (rw << 19) |
+ (r << 18) |
+ (sattrs.srvalid << 17) |
+ (mrvalid << 16) |
+ (sattrs.sregion << 8) |
+ mregion;
+
+ return tt_resp;
+}
+
#endif
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
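
HELPER(v7m_tt) above packs its result into the architectural TTRESP layout. As a sketch of how the fields come back out, here are hypothetical accessors (names are illustrative, not part of the patch) mirroring that packing:

    #include <stdbool.h>
    #include <stdint.h>

    /* Field positions follow the tt_resp assembly in HELPER(v7m_tt) above. */
    static inline uint32_t tt_resp_mregion(uint32_t resp) { return resp & 0xff; }
    static inline uint32_t tt_resp_sregion(uint32_t resp) { return (resp >> 8) & 0xff; }
    static inline bool tt_resp_mrvalid(uint32_t resp) { return resp & (1u << 16); }
    static inline bool tt_resp_srvalid(uint32_t resp) { return resp & (1u << 17); }
    static inline bool tt_resp_r(uint32_t resp)       { return resp & (1u << 18); }
    static inline bool tt_resp_rw(uint32_t resp)      { return resp & (1u << 19); }
    static inline bool tt_resp_nsr(uint32_t resp)     { return resp & (1u << 20); }
    static inline bool tt_resp_nsrw(uint32_t resp)    { return resp & (1u << 21); }
    static inline bool tt_resp_s(uint32_t resp)       { return resp & (1u << 22); }
    static inline bool tt_resp_irvalid(uint32_t resp) { return resp & (1u << 23); }
    static inline uint32_t tt_resp_iregion(uint32_t resp) { return resp >> 24; }
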
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 439d228420..066729e8ad 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -66,6 +66,8 @@ DEF_HELPER_2(v7m_mrs, i32, env, i32)
DEF_HELPER_2(v7m_bxns, void, env, i32)
DEF_HELPER_2(v7m_blxns, void, env, i32)
+DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
+
DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
DEF_HELPER_2(get_cp_reg, i32, env, ptr)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index d9cc75e4c5..876854d876 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -488,7 +488,39 @@ static inline void arm_clear_exclusive(CPUARMState *env)
}
/**
+ * ARMFaultType: type of an ARM MMU fault
+ * This corresponds to the v8A pseudocode's Fault enumeration,
+ * with extensions for QEMU internal conditions.
+ */
+typedef enum ARMFaultType {
+ ARMFault_None,
+ ARMFault_AccessFlag,
+ ARMFault_Alignment,
+ ARMFault_Background,
+ ARMFault_Domain,
+ ARMFault_Permission,
+ ARMFault_Translation,
+ ARMFault_AddressSize,
+ ARMFault_SyncExternal,
+ ARMFault_SyncExternalOnWalk,
+ ARMFault_SyncParity,
+ ARMFault_SyncParityOnWalk,
+ ARMFault_AsyncParity,
+ ARMFault_AsyncExternal,
+ ARMFault_Debug,
+ ARMFault_TLBConflict,
+ ARMFault_Lockdown,
+ ARMFault_Exclusive,
+ ARMFault_ICacheMaint,
+ ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
+ ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
+} ARMFaultType;
+
+/**
* ARMMMUFaultInfo: Information describing an ARM MMU Fault
+ * @type: Type of fault
+ * @level: Table walk level (for translation, access flag and permission faults)
+ * @domain: Domain of the fault address (for non-LPAE CPUs only)
* @s2addr: Address that caused a fault at stage 2
* @stage2: True if we faulted at stage 2
* @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
@@ -496,16 +528,169 @@ static inline void arm_clear_exclusive(CPUARMState *env)
*/
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
+ ARMFaultType type;
target_ulong s2addr;
+ int level;
+ int domain;
bool stage2;
bool s1ptw;
bool ea;
};
+/**
+ * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
+ * Compare pseudocode EncodeSDFSC(), though unlike that function
+ * we set up a whole FSR-format code including domain field and
+ * putting the high bit of the FSC into bit 10.
+ */
+static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
+{
+ uint32_t fsc;
+
+ switch (fi->type) {
+ case ARMFault_None:
+ return 0;
+ case ARMFault_AccessFlag:
+ fsc = fi->level == 1 ? 0x3 : 0x6;
+ break;
+ case ARMFault_Alignment:
+ fsc = 0x1;
+ break;
+ case ARMFault_Permission:
+ fsc = fi->level == 1 ? 0xd : 0xf;
+ break;
+ case ARMFault_Domain:
+ fsc = fi->level == 1 ? 0x9 : 0xb;
+ break;
+ case ARMFault_Translation:
+ fsc = fi->level == 1 ? 0x5 : 0x7;
+ break;
+ case ARMFault_SyncExternal:
+ fsc = 0x8 | (fi->ea << 12);
+ break;
+ case ARMFault_SyncExternalOnWalk:
+ fsc = fi->level == 1 ? 0xc : 0xe;
+ fsc |= (fi->ea << 12);
+ break;
+ case ARMFault_SyncParity:
+ fsc = 0x409;
+ break;
+ case ARMFault_SyncParityOnWalk:
+ fsc = fi->level == 1 ? 0x40c : 0x40e;
+ break;
+ case ARMFault_AsyncParity:
+ fsc = 0x408;
+ break;
+ case ARMFault_AsyncExternal:
+ fsc = 0x406 | (fi->ea << 12);
+ break;
+ case ARMFault_Debug:
+ fsc = 0x2;
+ break;
+ case ARMFault_TLBConflict:
+ fsc = 0x400;
+ break;
+ case ARMFault_Lockdown:
+ fsc = 0x404;
+ break;
+ case ARMFault_Exclusive:
+ fsc = 0x405;
+ break;
+ case ARMFault_ICacheMaint:
+ fsc = 0x4;
+ break;
+ case ARMFault_Background:
+ fsc = 0x0;
+ break;
+ case ARMFault_QEMU_NSCExec:
+ fsc = M_FAKE_FSR_NSC_EXEC;
+ break;
+ case ARMFault_QEMU_SFault:
+ fsc = M_FAKE_FSR_SFAULT;
+ break;
+ default:
+ /* Other faults can't occur in a context that requires a
+ * short-format status code.
+ */
+ g_assert_not_reached();
+ }
+
+ fsc |= (fi->domain << 4);
+ return fsc;
+}
+
+/**
+ * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
+ * Compare pseudocode EncodeLDFSC(), though unlike that function
+ * we fill in also the LPAE bit 9 of a DFSR format.
+ */
+static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
+{
+ uint32_t fsc;
+
+ switch (fi->type) {
+ case ARMFault_None:
+ return 0;
+ case ARMFault_AddressSize:
+ fsc = fi->level & 3;
+ break;
+ case ARMFault_AccessFlag:
+ fsc = (fi->level & 3) | (0x2 << 2);
+ break;
+ case ARMFault_Permission:
+ fsc = (fi->level & 3) | (0x3 << 2);
+ break;
+ case ARMFault_Translation:
+ fsc = (fi->level & 3) | (0x1 << 2);
+ break;
+ case ARMFault_SyncExternal:
+ fsc = 0x10 | (fi->ea << 12);
+ break;
+ case ARMFault_SyncExternalOnWalk:
+ fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
+ break;
+ case ARMFault_SyncParity:
+ fsc = 0x18;
+ break;
+ case ARMFault_SyncParityOnWalk:
+ fsc = (fi->level & 3) | (0x7 << 2);
+ break;
+ case ARMFault_AsyncParity:
+ fsc = 0x19;
+ break;
+ case ARMFault_AsyncExternal:
+ fsc = 0x11 | (fi->ea << 12);
+ break;
+ case ARMFault_Alignment:
+ fsc = 0x21;
+ break;
+ case ARMFault_Debug:
+ fsc = 0x22;
+ break;
+ case ARMFault_TLBConflict:
+ fsc = 0x30;
+ break;
+ case ARMFault_Lockdown:
+ fsc = 0x34;
+ break;
+ case ARMFault_Exclusive:
+ fsc = 0x35;
+ break;
+ default:
+ /* Other faults can't occur in a context that requires a
+ * long-format status code.
+ */
+ g_assert_not_reached();
+ }
+
+ fsc |= 1 << 9;
+ return fsc;
+}
+
/* Do a page table walk and add page to TLB if possible */
bool arm_tlb_fill(CPUState *cpu, vaddr address,
MMUAccessType access_type, int mmu_idx,
- uint32_t *fsr, ARMMMUFaultInfo *fi);
+ ARMMMUFaultInfo *fi);
/* Return true if the stage 1 translation regime is using LPAE format page
* tables */
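
Both converters place the IMPDEF EA bit at bit 12 of the returned value, as the open-coded FSR construction removed from op_helper.c below did. A worked example with assumed fault values, not part of the patch:

    /* Sketch only; assumes the declarations above are in scope. */
    #include <assert.h>

    static void example_external_abort_on_walk(void)
    {
        /* Synchronous external abort on a level-1 table walk, EA set */
        ARMMMUFaultInfo fi = {
            .type = ARMFault_SyncExternalOnWalk, .level = 1, .ea = true,
        };

        assert(arm_fi_to_sfsc(&fi) == 0x100c);   /* EA (bit 12) | FS 0b01100 */
        assert(arm_fi_to_lfsc(&fi) == 0x1215);   /* EA | LPAE bit 9 | FSC 0b010101 */
    }
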
@@ -544,15 +729,17 @@ static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
case ARMMMUIdx_S1NSE1:
case ARMMMUIdx_S1E2:
case ARMMMUIdx_S2NS:
+ case ARMMMUIdx_MPrivNegPri:
+ case ARMMMUIdx_MUserNegPri:
case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MNegPri:
case ARMMMUIdx_MUser:
return false;
case ARMMMUIdx_S1E3:
case ARMMMUIdx_S1SE0:
case ARMMMUIdx_S1SE1:
+ case ARMMMUIdx_MSPrivNegPri:
+ case ARMMMUIdx_MSUserNegPri:
case ARMMMUIdx_MSPriv:
- case ARMMMUIdx_MSNegPri:
case ARMMMUIdx_MSUser:
return true;
default:
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index a40a84ac24..b36206343d 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -116,12 +116,13 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
}
static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
- uint32_t fsr, uint32_t fsc, ARMMMUFaultInfo *fi)
+ int mmu_idx, ARMMMUFaultInfo *fi)
{
CPUARMState *env = &cpu->env;
int target_el;
bool same_el;
- uint32_t syn, exc;
+ uint32_t syn, exc, fsr, fsc;
+ ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
target_el = exception_target_el(env);
if (fi->stage2) {
@@ -130,14 +131,21 @@ static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
}
same_el = (arm_current_el(env) == target_el);
- if (fsc == 0x3f) {
- /* Caller doesn't have a long-format fault status code. This
- * should only happen if this fault will never actually be reported
- * to an EL that uses a syndrome register. Check that here.
- * 0x3f is a (currently) reserved FSC code, in case the constructed
- * syndrome does leak into the guest somehow.
+ if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+ arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
+ /* LPAE format fault status register : bottom 6 bits are
+ * status code in the same form as needed for syndrome
+ */
+ fsr = arm_fi_to_lfsc(fi);
+ fsc = extract32(fsr, 0, 6);
+ } else {
+ fsr = arm_fi_to_sfsc(fi);
+ /* Short format FSR : this fault will never actually be reported
+ * to an EL that uses a syndrome register. Use a (currently)
+ * reserved FSR code in case the constructed syndrome does leak
+ * into the guest somehow.
*/
- assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
+ fsc = 0x3f;
}
if (access_type == MMU_INST_FETCH) {
@@ -168,35 +176,16 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
{
bool ret;
- uint32_t fsr = 0;
ARMMMUFaultInfo fi = {};
- ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
+ ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
if (unlikely(ret)) {
ARMCPU *cpu = ARM_CPU(cs);
- uint32_t fsc;
-
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- if (fsr & (1 << 9)) {
- /* LPAE format fault status register : bottom 6 bits are
- * status code in the same form as needed for syndrome
- */
- fsc = extract32(fsr, 0, 6);
- } else {
- /* Short format FSR : this fault will never actually be reported
- * to an EL that uses a syndrome register. Use a (currently)
- * reserved FSR code in case the constructed syndrome does leak
- * into the guest somehow. deliver_fault will assert that
- * we don't target an EL using the syndrome.
- */
- fsc = 0x3f;
- }
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
- deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
+ deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
}
@@ -206,27 +195,13 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
int mmu_idx, uintptr_t retaddr)
{
ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- uint32_t fsr, fsc;
ARMMMUFaultInfo fi = {};
- ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
-
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- /* the DFSR for an alignment fault depends on whether we're using
- * the LPAE long descriptor format, or the short descriptor format
- */
- if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
- fsr = (1 << 9) | 0x21;
- } else {
- fsr = 0x1;
- }
- fsc = 0x21;
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
- deliver_fault(cpu, vaddr, access_type, fsr, fsc, &fi);
+ fi.type = ARMFault_Alignment;
+ deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
/* arm_cpu_do_transaction_failed: handle a memory system error response
@@ -240,15 +215,10 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
MemTxResult response, uintptr_t retaddr)
{
ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- uint32_t fsr, fsc;
ARMMMUFaultInfo fi = {};
- ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
/* The EA bit in syndromes and fault status registers is an
* IMPDEF classification of external aborts. ARM implementations
@@ -256,20 +226,8 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
* Slave error (1); in QEMU we follow that.
*/
fi.ea = (response != MEMTX_DECODE_ERROR);
-
- /* The fault status register format depends on whether we're using
- * the LPAE long descriptor format, or the short descriptor format.
- */
- if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
- /* long descriptor form, STATUS 0b010000: synchronous ext abort */
- fsr = (fi.ea << 12) | (1 << 9) | 0x10;
- } else {
- /* short descriptor form, FSR 0b01000 : synchronous ext abort */
- fsr = (fi.ea << 12) | 0x8;
- }
- fsc = 0x10;
-
- deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
+ fi.type = ARMFault_SyncExternal;
+ deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
#endif /* !defined(CONFIG_USER_ONLY) */
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 625ef2dfd2..ba94f7d045 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -405,10 +405,7 @@ static void unallocated_encoding(DisasContext *s)
static void init_tmp_a64_array(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
- int i;
- for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
- TCGV_UNUSED_I64(s->tmp_a64[i]);
- }
+ memset(s->tmp_a64, 0, sizeof(s->tmp_a64));
#endif
s->tmp_a64_count = 0;
}
@@ -6276,7 +6273,7 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
return;
}
- TCGV_UNUSED_PTR(fpst);
+ fpst = NULL;
break;
case 0xc: /* FMAXNMP */
case 0xd: /* FADDP */
@@ -6371,7 +6368,7 @@ static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
tcg_temp_free_i32(tcg_res);
}
- if (!TCGV_IS_UNUSED_PTR(fpst)) {
+ if (fpst) {
tcg_temp_free_ptr(fpst);
}
}
@@ -6387,7 +6384,7 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
bool is_u, int size, int shift)
{
bool extended_result = false;
- bool round = !TCGV_IS_UNUSED_I64(tcg_rnd);
+ bool round = tcg_rnd != NULL;
int ext_lshift = 0;
TCGv_i64 tcg_src_hi;
@@ -6533,7 +6530,7 @@ static void handle_scalar_simd_shri(DisasContext *s,
uint64_t round_const = 1ULL << (shift - 1);
tcg_round = tcg_const_i64(round_const);
} else {
- TCGV_UNUSED_I64(tcg_round);
+ tcg_round = NULL;
}
tcg_rn = read_fp_dreg(s, rn);
@@ -6649,7 +6646,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
uint64_t round_const = 1ULL << (shift - 1);
tcg_round = tcg_const_i64(round_const);
} else {
- TCGV_UNUSED_I64(tcg_round);
+ tcg_round = NULL;
}
for (i = 0; i < elements; i++) {
@@ -8239,8 +8236,8 @@ static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
tcg_fpstatus = get_fpstatus_ptr();
} else {
- TCGV_UNUSED_I32(tcg_rmode);
- TCGV_UNUSED_PTR(tcg_fpstatus);
+ tcg_rmode = NULL;
+ tcg_fpstatus = NULL;
}
if (size == 3) {
@@ -8360,7 +8357,7 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
uint64_t round_const = 1ULL << (shift - 1);
tcg_round = tcg_const_i64(round_const);
} else {
- TCGV_UNUSED_I64(tcg_round);
+ tcg_round = NULL;
}
for (i = 0; i < elements; i++) {
@@ -8502,7 +8499,7 @@ static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
uint64_t round_const = 1ULL << (shift - 1);
tcg_round = tcg_const_i64(round_const);
} else {
- TCGV_UNUSED_I64(tcg_round);
+ tcg_round = NULL;
}
for (i = 0; i < elements; i++) {
@@ -9168,7 +9165,7 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
if (opcode >= 0x58) {
fpst = get_fpstatus_ptr();
} else {
- TCGV_UNUSED_PTR(fpst);
+ fpst = NULL;
}
if (!fp_access_check(s)) {
@@ -9305,7 +9302,7 @@ static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
}
}
- if (!TCGV_IS_UNUSED_PTR(fpst)) {
+ if (fpst) {
tcg_temp_free_ptr(fpst);
}
}
@@ -10226,13 +10223,13 @@ static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
if (need_fpstatus) {
tcg_fpstatus = get_fpstatus_ptr();
} else {
- TCGV_UNUSED_PTR(tcg_fpstatus);
+ tcg_fpstatus = NULL;
}
if (need_rmode) {
tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
} else {
- TCGV_UNUSED_I32(tcg_rmode);
+ tcg_rmode = NULL;
}
if (size == 3) {
@@ -10593,7 +10590,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
if (is_fp) {
fpst = get_fpstatus_ptr();
} else {
- TCGV_UNUSED_PTR(fpst);
+ fpst = NULL;
}
if (size == 3) {
@@ -10917,7 +10914,7 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
}
}
- if (!TCGV_IS_UNUSED_PTR(fpst)) {
+ if (fpst) {
tcg_temp_free_ptr(fpst);
}
}
@@ -11293,8 +11290,8 @@ static void aarch64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- dc->insn_start_idx = tcg_op_buf_count();
tcg_gen_insn_start(dc->pc, 0, 0);
+ dc->insn_start = tcg_last_op();
}
static bool aarch64_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
diff --git a/target/arm/translate.c b/target/arm/translate.c
index f120932f44..c690658493 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -159,12 +159,16 @@ static inline int get_a32_user_mem_index(DisasContext *s)
return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
case ARMMMUIdx_MUser:
case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MNegPri:
return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MPrivNegPri:
+ return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
case ARMMMUIdx_MSUser:
case ARMMMUIdx_MSPriv:
- case ARMMMUIdx_MSNegPri:
return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
+ case ARMMMUIdx_MSUserNegPri:
+ case ARMMMUIdx_MSPrivNegPri:
+ return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
case ARMMMUIdx_S2NS:
default:
g_assert_not_reached();
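
Because of the flag-bit encoding introduced in cpu.h, the M-profile arms of this switch amount to "clear the PRIV bit, keep NegPri and Secure". A sketch of that equivalence (illustrative only; the switch above returns core TLB indexes via arm_to_core_mmu_idx(), not ARMMMUIdx values):

    /* Sketch only; assumes target/arm/cpu.h is in scope. */
    static inline ARMMMUIdx m_unpriv_variant(ARMMMUIdx idx)
    {
        /* e.g. ARMMMUIdx_MSPrivNegPri -> ARMMMUIdx_MSUserNegPri */
        return (ARMMMUIdx)(idx & ~ARM_MMU_IDX_M_PRIV);
    }
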
@@ -2165,8 +2169,8 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
tmp3 = tcg_const_i32((insn & 1) << 5);
break;
default:
- TCGV_UNUSED_I32(tmp2);
- TCGV_UNUSED_I32(tmp3);
+ tmp2 = NULL;
+ tmp3 = NULL;
}
gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
tcg_temp_free_i32(tmp3);
@@ -4935,7 +4939,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
}
} else /* size == 0 */ {
if (load) {
- TCGV_UNUSED_I32(tmp2);
+ tmp2 = NULL;
for (n = 0; n < 4; n++) {
tmp = tcg_temp_new_i32();
gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
@@ -6639,11 +6643,11 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tmp = neon_load_reg(rn, 1);
neon_store_scratch(2, tmp);
}
- TCGV_UNUSED_I32(tmp3);
+ tmp3 = NULL;
for (pass = 0; pass < 2; pass++) {
if (src1_wide) {
neon_load_reg64(cpu_V0, rn + pass);
- TCGV_UNUSED_I32(tmp);
+ tmp = NULL;
} else {
if (pass == 1 && rd == rn) {
tmp = neon_load_scratch(2);
@@ -6656,7 +6660,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
if (src2_wide) {
neon_load_reg64(cpu_V1, rm + pass);
- TCGV_UNUSED_I32(tmp2);
+ tmp2 = NULL;
} else {
if (pass == 1 && rd == rm) {
tmp2 = neon_load_scratch(2);
@@ -7074,7 +7078,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
if (rm & 1) {
return 1;
}
- TCGV_UNUSED_I32(tmp2);
+ tmp2 = NULL;
for (pass = 0; pass < 2; pass++) {
neon_load_reg64(cpu_V0, rm + pass);
tmp = tcg_temp_new_i32();
@@ -7213,7 +7217,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
if (neon_2rm_is_float_op(op)) {
tcg_gen_ld_f32(cpu_F0s, cpu_env,
neon_reg_offset(rm, pass));
- TCGV_UNUSED_I32(tmp);
+ tmp = NULL;
} else {
tmp = neon_load_reg(rm, pass);
}
@@ -8662,7 +8666,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
rn = (insn >> 16) & 0xf;
tmp = load_reg(s, rn);
} else {
- TCGV_UNUSED_I32(tmp);
+ tmp = NULL;
}
rd = (insn >> 12) & 0xf;
switch(op1) {
@@ -9501,7 +9505,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
/* compute total size */
loaded_base = 0;
- TCGV_UNUSED_I32(loaded_var);
+ loaded_var = NULL;
n = 0;
for(i=0;i<16;i++) {
if (insn & (1 << i))
@@ -9806,7 +9810,7 @@ static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
if (insn & (1 << 22)) {
/* 0b1110_100x_x1xx_xxxx_xxxx_xxxx_xxxx_xxxx
* - load/store doubleword, load/store exclusive, ldacq/strel,
- * table branch.
+ * table branch, TT.
*/
if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
arm_dc_feature(s, ARM_FEATURE_V8)) {
@@ -9883,8 +9887,35 @@ static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
} else if ((insn & (1 << 23)) == 0) {
/* 0b1110_1000_010x_xxxx_xxxx_xxxx_xxxx_xxxx
* - load/store exclusive word
+ * - TT (v8M only)
*/
if (rs == 15) {
+ if (!(insn & (1 << 20)) &&
+ arm_dc_feature(s, ARM_FEATURE_M) &&
+ arm_dc_feature(s, ARM_FEATURE_V8)) {
+ /* 0b1110_1000_0100_xxxx_1111_xxxx_xxxx_xxxx
+ * - TT (v8M only)
+ */
+ bool alt = insn & (1 << 7);
+ TCGv_i32 addr, op, ttresp;
+
+ if ((insn & 0x3f) || rd == 13 || rd == 15 || rn == 15) {
+ /* we UNDEF for these UNPREDICTABLE cases */
+ goto illegal_op;
+ }
+
+ if (alt && !s->v8m_secure) {
+ goto illegal_op;
+ }
+
+ addr = load_reg(s, rn);
+ op = tcg_const_i32(extract32(insn, 6, 2));
+ ttresp = tcg_temp_new_i32();
+ gen_helper_v7m_tt(ttresp, cpu_env, addr, op);
+ tcg_temp_free_i32(addr);
+ tcg_temp_free_i32(op);
+ store_reg(s, rd, ttresp);
+ break;
+ }
goto illegal_op;
}
addr = tcg_temp_local_new_i32();
@@ -10043,7 +10074,7 @@ static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
tcg_gen_addi_i32(addr, addr, -offset);
}
- TCGV_UNUSED_I32(loaded_var);
+ loaded_var = NULL;
for (i = 0; i < 16; i++) {
if ((insn & (1 << i)) == 0)
continue;
@@ -11324,7 +11355,7 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
} else if (op != 0xf) { /* mvn doesn't read its first operand */
tmp = load_reg(s, rd);
} else {
- TCGV_UNUSED_I32(tmp);
+ tmp = NULL;
}
tmp2 = load_reg(s, rm);
@@ -11655,7 +11686,7 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
tcg_gen_addi_i32(addr, addr, 4);
}
}
- TCGV_UNUSED_I32(tmp);
+ tmp = NULL;
if (insn & (1 << 8)) {
if (insn & (1 << 11)) {
/* pop pc */
@@ -11800,8 +11831,7 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
case 12:
{
/* load/store multiple */
- TCGv_i32 loaded_var;
- TCGV_UNUSED_I32(loaded_var);
+ TCGv_i32 loaded_var = NULL;
rn = (insn >> 8) & 0x7;
addr = load_reg(s, rn);
for (i = 0; i < 8; i++) {
@@ -12066,10 +12096,10 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- dc->insn_start_idx = tcg_op_buf_count();
tcg_gen_insn_start(dc->pc,
(dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
0);
+ dc->insn_start = tcg_last_op();
}
static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
diff --git a/target/arm/translate.h b/target/arm/translate.h
index 410ba79c0d..cd7313ace7 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -66,8 +66,8 @@ typedef struct DisasContext {
bool ss_same_el;
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
int c15_cpar;
- /* TCG op index of the current insn_start. */
- int insn_start_idx;
+ /* TCG op of the current insn_start. */
+ TCGOp *insn_start;
#define TMP_A64_MAX 16
int tmp_a64_count;
TCGv_i64 tmp_a64[TMP_A64_MAX];
@@ -117,9 +117,9 @@ static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
syn >>= ARM_INSN_START_WORD2_SHIFT;
/* We check and clear insn_start_idx to catch multiple updates. */
- assert(s->insn_start_idx != 0);
- tcg_set_insn_param(s->insn_start_idx, 2, syn);
- s->insn_start_idx = 0;
+ assert(s->insn_start != NULL);
+ tcg_set_insn_param(s->insn_start, 2, syn);
+ s->insn_start = NULL;
}
/* is_jmp field values */
diff --git a/target/cris/translate.c b/target/cris/translate.c
index 2831419845..f51a731db9 100644
--- a/target/cris/translate.c
+++ b/target/cris/translate.c
@@ -2603,7 +2603,7 @@ static int dec_movem_mr(CPUCRISState *env, DisasContext *dc)
tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
gen_load(dc, tmp32, addr, 4, 0);
} else {
- TCGV_UNUSED(tmp32);
+ tmp32 = NULL;
}
tcg_temp_free(addr);
@@ -3297,8 +3297,6 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
qemu_log("--------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start);
- qemu_log("\nisize=%d osize=%d\n",
- dc->pc - pc_start, tcg_op_buf_count());
qemu_log_unlock();
}
#endif
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index 53aa1f88c4..31d9a2a31b 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -125,7 +125,7 @@ void hppa_translate_init(void)
int i;
- TCGV_UNUSED(cpu_gr[0]);
+ cpu_gr[0] = NULL;
for (i = 1; i < 32; i++) {
cpu_gr[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUHPPAState, gr[i]),
@@ -140,28 +140,31 @@ void hppa_translate_init(void)
static DisasCond cond_make_f(void)
{
- DisasCond r = { .c = TCG_COND_NEVER };
- TCGV_UNUSED(r.a0);
- TCGV_UNUSED(r.a1);
- return r;
+ return (DisasCond){
+ .c = TCG_COND_NEVER,
+ .a0 = NULL,
+ .a1 = NULL,
+ };
}
static DisasCond cond_make_n(void)
{
- DisasCond r = { .c = TCG_COND_NE, .a0_is_n = true, .a1_is_0 = true };
- r.a0 = cpu_psw_n;
- TCGV_UNUSED(r.a1);
- return r;
+ return (DisasCond){
+ .c = TCG_COND_NE,
+ .a0 = cpu_psw_n,
+ .a0_is_n = true,
+ .a1 = NULL,
+ .a1_is_0 = true
+ };
}
static DisasCond cond_make_0(TCGCond c, TCGv a0)
{
- DisasCond r = { .c = c, .a1_is_0 = true };
+ DisasCond r = { .c = c, .a1 = NULL, .a1_is_0 = true };
assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
r.a0 = tcg_temp_new();
tcg_gen_mov_tl(r.a0, a0);
- TCGV_UNUSED(r.a1);
return r;
}
@@ -199,8 +202,8 @@ static void cond_free(DisasCond *cond)
}
cond->a0_is_n = false;
cond->a1_is_0 = false;
- TCGV_UNUSED(cond->a0);
- TCGV_UNUSED(cond->a1);
+ cond->a0 = NULL;
+ cond->a1 = NULL;
/* fallthru */
case TCG_COND_ALWAYS:
cond->c = TCG_COND_NEVER;
@@ -716,9 +719,8 @@ static DisasCond do_sed_cond(unsigned orig, TCGv res)
static DisasCond do_unit_cond(unsigned cf, TCGv res, TCGv in1, TCGv in2)
{
DisasCond cond;
- TCGv tmp, cb;
+ TCGv tmp, cb = NULL;
- TCGV_UNUSED(cb);
if (cf & 8) {
/* Since we want to test lots of carry-out bits all at once, do not
* do our normal thing and compute carry-in of bit B+1 since that
@@ -826,8 +828,8 @@ static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
DisasCond cond;
dest = tcg_temp_new();
- TCGV_UNUSED(cb);
- TCGV_UNUSED(cb_msb);
+ cb = NULL;
+ cb_msb = NULL;
if (shift) {
tmp = get_temp(ctx);
@@ -856,7 +858,7 @@ static DisasJumpType do_add(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
}
/* Compute signed overflow if required. */
- TCGV_UNUSED(sv);
+ sv = NULL;
if (is_tsv || c == 6) {
sv = do_add_sv(ctx, dest, in1, in2);
if (is_tsv) {
@@ -919,7 +921,7 @@ static DisasJumpType do_sub(DisasContext *ctx, unsigned rt, TCGv in1, TCGv in2,
tcg_temp_free(zero);
/* Compute signed overflow if required. */
- TCGV_UNUSED(sv);
+ sv = NULL;
if (is_tsv || c == 6) {
sv = do_sub_sv(ctx, dest, in1, in2);
if (is_tsv) {
@@ -965,7 +967,7 @@ static DisasJumpType do_cmpclr(DisasContext *ctx, unsigned rt, TCGv in1,
tcg_gen_sub_tl(dest, in1, in2);
/* Compute signed overflow if required. */
- TCGV_UNUSED(sv);
+ sv = NULL;
if ((cf >> 1) == 6) {
sv = do_sub_sv(ctx, dest, in1, in2);
}
@@ -2070,8 +2072,7 @@ static DisasJumpType trans_ds(DisasContext *ctx, uint32_t insn,
/* Install the new nullification. */
if (cf) {
- TCGv sv;
- TCGV_UNUSED(sv);
+ TCGv sv = NULL;
if (cf >> 1 == 6) {
/* ??? The lshift is supposed to contribute to overflow. */
sv = do_add_sv(ctx, dest, add1, add2);
@@ -2542,7 +2543,7 @@ static DisasJumpType trans_cmpb(DisasContext *ctx, uint32_t insn,
tcg_gen_sub_tl(dest, in1, in2);
- TCGV_UNUSED(sv);
+ sv = NULL;
if (c == 6) {
sv = do_sub_sv(ctx, dest, in1, in2);
}
@@ -2571,8 +2572,8 @@ static DisasJumpType trans_addb(DisasContext *ctx, uint32_t insn,
}
in2 = load_gpr(ctx, r);
dest = dest_gpr(ctx, r);
- TCGV_UNUSED(sv);
- TCGV_UNUSED(cb_msb);
+ sv = NULL;
+ cb_msb = NULL;
switch (c) {
default:
@@ -3732,18 +3733,16 @@ static int hppa_tr_init_disas_context(DisasContextBase *dcbase,
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
TranslationBlock *tb = ctx->base.tb;
- int i, bound;
+ int bound;
ctx->cs = cs;
ctx->iaoq_f = tb->pc;
ctx->iaoq_b = tb->cs_base;
ctx->iaoq_n = -1;
- TCGV_UNUSED(ctx->iaoq_n_var);
+ ctx->iaoq_n_var = NULL;
ctx->ntemps = 0;
- for (i = 0; i < ARRAY_SIZE(ctx->temps); ++i) {
- TCGV_UNUSED(ctx->temps[i]);
- }
+ memset(ctx->temps, 0, sizeof(ctx->temps));
bound = -(tb->pc | TARGET_PAGE_MASK) / 4;
return MIN(max_insns, bound);
@@ -3804,7 +3803,7 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
tcg_gen_addi_tl(ctx->iaoq_n_var, cpu_iaoq_b, 4);
} else {
ctx->iaoq_n = ctx->iaoq_b + 4;
- TCGV_UNUSED(ctx->iaoq_n_var);
+ ctx->iaoq_n_var = NULL;
}
if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
@@ -3819,7 +3818,7 @@ static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
/* Free any temporaries allocated. */
for (i = 0, n = ctx->ntemps; i < n; ++i) {
tcg_temp_free(ctx->temps[i]);
- TCGV_UNUSED(ctx->temps[i]);
+ ctx->temps[i] = NULL;
}
ctx->ntemps = 0;
diff --git a/target/i386/Makefile.objs b/target/i386/Makefile.objs
index 6a26e9d9f0..44103a693b 100644
--- a/target/i386/Makefile.objs
+++ b/target/i386/Makefile.objs
@@ -12,4 +12,5 @@ obj-$(CONFIG_HAX) += hax-all.o hax-mem.o hax-windows.o
endif
ifdef CONFIG_DARWIN
obj-$(CONFIG_HAX) += hax-all.o hax-mem.o hax-darwin.o
+obj-$(CONFIG_HVF) += hvf/
endif
diff --git a/target/i386/cpu-qom.h b/target/i386/cpu-qom.h
index c2205e6077..22f95eb3a4 100644
--- a/target/i386/cpu-qom.h
+++ b/target/i386/cpu-qom.h
@@ -47,7 +47,7 @@ typedef struct X86CPUDefinition X86CPUDefinition;
/**
* X86CPUClass:
* @cpu_def: CPU model definition
- * @kvm_required: Whether CPU model requires KVM to be enabled.
+ * @host_cpuid_required: Whether CPU model requires cpuid from host.
* @ordering: Ordering on the "-cpu help" CPU model list.
* @migration_safe: See CpuDefinitionInfo::migration_safe
* @static_model: See CpuDefinitionInfo::static
@@ -66,7 +66,7 @@ typedef struct X86CPUClass {
*/
X86CPUDefinition *cpu_def;
- bool kvm_required;
+ bool host_cpuid_required;
int ordering;
bool migration_safe;
bool static_model;
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 045d66191f..3818d72831 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -22,6 +22,7 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
+#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
#include "kvm_i386.h"
@@ -437,9 +438,9 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
[FEAT_7_0_ECX] = {
.feat_names = {
NULL, "avx512vbmi", "umip", "pku",
- "ospke", NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
- NULL, NULL, "avx512-vpopcntdq", NULL,
+ "ospke", NULL, "avx512vbmi2", NULL,
+ "gfni", "vaes", "vpclmulqdq", "avx512vnni",
+ "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
"la57", NULL, NULL, NULL,
NULL, NULL, "rdpid", NULL,
NULL, NULL, NULL, NULL,
@@ -615,6 +616,11 @@ static uint32_t xsave_area_size(uint64_t mask)
return ret;
}
+static inline bool accel_uses_host_cpuid(void)
+{
+ return kvm_enabled() || hvf_enabled();
+}
+
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
@@ -1686,10 +1692,15 @@ static void max_x86_cpu_initfn(Object *obj)
*/
cpu->max_features = true;
- if (kvm_enabled()) {
+ if (accel_uses_host_cpuid()) {
char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
int family, model, stepping;
+ X86CPUDefinition host_cpudef = { };
+ uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+
+ host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
+ x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
host_vendor_fms(vendor, &family, &model, &stepping);
@@ -1703,12 +1714,21 @@ static void max_x86_cpu_initfn(Object *obj)
object_property_set_str(OBJECT(cpu), model_id, "model-id",
&error_abort);
- env->cpuid_min_level =
- kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
- env->cpuid_min_xlevel =
- kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
- env->cpuid_min_xlevel2 =
- kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
+ if (kvm_enabled()) {
+ env->cpuid_min_level =
+ kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
+ env->cpuid_min_xlevel =
+ kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
+ env->cpuid_min_xlevel2 =
+ kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
+ } else {
+ env->cpuid_min_level =
+ hvf_get_supported_cpuid(0x0, 0, R_EAX);
+ env->cpuid_min_xlevel =
+ hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
+ env->cpuid_min_xlevel2 =
+ hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
+ }
if (lmce_supported()) {
object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
@@ -1734,18 +1754,21 @@ static const TypeInfo max_x86_cpu_type_info = {
.class_init = max_x86_cpu_class_init,
};
-#ifdef CONFIG_KVM
-
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
X86CPUClass *xcc = X86_CPU_CLASS(oc);
- xcc->kvm_required = true;
+ xcc->host_cpuid_required = true;
xcc->ordering = 8;
- xcc->model_description =
- "KVM processor with all supported host features "
- "(only available in KVM mode)";
+ if (kvm_enabled()) {
+ xcc->model_description =
+ "KVM processor with all supported host features ";
+ } else if (hvf_enabled()) {
+ xcc->model_description =
+ "HVF processor with all supported host features ";
+ }
}
static const TypeInfo host_x86_cpu_type_info = {
@@ -1767,7 +1790,7 @@ static void report_unavailable_features(FeatureWord w, uint32_t mask)
assert(reg);
warn_report("%s doesn't support requested feature: "
"CPUID.%02XH:%s%s%s [bit %d]",
- kvm_enabled() ? "host" : "TCG",
+ accel_uses_host_cpuid() ? "host" : "TCG",
f->cpuid_eax, reg,
f->feat_names[i] ? "." : "",
f->feat_names[i] ? f->feat_names[i] : "", i);
@@ -2218,9 +2241,9 @@ static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
Error *err = NULL;
strList **next = missing_feats;
- if (xcc->kvm_required && !kvm_enabled()) {
+ if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
strList *new = g_new0(strList, 1);
- new->value = g_strdup("kvm");;
+ new->value = g_strdup("kvm");
*missing_feats = new;
return;
}
@@ -2380,6 +2403,10 @@ static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
wi->cpuid_ecx,
wi->cpuid_reg);
+ } else if (hvf_enabled()) {
+ r = hvf_get_supported_cpuid(wi->cpuid_eax,
+ wi->cpuid_ecx,
+ wi->cpuid_reg);
} else if (tcg_enabled()) {
r = wi->tcg_features;
} else {
@@ -2439,6 +2466,7 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
}
/* Special cases not set in the X86CPUDefinition structs: */
+ /* TODO: in-kernel irqchip for hvf */
if (kvm_enabled()) {
if (!kvm_irqchip_in_kernel()) {
x86_cpu_change_kvm_default("x2apic", "off");
@@ -2459,7 +2487,7 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
* when doing cross vendor migration
*/
vendor = def->vendor;
- if (kvm_enabled()) {
+ if (accel_uses_host_cpuid()) {
uint32_t ebx = 0, ecx = 0, edx = 0;
host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
@@ -2910,6 +2938,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
*ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
*edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
+ } else if (hvf_enabled() && cpu->enable_pmu) {
+ *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
+ *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
+ *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
+ *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
} else {
*eax = 0;
*ebx = 0;
@@ -3252,6 +3285,9 @@ static void x86_cpu_reset(CPUState *s)
memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
+ env->interrupt_injected = -1;
+ env->exception_injected = -1;
+ env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
/* We hard-wire the BSP to the first CPU. */
apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
@@ -3261,6 +3297,9 @@ static void x86_cpu_reset(CPUState *s)
if (kvm_enabled()) {
kvm_arch_reset_vcpu(cpu);
}
+ else if (hvf_enabled()) {
+ hvf_reset_vcpu(s);
+ }
#endif
}
@@ -3300,6 +3339,7 @@ APICCommonClass *apic_get_class(void)
{
const char *apic_type = "apic";
+ /* TODO: in-kernel irqchip for hvf */
if (kvm_apic_in_kernel()) {
apic_type = "kvm-apic";
} else if (xen_enabled()) {
@@ -3613,7 +3653,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
Error *local_err = NULL;
static bool ht_warned;
- if (xcc->kvm_required && !kvm_enabled()) {
+ if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
char *name = x86_cpu_class_get_model_name(xcc);
error_setg(&local_err, "CPU model '%s' requires KVM", name);
g_free(name);
@@ -3635,7 +3675,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
x86_cpu_report_filtered_features(cpu);
if (cpu->enforce_cpuid) {
error_setg(&local_err,
- kvm_enabled() ?
+ accel_uses_host_cpuid() ?
"Host doesn't support requested features" :
"TCG doesn't support requested features");
goto out;
@@ -3658,7 +3698,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
* consumer AMD devices but nothing else.
*/
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
- if (kvm_enabled()) {
+ if (accel_uses_host_cpuid()) {
uint32_t host_phys_bits = x86_host_phys_bits();
static bool warned;
@@ -3736,11 +3776,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
if (tcg_enabled()) {
- AddressSpace *as_normal = g_new0(AddressSpace, 1);
- AddressSpace *as_smm = g_new(AddressSpace, 1);
-
- address_space_init(as_normal, cs->memory, "cpu-memory");
-
cpu->cpu_as_mem = g_new(MemoryRegion, 1);
cpu->cpu_as_root = g_new(MemoryRegion, 1);
@@ -3755,11 +3790,10 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
get_system_memory(), 0, ~0ull);
memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
memory_region_set_enabled(cpu->cpu_as_mem, true);
- address_space_init(as_smm, cpu->cpu_as_root, "CPU");
cs->num_ases = 2;
- cpu_address_space_init(cs, as_normal, 0);
- cpu_address_space_init(cs, as_smm, 1);
+ cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
+ cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
/* ... SMRAM with higher priority, linked from /machine/smram. */
cpu->machine_done.notify = x86_cpu_machine_done;
@@ -4278,7 +4312,7 @@ static void x86_cpu_register_types(void)
}
type_register_static(&max_x86_cpu_type_info);
type_register_static(&x86_base_cpu_type_info);
-#ifdef CONFIG_KVM
+#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
type_register_static(&host_x86_cpu_type_info);
#endif
}
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index b086b1528b..62c4742703 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -30,6 +30,8 @@
#define TARGET_LONG_BITS 32
#endif
+#include "exec/cpu-defs.h"
+
/* The x86 has a strong memory model with some store-after-load re-ordering */
#define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
@@ -50,48 +52,64 @@
#define CPUArchState struct CPUX86State
-#include "exec/cpu-defs.h"
-
#ifdef CONFIG_TCG
#include "fpu/softfloat.h"
#endif
-#define R_EAX 0
-#define R_ECX 1
-#define R_EDX 2
-#define R_EBX 3
-#define R_ESP 4
-#define R_EBP 5
-#define R_ESI 6
-#define R_EDI 7
-
-#define R_AL 0
-#define R_CL 1
-#define R_DL 2
-#define R_BL 3
-#define R_AH 4
-#define R_CH 5
-#define R_DH 6
-#define R_BH 7
-
-#define R_ES 0
-#define R_CS 1
-#define R_SS 2
-#define R_DS 3
-#define R_FS 4
-#define R_GS 5
+enum {
+ R_EAX = 0,
+ R_ECX = 1,
+ R_EDX = 2,
+ R_EBX = 3,
+ R_ESP = 4,
+ R_EBP = 5,
+ R_ESI = 6,
+ R_EDI = 7,
+ R_R8 = 8,
+ R_R9 = 9,
+ R_R10 = 10,
+ R_R11 = 11,
+ R_R12 = 12,
+ R_R13 = 13,
+ R_R14 = 14,
+ R_R15 = 15,
+
+ R_AL = 0,
+ R_CL = 1,
+ R_DL = 2,
+ R_BL = 3,
+ R_AH = 4,
+ R_CH = 5,
+ R_DH = 6,
+ R_BH = 7,
+};
+
+typedef enum X86Seg {
+ R_ES = 0,
+ R_CS = 1,
+ R_SS = 2,
+ R_DS = 3,
+ R_FS = 4,
+ R_GS = 5,
+ R_LDTR = 6,
+ R_TR = 7,
+} X86Seg;
/* segment descriptor fields */
-#define DESC_G_MASK (1 << 23)
+#define DESC_G_SHIFT 23
+#define DESC_G_MASK (1 << DESC_G_SHIFT)
#define DESC_B_SHIFT 22
#define DESC_B_MASK (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK (1 << DESC_L_SHIFT)
-#define DESC_AVL_MASK (1 << 20)
-#define DESC_P_MASK (1 << 15)
+#define DESC_AVL_SHIFT 20
+#define DESC_AVL_MASK (1 << DESC_AVL_SHIFT)
+#define DESC_P_SHIFT 15
+#define DESC_P_MASK (1 << DESC_P_SHIFT)
#define DESC_DPL_SHIFT 13
#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
-#define DESC_S_MASK (1 << 12)
+#define DESC_S_SHIFT 12
+#define DESC_S_MASK (1 << DESC_S_SHIFT)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK (1 << 8)
@@ -631,10 +649,17 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EBX_AVX512BW (1U << 30) /* AVX-512 Byte and Word Instructions */
#define CPUID_7_0_EBX_AVX512VL (1U << 31) /* AVX-512 Vector Length Extensions */
+#define CPUID_7_0_ECX_AVX512BMI (1U << 1)
#define CPUID_7_0_ECX_VBMI (1U << 1) /* AVX-512 Vector Byte Manipulation Instrs */
#define CPUID_7_0_ECX_UMIP (1U << 2)
#define CPUID_7_0_ECX_PKU (1U << 3)
#define CPUID_7_0_ECX_OSPKE (1U << 4)
+#define CPUID_7_0_ECX_VBMI2 (1U << 6) /* Additional VBMI Instrs */
+#define CPUID_7_0_ECX_GFNI (1U << 8)
+#define CPUID_7_0_ECX_VAES (1U << 9)
+#define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10)
+#define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
+#define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) /* POPCNT for vectors of DW/QW */
#define CPUID_7_0_ECX_LA57 (1U << 16)
#define CPUID_7_0_ECX_RDPID (1U << 22)
@@ -806,6 +831,20 @@ typedef struct SegmentCache {
float64 _d_##n[(bits)/64]; \
}
+typedef union {
+ uint8_t _b[16];
+ uint16_t _w[8];
+ uint32_t _l[4];
+ uint64_t _q[2];
+} XMMReg;
+
+typedef union {
+ uint8_t _b[32];
+ uint16_t _w[16];
+ uint32_t _l[8];
+ uint64_t _q[4];
+} YMMReg;
+
typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
typedef MMREG_UNION(MMXReg, 64) MMXReg;
@@ -1041,7 +1080,11 @@ typedef struct CPUX86State {
ZMMReg xmm_t0;
MMXReg mmx_t0;
+ XMMReg ymmh_regs[CPU_NB_REGS];
+
uint64_t opmask_regs[NB_OPMASK_REGS];
+ YMMReg zmmh_regs[CPU_NB_REGS];
+ ZMMReg hi16_zmm_regs[CPU_NB_REGS];
/* sysenter registers */
uint32_t sysenter_cs;
@@ -1091,14 +1134,16 @@ typedef struct CPUX86State {
uint64_t async_pf_en_msr;
uint64_t pv_eoi_en_msr;
+ /* Partition-wide HV MSRs, will be updated only on the first vcpu */
uint64_t msr_hv_hypercall;
uint64_t msr_hv_guest_os_id;
- uint64_t msr_hv_vapic;
uint64_t msr_hv_tsc;
+
+ /* Per-VCPU HV MSRs */
+ uint64_t msr_hv_vapic;
uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
uint64_t msr_hv_runtime;
uint64_t msr_hv_synic_control;
- uint64_t msr_hv_synic_version;
uint64_t msr_hv_synic_evt_page;
uint64_t msr_hv_synic_msg_page;
uint64_t msr_hv_synic_sint[HV_SINT_COUNT];
@@ -1164,11 +1209,15 @@ typedef struct CPUX86State {
int32_t interrupt_injected;
uint8_t soft_interrupt;
uint8_t has_error_code;
+ uint32_t ins_len;
uint32_t sipi_vector;
bool tsc_valid;
int64_t tsc_khz;
int64_t user_tsc_khz; /* for sanity check only */
void *kvm_xsave_buf;
+#if defined(CONFIG_HVF)
+ HVFX86EmulatorState *hvf_emul;
+#endif
uint64_t mcg_cap;
uint64_t mcg_ctl;
diff --git a/target/i386/hax-darwin.c b/target/i386/hax-darwin.c
index 1c5bbd0a2d..ee9417454c 100644
--- a/target/i386/hax-darwin.c
+++ b/target/i386/hax-darwin.c
@@ -11,13 +11,9 @@
*/
/* HAX module interface - darwin version */
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <errno.h>
+#include "qemu/osdep.h"
#include <sys/ioctl.h>
-#include "qemu/osdep.h"
#include "target/i386/hax-i386.h"
hax_fd hax_mod_open(void)
diff --git a/target/i386/hax-darwin.h b/target/i386/hax-darwin.h
index 0c0968b77d..fb8e25a096 100644
--- a/target/i386/hax-darwin.h
+++ b/target/i386/hax-darwin.h
@@ -15,10 +15,7 @@
#ifndef TARGET_I386_HAX_DARWIN_H
#define TARGET_I386_HAX_DARWIN_H
-#include <sys/types.h>
#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <stdarg.h>
#define HAX_INVALID_FD (-1)
static inline int hax_invalid_fd(hax_fd fd)
diff --git a/target/i386/hax-windows.h b/target/i386/hax-windows.h
index 1d8f68de91..004f867694 100644
--- a/target/i386/hax-windows.h
+++ b/target/i386/hax-windows.h
@@ -20,12 +20,9 @@
#ifndef TARGET_I386_HAX_WINDOWS_H
#define TARGET_I386_HAX_WINDOWS_H
-#include <windows.h>
#include <memory.h>
#include <malloc.h>
#include <winioctl.h>
-#include <string.h>
-#include <stdio.h>
#include <windef.h>
#define HAX_INVALID_FD INVALID_HANDLE_VALUE
diff --git a/target/i386/hvf/Makefile.objs b/target/i386/hvf/Makefile.objs
new file mode 100644
index 0000000000..927b86bc67
--- /dev/null
+++ b/target/i386/hvf/Makefile.objs
@@ -0,0 +1,2 @@
+obj-y += hvf.o
+obj-y += x86.o x86_cpuid.o x86_decode.o x86_descr.o x86_emu.o x86_flags.o x86_mmu.o x86hvf.o x86_task.o
diff --git a/target/i386/hvf/README.md b/target/i386/hvf/README.md
new file mode 100644
index 0000000000..0d27a0d52b
--- /dev/null
+++ b/target/i386/hvf/README.md
@@ -0,0 +1,7 @@
+# OS X Hypervisor.framework support in QEMU
+
+These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desktop Hosted Hypervisor, last known location: https://github.com/veertuinc/vdhh) with some minor changes, the most significant of which were:
+
+1. Adaptation to the current QEMU `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, `kvm_xsave_buf`) due to historical differences plus QEMU needing to handle more emulation targets.
+2. Removal of `apic_page` and hyperv-related functionality.
+3. More relaxed use of `qemu_mutex_lock_iothread`.
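As a quick illustration of point 1: guest port-I/O accesses are routed through QEMU's address_space_rw() on address_space_io, which is the pattern the new hvf_handle_io() helper in hvf.c below follows. A minimal sketch of that pattern (the function name forward_guest_pio is illustrative only; address_space_rw and address_space_io are the real QEMU APIs used in this patch):

    #include "qemu/osdep.h"
    #include "exec/address-spaces.h"

    /* Forward a guest PIO access of `count` elements of `size` bytes each to
     * QEMU's I/O address space; direction != 0 means the guest is writing. */
    static void forward_guest_pio(uint16_t port, void *buffer,
                                  int direction, int size, int count)
    {
        uint8_t *ptr = buffer;
        int i;

        for (i = 0; i < count; i++) {
            address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
                             ptr, size, direction);
            ptr += size;
        }
    }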
diff --git a/target/i386/hvf/hvf-i386.h b/target/i386/hvf/hvf-i386.h
new file mode 100644
index 0000000000..2232501552
--- /dev/null
+++ b/target/i386/hvf/hvf-i386.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU Hypervisor.framework (HVF) support
+ *
+ * Copyright 2017 Google Inc
+ *
+ * Adapted from target-i386/hax-i386.h:
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef _HVF_I386_H
+#define _HVF_I386_H
+
+#include "sysemu/hvf.h"
+#include "cpu.h"
+#include "x86.h"
+
+#define HVF_MAX_VCPU 0x10
+#define MAX_VM_ID 0x40
+#define MAX_VCPU_ID 0x40
+
+extern struct hvf_state hvf_global;
+
+struct hvf_vm {
+ int id;
+ struct hvf_vcpu_state *vcpus[HVF_MAX_VCPU];
+};
+
+struct hvf_state {
+ uint32_t version;
+ struct hvf_vm *vm;
+ uint64_t mem_quota;
+};
+
+#ifdef NEED_CPU_H
+/* Functions exported to host specific mode */
+
+/* Host specific functions */
+int hvf_inject_interrupt(CPUArchState *env, int vector);
+int hvf_vcpu_run(struct hvf_vcpu_state *vcpu);
+#endif
+
+#endif
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
new file mode 100644
index 0000000000..010866ed22
--- /dev/null
+++ b/target/i386/hvf/hvf.c
@@ -0,0 +1,959 @@
+/* Copyright 2008 IBM Corporation
+ * 2008 Red Hat, Inc.
+ * Copyright 2011 Intel Corporation
+ * Copyright 2016 Veertu, Inc.
+ * Copyright 2017 The Android Open Source Project
+ *
+ * QEMU Hypervisor.framework support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+
+#include "sysemu/hvf.h"
+#include "hvf-i386.h"
+#include "vmcs.h"
+#include "vmx.h"
+#include "x86.h"
+#include "x86_descr.h"
+#include "x86_mmu.h"
+#include "x86_decode.h"
+#include "x86_emu.h"
+#include "x86_task.h"
+#include "x86hvf.h"
+
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+
+#include "exec/address-spaces.h"
+#include "exec/exec-all.h"
+#include "exec/ioport.h"
+#include "hw/i386/apic_internal.h"
+#include "hw/boards.h"
+#include "qemu/main-loop.h"
+#include "strings.h"
+#include "sysemu/accel.h"
+#include "sysemu/sysemu.h"
+#include "target/i386/cpu.h"
+
+pthread_rwlock_t mem_lock = PTHREAD_RWLOCK_INITIALIZER;
+HVFState *hvf_state;
+int hvf_disabled = 1;
+
+static void assert_hvf_ok(hv_return_t ret)
+{
+ if (ret == HV_SUCCESS) {
+ return;
+ }
+
+ switch (ret) {
+ case HV_ERROR:
+ error_report("Error: HV_ERROR");
+ break;
+ case HV_BUSY:
+ error_report("Error: HV_BUSY");
+ break;
+ case HV_BAD_ARGUMENT:
+ error_report("Error: HV_BAD_ARGUMENT");
+ break;
+ case HV_NO_RESOURCES:
+ error_report("Error: HV_NO_RESOURCES");
+ break;
+ case HV_NO_DEVICE:
+ error_report("Error: HV_NO_DEVICE");
+ break;
+ case HV_UNSUPPORTED:
+ error_report("Error: HV_UNSUPPORTED");
+ break;
+ default:
+ error_report("Unknown error");
+ }
+
+ abort();
+}
+
+/* Memory slots */
+hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t end)
+{
+ hvf_slot *slot;
+ int x;
+ for (x = 0; x < hvf_state->num_slots; ++x) {
+ slot = &hvf_state->slots[x];
+ if (slot->size && start < (slot->start + slot->size) &&
+ end > slot->start) {
+ return slot;
+ }
+ }
+ return NULL;
+}
+
+struct mac_slot {
+ int present;
+ uint64_t size;
+ uint64_t gpa_start;
+ uint64_t gva;
+};
+
+struct mac_slot mac_slots[32];
+#define ALIGN(x, y) (((x) + (y) - 1) & ~((y) - 1))
+
+static int do_hvf_set_memory(hvf_slot *slot)
+{
+ struct mac_slot *macslot;
+ hv_memory_flags_t flags;
+ hv_return_t ret;
+
+ macslot = &mac_slots[slot->slot_id];
+
+ if (macslot->present) {
+ if (macslot->size != slot->size) {
+ macslot->present = 0;
+ ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
+ assert_hvf_ok(ret);
+ }
+ }
+
+ if (!slot->size) {
+ return 0;
+ }
+
+ flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
+
+ macslot->present = 1;
+ macslot->gpa_start = slot->start;
+ macslot->size = slot->size;
+ ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);
+ assert_hvf_ok(ret);
+ return 0;
+}
+
+void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
+{
+ hvf_slot *mem;
+ MemoryRegion *area = section->mr;
+
+ if (!memory_region_is_ram(area)) {
+ return;
+ }
+
+ mem = hvf_find_overlap_slot(
+ section->offset_within_address_space,
+ section->offset_within_address_space + int128_get64(section->size));
+
+ if (mem && add) {
+ if (mem->size == int128_get64(section->size) &&
+ mem->start == section->offset_within_address_space &&
+ mem->mem == (memory_region_get_ram_ptr(area) +
+ section->offset_within_region)) {
+ return; /* The same region is already registered, nothing to do. */
+ }
+ }
+
+ /* Region needs to be reset. Set the size to 0 and remap it. */
+ if (mem) {
+ mem->size = 0;
+ if (do_hvf_set_memory(mem)) {
+ error_report("Failed to reset overlapping slot\n");
+ abort();
+ }
+ }
+
+ if (!add) {
+ return;
+ }
+
+ /* Now make a new slot. */
+ int x;
+
+ for (x = 0; x < hvf_state->num_slots; ++x) {
+ mem = &hvf_state->slots[x];
+ if (!mem->size) {
+ break;
+ }
+ }
+
+ if (x == hvf_state->num_slots) {
+ error_report("No free slots\n");
+ abort();
+ }
+
+ mem->size = int128_get64(section->size);
+ mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
+ mem->start = section->offset_within_address_space;
+ mem->region = area;
+
+ if (do_hvf_set_memory(mem)) {
+ error_report("Error registering new memory slot\n");
+ abort();
+ }
+}
+
+void vmx_update_tpr(CPUState *cpu)
+{
+ /* TODO: need to integrate APIC handling */
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
+ int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);
+
+ wreg(cpu->hvf_fd, HV_X86_TPR, tpr);
+ if (irr == -1) {
+ wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
+ } else {
+ wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
+ irr >> 4);
+ }
+}
+
+void update_apic_tpr(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;
+ cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
+}
+
+#define VECTORING_INFO_VECTOR_MASK 0xff
+
+static void hvf_handle_interrupt(CPUState *cpu, int mask)
+{
+ cpu->interrupt_request |= mask;
+ if (!qemu_cpu_is_self(cpu)) {
+ qemu_cpu_kick(cpu);
+ }
+}
+
+void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
+ int direction, int size, int count)
+{
+ int i;
+ uint8_t *ptr = buffer;
+
+ for (i = 0; i < count; i++) {
+ address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
+ ptr, size,
+ direction);
+ ptr += size;
+ }
+}
+
+/* TODO: synchronize vcpu state */
+static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
+{
+ CPUState *cpu_state = cpu;
+ if (cpu_state->vcpu_dirty == 0) {
+ hvf_get_registers(cpu_state);
+ }
+
+ cpu_state->vcpu_dirty = 1;
+}
+
+void hvf_cpu_synchronize_state(CPUState *cpu_state)
+{
+ if (cpu_state->vcpu_dirty == 0) {
+ run_on_cpu(cpu_state, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
+ }
+}
+
+static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
+{
+ CPUState *cpu_state = cpu;
+ hvf_put_registers(cpu_state);
+ cpu_state->vcpu_dirty = false;
+}
+
+void hvf_cpu_synchronize_post_reset(CPUState *cpu_state)
+{
+ run_on_cpu(cpu_state, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
+}
+
+void _hvf_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
+{
+ CPUState *cpu_state = cpu;
+ hvf_put_registers(cpu_state);
+ cpu_state->vcpu_dirty = false;
+}
+
+void hvf_cpu_synchronize_post_init(CPUState *cpu_state)
+{
+ run_on_cpu(cpu_state, _hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
+}
+
+static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
+{
+ int read, write;
+
+ /* EPT fault on an instruction fetch doesn't make sense here */
+ if (ept_qual & EPT_VIOLATION_INST_FETCH) {
+ return false;
+ }
+
+ /* EPT fault must be a read fault or a write fault */
+ read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
+ write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
+ if ((read | write) == 0) {
+ return false;
+ }
+
+ if (write && slot) {
+ if (slot->flags & HVF_SLOT_LOG) {
+ memory_region_set_dirty(slot->region, gpa - slot->start, 1);
+ hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
+ HV_MEMORY_READ | HV_MEMORY_WRITE);
+ }
+ }
+
+ /*
+ * The EPT violation must have been caused by accessing a
+ * guest-physical address that is a translation of a guest-linear
+ * address.
+ */
+ if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
+ (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
+ return false;
+ }
+
+ return !slot;
+}
+
+static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
+{
+ hvf_slot *slot;
+
+ slot = hvf_find_overlap_slot(
+ section->offset_within_address_space,
+ section->offset_within_address_space + int128_get64(section->size));
+
+ /* protect region against writes; begin tracking it */
+ if (on) {
+ slot->flags |= HVF_SLOT_LOG;
+ hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
+ HV_MEMORY_READ);
+ /* stop tracking region */
+ } else {
+ slot->flags &= ~HVF_SLOT_LOG;
+ hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
+ HV_MEMORY_READ | HV_MEMORY_WRITE);
+ }
+}
+
+static void hvf_log_start(MemoryListener *listener,
+ MemoryRegionSection *section, int old, int new)
+{
+ if (old != 0) {
+ return;
+ }
+
+ hvf_set_dirty_tracking(section, 1);
+}
+
+static void hvf_log_stop(MemoryListener *listener,
+ MemoryRegionSection *section, int old, int new)
+{
+ if (new != 0) {
+ return;
+ }
+
+ hvf_set_dirty_tracking(section, 0);
+}
+
+static void hvf_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ /*
+ * sync of dirty pages is handled elsewhere; just make sure we keep
+ * tracking the region.
+ */
+ hvf_set_dirty_tracking(section, 1);
+}
+
+static void hvf_region_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ hvf_set_phys_mem(section, true);
+}
+
+static void hvf_region_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ hvf_set_phys_mem(section, false);
+}
+
+static MemoryListener hvf_memory_listener = {
+ .priority = 10,
+ .region_add = hvf_region_add,
+ .region_del = hvf_region_del,
+ .log_start = hvf_log_start,
+ .log_stop = hvf_log_stop,
+ .log_sync = hvf_log_sync,
+};
+
+void hvf_reset_vcpu(CPUState *cpu)
+{
+ /* TODO: this shouldn't be needed; there is already a call to
+ * cpu_synchronize_all_post_reset in vl.c
+ */
+ wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, 0);
+ macvm_set_cr0(cpu->hvf_fd, 0x60000010);
+
+ wvmcs(cpu->hvf_fd, VMCS_CR4_MASK, CR4_VMXE_MASK);
+ wvmcs(cpu->hvf_fd, VMCS_CR4_SHADOW, 0x0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_CR4, CR4_VMXE_MASK);
+
+ /* set VMCS guest state fields */
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_SELECTOR, 0xf000);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_LIMIT, 0xffff);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_ACCESS_RIGHTS, 0x9b);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_CS_BASE, 0xffff0000);
+
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_SELECTOR, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_LIMIT, 0xffff);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_ACCESS_RIGHTS, 0x93);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_DS_BASE, 0);
+
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_SELECTOR, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_LIMIT, 0xffff);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_ACCESS_RIGHTS, 0x93);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_ES_BASE, 0);
+
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_SELECTOR, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_LIMIT, 0xffff);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_ACCESS_RIGHTS, 0x93);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, 0);
+
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_SELECTOR, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_LIMIT, 0xffff);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_ACCESS_RIGHTS, 0x93);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, 0);
+
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_SELECTOR, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_LIMIT, 0xffff);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_ACCESS_RIGHTS, 0x93);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_SS_BASE, 0);
+
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_SELECTOR, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x10000);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE, 0);
+
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_SELECTOR, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_LIMIT, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_ACCESS_RIGHTS, 0x83);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_TR_BASE, 0);
+
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE, 0);
+
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT, 0);
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE, 0);
+
+ /*wvmcs(cpu->hvf_fd, VMCS_GUEST_CR2, 0x0);*/
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, 0x0);
+
+ wreg(cpu->hvf_fd, HV_X86_RIP, 0xfff0);
+ wreg(cpu->hvf_fd, HV_X86_RDX, 0x623);
+ wreg(cpu->hvf_fd, HV_X86_RFLAGS, 0x2);
+ wreg(cpu->hvf_fd, HV_X86_RSP, 0x0);
+ wreg(cpu->hvf_fd, HV_X86_RAX, 0x0);
+ wreg(cpu->hvf_fd, HV_X86_RBX, 0x0);
+ wreg(cpu->hvf_fd, HV_X86_RCX, 0x0);
+ wreg(cpu->hvf_fd, HV_X86_RSI, 0x0);
+ wreg(cpu->hvf_fd, HV_X86_RDI, 0x0);
+ wreg(cpu->hvf_fd, HV_X86_RBP, 0x0);
+
+ for (int i = 0; i < 8; i++) {
+ wreg(cpu->hvf_fd, HV_X86_R8 + i, 0x0);
+ }
+
+ hv_vm_sync_tsc(0);
+ cpu->halted = 0;
+ hv_vcpu_invalidate_tlb(cpu->hvf_fd);
+ hv_vcpu_flush(cpu->hvf_fd);
+}
+
+void hvf_vcpu_destroy(CPUState *cpu)
+{
+ hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);
+ assert_hvf_ok(ret);
+}
+
+static void dummy_signal(int sig)
+{
+}
+
+int hvf_init_vcpu(CPUState *cpu)
+{
+
+ X86CPU *x86cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86cpu->env;
+ int r;
+
+ /* init cpu signals */
+ sigset_t set;
+ struct sigaction sigact;
+
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = dummy_signal;
+ sigaction(SIG_IPI, &sigact, NULL);
+
+ pthread_sigmask(SIG_BLOCK, NULL, &set);
+ sigdelset(&set, SIG_IPI);
+
+ init_emu();
+ init_decoder();
+
+ hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
+ env->hvf_emul = g_new0(HVFX86EmulatorState, 1);
+
+ r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
+ cpu->vcpu_dirty = 1;
+ assert_hvf_ok(r);
+
+ if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
+ &hvf_state->hvf_caps->vmx_cap_pinbased)) {
+ abort();
+ }
+ if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
+ &hvf_state->hvf_caps->vmx_cap_procbased)) {
+ abort();
+ }
+ if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
+ &hvf_state->hvf_caps->vmx_cap_procbased2)) {
+ abort();
+ }
+ if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
+ &hvf_state->hvf_caps->vmx_cap_entry)) {
+ abort();
+ }
+
+ /* set VMCS control fields */
+ wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,
+ cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
+ VMCS_PIN_BASED_CTLS_EXTINT |
+ VMCS_PIN_BASED_CTLS_NMI |
+ VMCS_PIN_BASED_CTLS_VNMI));
+ wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
+ cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
+ VMCS_PRI_PROC_BASED_CTLS_HLT |
+ VMCS_PRI_PROC_BASED_CTLS_MWAIT |
+ VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
+ VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
+ VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
+ wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,
+ cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
+ VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));
+
+ wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS, cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry,
+ 0));
+ wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */
+
+ wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
+
+ hvf_reset_vcpu(cpu);
+
+ x86cpu = X86_CPU(cpu);
+ x86cpu->env.kvm_xsave_buf = qemu_memalign(4096, 4096);
+
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
+ /*hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);*/
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
+ hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);
+
+ return 0;
+}
+
+void hvf_disable(int shouldDisable)
+{
+ hvf_disabled = shouldDisable;
+}
+
+static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ env->exception_injected = -1;
+ env->interrupt_injected = -1;
+ env->nmi_injected = false;
+ if (idtvec_info & VMCS_IDT_VEC_VALID) {
+ switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
+ case VMCS_IDT_VEC_HWINTR:
+ case VMCS_IDT_VEC_SWINTR:
+ env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
+ break;
+ case VMCS_IDT_VEC_NMI:
+ env->nmi_injected = true;
+ break;
+ case VMCS_IDT_VEC_HWEXCEPTION:
+ case VMCS_IDT_VEC_SWEXCEPTION:
+ env->exception_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
+ break;
+ case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
+ default:
+ abort();
+ }
+ if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
+ (idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
+ env->ins_len = ins_len;
+ }
+ if (idtvec_info & VMCS_INTR_DEL_ERRCODE) {
+ env->has_error_code = true;
+ env->error_code = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERROR);
+ }
+ }
+ if ((rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
+ VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
+ env->hflags2 |= HF2_NMI_MASK;
+ } else {
+ env->hflags2 &= ~HF2_NMI_MASK;
+ }
+ if (rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
+ (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
+ env->hflags |= HF_INHIBIT_IRQ_MASK;
+ } else {
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+ }
+}
+
+int hvf_vcpu_exec(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ int ret = 0;
+ uint64_t rip = 0;
+
+ cpu->halted = 0;
+
+ if (hvf_process_events(cpu)) {
+ return EXCP_HLT;
+ }
+
+ do {
+ if (cpu->vcpu_dirty) {
+ hvf_put_registers(cpu);
+ cpu->vcpu_dirty = false;
+ }
+
+ if (hvf_inject_interrupts(cpu)) {
+ return EXCP_INTERRUPT;
+ }
+ vmx_update_tpr(cpu);
+
+ qemu_mutex_unlock_iothread();
+ if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
+ qemu_mutex_lock_iothread();
+ return EXCP_HLT;
+ }
+
+ hv_return_t r = hv_vcpu_run(cpu->hvf_fd);
+ assert_hvf_ok(r);
+
+ /* handle VMEXIT */
+ uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);
+ uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);
+ uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,
+ VMCS_EXIT_INSTRUCTION_LENGTH);
+
+ uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
+
+ hvf_store_events(cpu, ins_len, idtvec_info);
+ rip = rreg(cpu->hvf_fd, HV_X86_RIP);
+ RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
+ env->eflags = RFLAGS(env);
+
+ qemu_mutex_lock_iothread();
+
+ update_apic_tpr(cpu);
+ current_cpu = cpu;
+
+ ret = 0;
+ switch (exit_reason) {
+ case EXIT_REASON_HLT: {
+ macvm_set_rip(cpu, rip + ins_len);
+ if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (EFLAGS(env) & IF_MASK))
+ && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
+ !(idtvec_info & VMCS_IDT_VEC_VALID)) {
+ cpu->halted = 1;
+ ret = EXCP_HLT;
+ }
+ ret = EXCP_INTERRUPT;
+ break;
+ }
+ case EXIT_REASON_MWAIT: {
+ ret = EXCP_INTERRUPT;
+ break;
+ }
+ /* Need to check whether this is an MMIO or unmapped fault */
+ case EXIT_REASON_EPT_FAULT:
+ {
+ hvf_slot *slot;
+ uint64_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);
+
+ if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
+ ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
+ vmx_set_nmi_blocking(cpu);
+ }
+
+ slot = hvf_find_overlap_slot(gpa, gpa);
+ /* mmio */
+ if (ept_emulation_fault(slot, gpa, exit_qual)) {
+ struct x86_decode decode;
+
+ load_regs(cpu);
+ env->hvf_emul->fetch_rip = rip;
+
+ decode_instruction(env, &decode);
+ exec_instruction(env, &decode);
+ store_regs(cpu);
+ break;
+ }
+ break;
+ }
+ case EXIT_REASON_INOUT:
+ {
+ uint32_t in = (exit_qual & 8) != 0;
+ uint32_t size = (exit_qual & 7) + 1;
+ uint32_t string = (exit_qual & 16) != 0;
+ uint32_t port = exit_qual >> 16;
+ /*uint32_t rep = (exit_qual & 0x20) != 0;*/
+
+ if (!string && in) {
+ uint64_t val = 0;
+ load_regs(cpu);
+ hvf_handle_io(env, port, &val, 0, size, 1);
+ if (size == 1) {
+ AL(env) = val;
+ } else if (size == 2) {
+ AX(env) = val;
+ } else if (size == 4) {
+ RAX(env) = (uint32_t)val;
+ } else {
+ RAX(env) = (uint64_t)val;
+ }
+ RIP(env) += ins_len;
+ store_regs(cpu);
+ break;
+ } else if (!string && !in) {
+ RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);
+ hvf_handle_io(env, port, &RAX(env), 1, size, 1);
+ macvm_set_rip(cpu, rip + ins_len);
+ break;
+ }
+ struct x86_decode decode;
+
+ load_regs(cpu);
+ env->hvf_emul->fetch_rip = rip;
+
+ decode_instruction(env, &decode);
+ assert(ins_len == decode.len);
+ exec_instruction(env, &decode);
+ store_regs(cpu);
+
+ break;
+ }
+ case EXIT_REASON_CPUID: {
+ uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
+ uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);
+ uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
+ uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);
+
+ cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);
+
+ wreg(cpu->hvf_fd, HV_X86_RAX, rax);
+ wreg(cpu->hvf_fd, HV_X86_RBX, rbx);
+ wreg(cpu->hvf_fd, HV_X86_RCX, rcx);
+ wreg(cpu->hvf_fd, HV_X86_RDX, rdx);
+
+ macvm_set_rip(cpu, rip + ins_len);
+ break;
+ }
+ case EXIT_REASON_XSETBV: {
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
+ uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
+ uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);
+
+ if (ecx) {
+ macvm_set_rip(cpu, rip + ins_len);
+ break;
+ }
+ env->xcr0 = ((uint64_t)edx << 32) | eax;
+ wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);
+ macvm_set_rip(cpu, rip + ins_len);
+ break;
+ }
+ case EXIT_REASON_INTR_WINDOW:
+ vmx_clear_int_window_exiting(cpu);
+ ret = EXCP_INTERRUPT;
+ break;
+ case EXIT_REASON_NMI_WINDOW:
+ vmx_clear_nmi_window_exiting(cpu);
+ ret = EXCP_INTERRUPT;
+ break;
+ case EXIT_REASON_EXT_INTR:
+ /* force exit and allow io handling */
+ ret = EXCP_INTERRUPT;
+ break;
+ case EXIT_REASON_RDMSR:
+ case EXIT_REASON_WRMSR:
+ {
+ load_regs(cpu);
+ if (exit_reason == EXIT_REASON_RDMSR) {
+ simulate_rdmsr(cpu);
+ } else {
+ simulate_wrmsr(cpu);
+ }
+ RIP(env) += rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
+ store_regs(cpu);
+ break;
+ }
+ case EXIT_REASON_CR_ACCESS: {
+ int cr;
+ int reg;
+
+ load_regs(cpu);
+ cr = exit_qual & 15;
+ reg = (exit_qual >> 8) & 15;
+
+ switch (cr) {
+ case 0x0: {
+ macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));
+ break;
+ }
+ case 4: {
+ macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));
+ break;
+ }
+ case 8: {
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ if (exit_qual & 0x10) {
+ RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
+ } else {
+ int tpr = RRX(env, reg);
+ cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
+ ret = EXCP_INTERRUPT;
+ }
+ break;
+ }
+ default:
+ error_report("Unrecognized CR %d\n", cr);
+ abort();
+ }
+ RIP(env) += ins_len;
+ store_regs(cpu);
+ break;
+ }
+ case EXIT_REASON_APIC_ACCESS: { /* TODO */
+ struct x86_decode decode;
+
+ load_regs(cpu);
+ env->hvf_emul->fetch_rip = rip;
+
+ decode_instruction(env, &decode);
+ exec_instruction(env, &decode);
+ store_regs(cpu);
+ break;
+ }
+ case EXIT_REASON_TPR: {
+ ret = 1;
+ break;
+ }
+ case EXIT_REASON_TASK_SWITCH: {
+ uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
+ x68_segment_selector sel = {.sel = exit_qual & 0xffff};
+ vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
+ vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
+ & VMCS_INTR_T_MASK);
+ break;
+ }
+ case EXIT_REASON_TRIPLE_FAULT: {
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ ret = EXCP_INTERRUPT;
+ break;
+ }
+ case EXIT_REASON_RDPMC:
+ wreg(cpu->hvf_fd, HV_X86_RAX, 0);
+ wreg(cpu->hvf_fd, HV_X86_RDX, 0);
+ macvm_set_rip(cpu, rip + ins_len);
+ break;
+ case VMX_REASON_VMCALL:
+ env->exception_injected = EXCP0D_GPF;
+ env->has_error_code = true;
+ env->error_code = 0;
+ break;
+ default:
+ error_report("%llx: unhandled exit %llx\n", rip, exit_reason);
+ }
+ } while (ret == 0);
+
+ return ret;
+}
+
+static bool hvf_allowed;
+
+static int hvf_accel_init(MachineState *ms)
+{
+ int x;
+ hv_return_t ret;
+ HVFState *s;
+
+ hvf_disable(0);
+ ret = hv_vm_create(HV_VM_DEFAULT);
+ assert_hvf_ok(ret);
+
+ s = g_new0(HVFState, 1);
+
+ s->num_slots = 32;
+ for (x = 0; x < s->num_slots; ++x) {
+ s->slots[x].size = 0;
+ s->slots[x].slot_id = x;
+ }
+
+ hvf_state = s;
+ cpu_interrupt_handler = hvf_handle_interrupt;
+ memory_listener_register(&hvf_memory_listener, &address_space_memory);
+ return 0;
+}
+
+static void hvf_accel_class_init(ObjectClass *oc, void *data)
+{
+ AccelClass *ac = ACCEL_CLASS(oc);
+ ac->name = "HVF";
+ ac->init_machine = hvf_accel_init;
+ ac->allowed = &hvf_allowed;
+}
+
+static const TypeInfo hvf_accel_type = {
+ .name = TYPE_HVF_ACCEL,
+ .parent = TYPE_ACCEL,
+ .class_init = hvf_accel_class_init,
+};
+
+static void hvf_type_init(void)
+{
+ type_register_static(&hvf_accel_type);
+}
+
+type_init(hvf_type_init);
diff --git a/target/i386/hvf/panic.h b/target/i386/hvf/panic.h
new file mode 100644
index 0000000000..411ef43a5b
--- /dev/null
+++ b/target/i386/hvf/panic.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HVF_PANIC_H
+#define HVF_PANIC_H
+
+#define VM_PANIC(x) {\
+ printf("%s\n", x); \
+ abort(); \
+}
+
+#define VM_PANIC_ON(x) {\
+ if (x) { \
+ printf("%s\n", #x); \
+ abort(); \
+ } \
+}
+
+#define VM_PANIC_EX(...) {\
+ printf(__VA_ARGS__); \
+ abort(); \
+}
+
+#define VM_PANIC_ON_EX(x, ...) {\
+ if (x) { \
+ printf(__VA_ARGS__); \
+ abort(); \
+ } \
+}
+
+#endif
diff --git a/target/i386/hvf/vmcs.h b/target/i386/hvf/vmcs.h
new file mode 100644
index 0000000000..2a8c0424a5
--- /dev/null
+++ b/target/i386/hvf/vmcs.h
@@ -0,0 +1,374 @@
+/*-
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VMCS_H_
+#define _VMCS_H_
+
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+
+#define VMCS_INITIAL 0xffffffffffffffff
+
+#define VMCS_IDENT(encoding) ((encoding) | 0x80000000)
+/*
+ * VMCS field encodings from Appendix H, Intel Architecture Manual Vol3B.
+ */
+#define VMCS_INVALID_ENCODING 0xffffffff
+
+/* 16-bit control fields */
+#define VMCS_VPID 0x00000000
+#define VMCS_PIR_VECTOR 0x00000002
+
+/* 16-bit guest-state fields */
+#define VMCS_GUEST_ES_SELECTOR 0x00000800
+#define VMCS_GUEST_CS_SELECTOR 0x00000802
+#define VMCS_GUEST_SS_SELECTOR 0x00000804
+#define VMCS_GUEST_DS_SELECTOR 0x00000806
+#define VMCS_GUEST_FS_SELECTOR 0x00000808
+#define VMCS_GUEST_GS_SELECTOR 0x0000080A
+#define VMCS_GUEST_LDTR_SELECTOR 0x0000080C
+#define VMCS_GUEST_TR_SELECTOR 0x0000080E
+#define VMCS_GUEST_INTR_STATUS 0x00000810
+
+/* 16-bit host-state fields */
+#define VMCS_HOST_ES_SELECTOR 0x00000C00
+#define VMCS_HOST_CS_SELECTOR 0x00000C02
+#define VMCS_HOST_SS_SELECTOR 0x00000C04
+#define VMCS_HOST_DS_SELECTOR 0x00000C06
+#define VMCS_HOST_FS_SELECTOR 0x00000C08
+#define VMCS_HOST_GS_SELECTOR 0x00000C0A
+#define VMCS_HOST_TR_SELECTOR 0x00000C0C
+
+/* 64-bit control fields */
+#define VMCS_IO_BITMAP_A 0x00002000
+#define VMCS_IO_BITMAP_B 0x00002002
+#define VMCS_MSR_BITMAP 0x00002004
+#define VMCS_EXIT_MSR_STORE 0x00002006
+#define VMCS_EXIT_MSR_LOAD 0x00002008
+#define VMCS_ENTRY_MSR_LOAD 0x0000200A
+#define VMCS_EXECUTIVE_VMCS 0x0000200C
+#define VMCS_TSC_OFFSET 0x00002010
+#define VMCS_VIRTUAL_APIC 0x00002012
+#define VMCS_APIC_ACCESS 0x00002014
+#define VMCS_PIR_DESC 0x00002016
+#define VMCS_EPTP 0x0000201A
+#define VMCS_EOI_EXIT0 0x0000201C
+#define VMCS_EOI_EXIT1 0x0000201E
+#define VMCS_EOI_EXIT2 0x00002020
+#define VMCS_EOI_EXIT3 0x00002022
+#define VMCS_EOI_EXIT(vector) (VMCS_EOI_EXIT0 + ((vector) / 64) * 2)
+
+/* 64-bit read-only fields */
+#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400
+
+/* 64-bit guest-state fields */
+#define VMCS_LINK_POINTER 0x00002800
+#define VMCS_GUEST_IA32_DEBUGCTL 0x00002802
+#define VMCS_GUEST_IA32_PAT 0x00002804
+#define VMCS_GUEST_IA32_EFER 0x00002806
+#define VMCS_GUEST_IA32_PERF_GLOBAL_CTRL 0x00002808
+#define VMCS_GUEST_PDPTE0 0x0000280A
+#define VMCS_GUEST_PDPTE1 0x0000280C
+#define VMCS_GUEST_PDPTE2 0x0000280E
+#define VMCS_GUEST_PDPTE3 0x00002810
+
+/* 64-bit host-state fields */
+#define VMCS_HOST_IA32_PAT 0x00002C00
+#define VMCS_HOST_IA32_EFER 0x00002C02
+#define VMCS_HOST_IA32_PERF_GLOBAL_CTRL 0x00002C04
+
+/* 32-bit control fields */
+#define VMCS_PIN_BASED_CTLS 0x00004000
+#define VMCS_PRI_PROC_BASED_CTLS 0x00004002
+#define VMCS_EXCEPTION_BITMAP 0x00004004
+#define VMCS_PF_ERROR_MASK 0x00004006
+#define VMCS_PF_ERROR_MATCH 0x00004008
+#define VMCS_CR3_TARGET_COUNT 0x0000400A
+#define VMCS_EXIT_CTLS 0x0000400C
+#define VMCS_EXIT_MSR_STORE_COUNT 0x0000400E
+#define VMCS_EXIT_MSR_LOAD_COUNT 0x00004010
+#define VMCS_ENTRY_CTLS 0x00004012
+#define VMCS_ENTRY_MSR_LOAD_COUNT 0x00004014
+#define VMCS_ENTRY_INTR_INFO 0x00004016
+#define VMCS_ENTRY_EXCEPTION_ERROR 0x00004018
+#define VMCS_ENTRY_INST_LENGTH 0x0000401A
+#define VMCS_TPR_THRESHOLD 0x0000401C
+#define VMCS_SEC_PROC_BASED_CTLS 0x0000401E
+#define VMCS_PLE_GAP 0x00004020
+#define VMCS_PLE_WINDOW 0x00004022
+
+/* 32-bit read-only data fields */
+#define VMCS_INSTRUCTION_ERROR 0x00004400
+#define VMCS_EXIT_REASON 0x00004402
+#define VMCS_EXIT_INTR_INFO 0x00004404
+#define VMCS_EXIT_INTR_ERRCODE 0x00004406
+#define VMCS_IDT_VECTORING_INFO 0x00004408
+#define VMCS_IDT_VECTORING_ERROR 0x0000440A
+#define VMCS_EXIT_INSTRUCTION_LENGTH 0x0000440C
+#define VMCS_EXIT_INSTRUCTION_INFO 0x0000440E
+
+/* 32-bit guest-state fields */
+#define VMCS_GUEST_ES_LIMIT 0x00004800
+#define VMCS_GUEST_CS_LIMIT 0x00004802
+#define VMCS_GUEST_SS_LIMIT 0x00004804
+#define VMCS_GUEST_DS_LIMIT 0x00004806
+#define VMCS_GUEST_FS_LIMIT 0x00004808
+#define VMCS_GUEST_GS_LIMIT 0x0000480A
+#define VMCS_GUEST_LDTR_LIMIT 0x0000480C
+#define VMCS_GUEST_TR_LIMIT 0x0000480E
+#define VMCS_GUEST_GDTR_LIMIT 0x00004810
+#define VMCS_GUEST_IDTR_LIMIT 0x00004812
+#define VMCS_GUEST_ES_ACCESS_RIGHTS 0x00004814
+#define VMCS_GUEST_CS_ACCESS_RIGHTS 0x00004816
+#define VMCS_GUEST_SS_ACCESS_RIGHTS 0x00004818
+#define VMCS_GUEST_DS_ACCESS_RIGHTS 0x0000481A
+#define VMCS_GUEST_FS_ACCESS_RIGHTS 0x0000481C
+#define VMCS_GUEST_GS_ACCESS_RIGHTS 0x0000481E
+#define VMCS_GUEST_LDTR_ACCESS_RIGHTS 0x00004820
+#define VMCS_GUEST_TR_ACCESS_RIGHTS 0x00004822
+#define VMCS_GUEST_INTERRUPTIBILITY 0x00004824
+#define VMCS_GUEST_ACTIVITY 0x00004826
+#define VMCS_GUEST_SMBASE 0x00004828
+#define VMCS_GUEST_IA32_SYSENTER_CS 0x0000482A
+#define VMCS_PREEMPTION_TIMER_VALUE 0x0000482E
+
+/* 32-bit host state fields */
+#define VMCS_HOST_IA32_SYSENTER_CS 0x00004C00
+
+/* Natural Width control fields */
+#define VMCS_CR0_MASK 0x00006000
+#define VMCS_CR4_MASK 0x00006002
+#define VMCS_CR0_SHADOW 0x00006004
+#define VMCS_CR4_SHADOW 0x00006006
+#define VMCS_CR3_TARGET0 0x00006008
+#define VMCS_CR3_TARGET1 0x0000600A
+#define VMCS_CR3_TARGET2 0x0000600C
+#define VMCS_CR3_TARGET3 0x0000600E
+
+/* Natural Width read-only fields */
+#define VMCS_EXIT_QUALIFICATION 0x00006400
+#define VMCS_IO_RCX 0x00006402
+#define VMCS_IO_RSI 0x00006404
+#define VMCS_IO_RDI 0x00006406
+#define VMCS_IO_RIP 0x00006408
+#define VMCS_GUEST_LINEAR_ADDRESS 0x0000640A
+
+/* Natural Width guest-state fields */
+#define VMCS_GUEST_CR0 0x00006800
+#define VMCS_GUEST_CR3 0x00006802
+#define VMCS_GUEST_CR4 0x00006804
+#define VMCS_GUEST_ES_BASE 0x00006806
+#define VMCS_GUEST_CS_BASE 0x00006808
+#define VMCS_GUEST_SS_BASE 0x0000680A
+#define VMCS_GUEST_DS_BASE 0x0000680C
+#define VMCS_GUEST_FS_BASE 0x0000680E
+#define VMCS_GUEST_GS_BASE 0x00006810
+#define VMCS_GUEST_LDTR_BASE 0x00006812
+#define VMCS_GUEST_TR_BASE 0x00006814
+#define VMCS_GUEST_GDTR_BASE 0x00006816
+#define VMCS_GUEST_IDTR_BASE 0x00006818
+#define VMCS_GUEST_DR7 0x0000681A
+#define VMCS_GUEST_RSP 0x0000681C
+#define VMCS_GUEST_RIP 0x0000681E
+#define VMCS_GUEST_RFLAGS 0x00006820
+#define VMCS_GUEST_PENDING_DBG_EXCEPTIONS 0x00006822
+#define VMCS_GUEST_IA32_SYSENTER_ESP 0x00006824
+#define VMCS_GUEST_IA32_SYSENTER_EIP 0x00006826
+
+/* Natural Width host-state fields */
+#define VMCS_HOST_CR0 0x00006C00
+#define VMCS_HOST_CR3 0x00006C02
+#define VMCS_HOST_CR4 0x00006C04
+#define VMCS_HOST_FS_BASE 0x00006C06
+#define VMCS_HOST_GS_BASE 0x00006C08
+#define VMCS_HOST_TR_BASE 0x00006C0A
+#define VMCS_HOST_GDTR_BASE 0x00006C0C
+#define VMCS_HOST_IDTR_BASE 0x00006C0E
+#define VMCS_HOST_IA32_SYSENTER_ESP 0x00006C10
+#define VMCS_HOST_IA32_SYSENTER_EIP 0x00006C12
+#define VMCS_HOST_RSP 0x00006C14
+#define VMCS_HOST_RIP 0x00006c16
+
+/*
+ * VM instruction error numbers
+ */
+#define VMRESUME_WITH_NON_LAUNCHED_VMCS 5
+
+/*
+ * VMCS exit reasons
+ */
+#define EXIT_REASON_EXCEPTION 0
+#define EXIT_REASON_EXT_INTR 1
+#define EXIT_REASON_TRIPLE_FAULT 2
+#define EXIT_REASON_INIT 3
+#define EXIT_REASON_SIPI 4
+#define EXIT_REASON_IO_SMI 5
+#define EXIT_REASON_SMI 6
+#define EXIT_REASON_INTR_WINDOW 7
+#define EXIT_REASON_NMI_WINDOW 8
+#define EXIT_REASON_TASK_SWITCH 9
+#define EXIT_REASON_CPUID 10
+#define EXIT_REASON_GETSEC 11
+#define EXIT_REASON_HLT 12
+#define EXIT_REASON_INVD 13
+#define EXIT_REASON_INVLPG 14
+#define EXIT_REASON_RDPMC 15
+#define EXIT_REASON_RDTSC 16
+#define EXIT_REASON_RSM 17
+#define EXIT_REASON_VMCALL 18
+#define EXIT_REASON_VMCLEAR 19
+#define EXIT_REASON_VMLAUNCH 20
+#define EXIT_REASON_VMPTRLD 21
+#define EXIT_REASON_VMPTRST 22
+#define EXIT_REASON_VMREAD 23
+#define EXIT_REASON_VMRESUME 24
+#define EXIT_REASON_VMWRITE 25
+#define EXIT_REASON_VMXOFF 26
+#define EXIT_REASON_VMXON 27
+#define EXIT_REASON_CR_ACCESS 28
+#define EXIT_REASON_DR_ACCESS 29
+#define EXIT_REASON_INOUT 30
+#define EXIT_REASON_RDMSR 31
+#define EXIT_REASON_WRMSR 32
+#define EXIT_REASON_INVAL_VMCS 33
+#define EXIT_REASON_INVAL_MSR 34
+#define EXIT_REASON_MWAIT 36
+#define EXIT_REASON_MTF 37
+#define EXIT_REASON_MONITOR 39
+#define EXIT_REASON_PAUSE 40
+#define EXIT_REASON_MCE_DURING_ENTR 41
+#define EXIT_REASON_TPR 43
+#define EXIT_REASON_APIC_ACCESS 44
+#define EXIT_REASON_VIRTUALIZED_EOI 45
+#define EXIT_REASON_GDTR_IDTR 46
+#define EXIT_REASON_LDTR_TR 47
+#define EXIT_REASON_EPT_FAULT 48
+#define EXIT_REASON_EPT_MISCONFIG 49
+#define EXIT_REASON_INVEPT 50
+#define EXIT_REASON_RDTSCP 51
+#define EXIT_REASON_VMX_PREEMPT 52
+#define EXIT_REASON_INVVPID 53
+#define EXIT_REASON_WBINVD 54
+#define EXIT_REASON_XSETBV 55
+#define EXIT_REASON_APIC_WRITE 56
+
+/*
+ * NMI unblocking due to IRET.
+ *
+ * Applies to VM-exits due to hardware exception or EPT fault.
+ */
+#define EXIT_QUAL_NMIUDTI (1 << 12)
+/*
+ * VMCS interrupt information fields
+ */
+#define VMCS_INTR_VALID (1U << 31)
+#define VMCS_INTR_T_MASK 0x700 /* Interruption-info type */
+#define VMCS_INTR_T_HWINTR (0 << 8)
+#define VMCS_INTR_T_NMI (2 << 8)
+#define VMCS_INTR_T_HWEXCEPTION (3 << 8)
+#define VMCS_INTR_T_SWINTR (4 << 8)
+#define VMCS_INTR_T_PRIV_SWEXCEPTION (5 << 8)
+#define VMCS_INTR_T_SWEXCEPTION (6 << 8)
+#define VMCS_INTR_DEL_ERRCODE (1 << 11)
+
+/*
+ * VMCS IDT-Vectoring information fields
+ */
+#define VMCS_IDT_VEC_VECNUM 0xFF
+#define VMCS_IDT_VEC_VALID (1U << 31)
+#define VMCS_IDT_VEC_TYPE 0x700
+#define VMCS_IDT_VEC_ERRCODE_VALID (1U << 11)
+#define VMCS_IDT_VEC_HWINTR (0 << 8)
+#define VMCS_IDT_VEC_NMI (2 << 8)
+#define VMCS_IDT_VEC_HWEXCEPTION (3 << 8)
+#define VMCS_IDT_VEC_SWINTR (4 << 8)
+#define VMCS_IDT_VEC_PRIV_SWEXCEPTION (5 << 8)
+#define VMCS_IDT_VEC_SWEXCEPTION (6 << 8)
+
+/*
+ * VMCS Guest interruptibility field
+ */
+#define VMCS_INTERRUPTIBILITY_STI_BLOCKING (1 << 0)
+#define VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING (1 << 1)
+#define VMCS_INTERRUPTIBILITY_SMI_BLOCKING (1 << 2)
+#define VMCS_INTERRUPTIBILITY_NMI_BLOCKING (1 << 3)
+
+/*
+ * Exit qualification for EXIT_REASON_INVAL_VMCS
+ */
+#define EXIT_QUAL_NMI_WHILE_STI_BLOCKING 3
+
+/*
+ * Exit qualification for EPT violation
+ */
+#define EPT_VIOLATION_DATA_READ (1UL << 0)
+#define EPT_VIOLATION_DATA_WRITE (1UL << 1)
+#define EPT_VIOLATION_INST_FETCH (1UL << 2)
+#define EPT_VIOLATION_GPA_READABLE (1UL << 3)
+#define EPT_VIOLATION_GPA_WRITEABLE (1UL << 4)
+#define EPT_VIOLATION_GPA_EXECUTABLE (1UL << 5)
+#define EPT_VIOLATION_GLA_VALID (1UL << 7)
+#define EPT_VIOLATION_XLAT_VALID (1UL << 8)
+
+/*
+ * Exit qualification for APIC-access VM exit
+ */
+#define APIC_ACCESS_OFFSET(qual) ((qual) & 0xFFF)
+#define APIC_ACCESS_TYPE(qual) (((qual) >> 12) & 0xF)
+
+/*
+ * Exit qualification for APIC-write VM exit
+ */
+#define APIC_WRITE_OFFSET(qual) ((qual) & 0xFFF)
+
+#define VMCS_PIN_BASED_CTLS_EXTINT (1 << 0)
+#define VMCS_PIN_BASED_CTLS_NMI (1 << 3)
+#define VMCS_PIN_BASED_CTLS_VNMI (1 << 5)
+
+#define VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING (1 << 2)
+#define VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET (1 << 3)
+#define VMCS_PRI_PROC_BASED_CTLS_HLT (1 << 7)
+#define VMCS_PRI_PROC_BASED_CTLS_MWAIT (1 << 10)
+#define VMCS_PRI_PROC_BASED_CTLS_TSC (1 << 12)
+#define VMCS_PRI_PROC_BASED_CTLS_CR8_LOAD (1 << 19)
+#define VMCS_PRI_PROC_BASED_CTLS_CR8_STORE (1 << 20)
+#define VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW (1 << 21)
+#define VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING (1 << 22)
+#define VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL (1 << 31)
+
+#define VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES (1 << 0)
+#define VMCS_PRI_PROC_BASED2_CTLS_X2APIC (1 << 4)
+
+enum task_switch_reason {
+ TSR_CALL,
+ TSR_IRET,
+ TSR_JMP,
+ TSR_IDT_GATE, /* task gate in IDT */
+};
+
+#endif
diff --git a/target/i386/hvf/vmx.h b/target/i386/hvf/vmx.h
new file mode 100644
index 0000000000..9dfcd2f2eb
--- /dev/null
+++ b/target/i386/hvf/vmx.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ * Based on Veertu vddh/vmm/vmx.h
+ *
+ * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef VMX_H
+#define VMX_H
+
+#include <stdint.h>
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+#include "vmcs.h"
+#include "cpu.h"
+#include "x86.h"
+
+#include "exec/address-spaces.h"
+
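+/* read GPR */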
+static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
+{
+ uint64_t v;
+
+ if (hv_vcpu_read_register(vcpu, reg, &v)) {
+ abort();
+ }
+
+ return v;
+}
+
+/* write GPR */
+static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
+{
+ if (hv_vcpu_write_register(vcpu, reg, v)) {
+ abort();
+ }
+}
+
+/* read VMCS field */
+static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
+{
+ uint64_t v;
+
+ hv_vmx_vcpu_read_vmcs(vcpu, field, &v);
+
+ return v;
+}
+
+/* write VMCS field */
+static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
+{
+ hv_vmx_vcpu_write_vmcs(vcpu, field, v);
+}
+
+/* desired control word constrained by hardware/hypervisor capabilities */
+static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
+{
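+    /*
+     * Capability MSRs report the allowed-0 settings (bits that must be 1)
+     * in their low 32 bits and the allowed-1 settings in their high 32
+     * bits: force the mandatory bits on and mask off unsupported ones.
+     */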
+ return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
+}
+
+#define VM_ENTRY_GUEST_LMA (1LL << 9)
+
+#define AR_TYPE_ACCESSES_MASK 1
+#define AR_TYPE_READABLE_MASK (1 << 1)
+#define AR_TYPE_WRITEABLE_MASK (1 << 2)
+#define AR_TYPE_CODE_MASK (1 << 3)
+#define AR_TYPE_MASK 0x0f
+#define AR_TYPE_BUSY_64_TSS 11
+#define AR_TYPE_BUSY_32_TSS 11
+#define AR_TYPE_BUSY_16_TSS 3
+#define AR_TYPE_LDT 2
+
+static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
+{
+ uint64_t entry_ctls;
+
+ efer |= MSR_EFER_LMA;
+ wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
+    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
+    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);
+
+ uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
+ if ((efer & MSR_EFER_LME) &&
+ (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
+ wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
+ (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
+ }
+}
+
+static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
+{
+ uint64_t entry_ctls;
+
+ entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
+ wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);
+
+ efer &= ~MSR_EFER_LMA;
+ wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
+}
+
+static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
+{
+ int i;
+ uint64_t pdpte[4] = {0, 0, 0, 0};
+ uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
+ uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
+
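+    /*
+     * If the guest enables PAE paging without long mode, the four PDPTEs
+     * are fetched from the page-directory-pointer table referenced by CR3
+     * and mirrored into the VMCS below.
+     */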
+ if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
+ !(efer & MSR_EFER_LME)) {
+ address_space_rw(&address_space_memory,
+ rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
+ MEMTXATTRS_UNSPECIFIED,
+ (uint8_t *)pdpte, 32, 0);
+ }
+
+ for (i = 0; i < 4; i++) {
+ wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
+ }
+
+ wvmcs(vcpu, VMCS_CR0_MASK, CR0_CD | CR0_NE | CR0_PG);
+ wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);
+
+ cr0 &= ~CR0_CD;
+ wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);
+
+ if (efer & MSR_EFER_LME) {
+ if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
+ enter_long_mode(vcpu, cr0, efer);
+ }
+ if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG)) {
+ exit_long_mode(vcpu, cr0, efer);
+ }
+ }
+
+ hv_vcpu_invalidate_tlb(vcpu);
+ hv_vcpu_flush(vcpu);
+}
+
+static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
+{
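+    /*
+     * VMXE must stay set in the real CR4 while the vCPU runs in VMX
+     * non-root operation; the read shadow records the value the guest wrote.
+     */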
+ uint64_t guest_cr4 = cr4 | CR4_VMXE;
+
+ wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
+ wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);
+
+ hv_vcpu_invalidate_tlb(vcpu);
+ hv_vcpu_flush(vcpu);
+}
+
+static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
+{
+ uint64_t val;
+
+    /* BUG: should take overlap into account */
+ wreg(cpu->hvf_fd, HV_X86_RIP, rip);
+
+    /* after moving RIP forward, we need to clear the interruptibility state */
+ val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
+ if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
+ val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
+ VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
+ }
+}
+
+static inline void vmx_clear_nmi_blocking(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ env->hflags2 &= ~HF2_NMI_MASK;
+ uint32_t gi = (uint32_t) rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
+ gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
+}
+
+static inline void vmx_set_nmi_blocking(CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ env->hflags2 |= HF2_NMI_MASK;
+ uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
+ gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
+}
+
+static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
+{
+ uint64_t val;
+ val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
+    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
+          VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
+}
+
+static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
+{
+    uint64_t val;
+ val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
+ wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
+ ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
+}
+
+#endif
diff --git a/target/i386/hvf/x86.c b/target/i386/hvf/x86.c
new file mode 100644
index 0000000000..3afcedc7fc
--- /dev/null
+++ b/target/i386/hvf/x86.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "qemu-common.h"
+#include "x86_decode.h"
+#include "x86_emu.h"
+#include "vmcs.h"
+#include "vmx.h"
+#include "x86_mmu.h"
+#include "x86_descr.h"
+
+/* static uint32_t x86_segment_access_rights(struct x86_segment_descriptor *var)
+{
+ uint32_t ar;
+
+ if (!var->p) {
+ ar = 1 << 16;
+ return ar;
+ }
+
+ ar = var->type & 15;
+ ar |= (var->s & 1) << 4;
+ ar |= (var->dpl & 3) << 5;
+ ar |= (var->p & 1) << 7;
+ ar |= (var->avl & 1) << 12;
+ ar |= (var->l & 1) << 13;
+ ar |= (var->db & 1) << 14;
+ ar |= (var->g & 1) << 15;
+ return ar;
+}*/
+
+bool x86_read_segment_descriptor(struct CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x68_segment_selector sel)
+{
+ target_ulong base;
+ uint32_t limit;
+
+ memset(desc, 0, sizeof(*desc));
+
+ /* valid gdt descriptors start from index 1 */
+ if (!sel.index && GDT_SEL == sel.ti) {
+ return false;
+ }
+
+ if (GDT_SEL == sel.ti) {
+ base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
+ limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
+ } else {
+ base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
+ limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
+ }
+
+ if (sel.index * 8 >= limit) {
+ return false;
+ }
+
+ vmx_read_mem(cpu, desc, base + sel.index * 8, sizeof(*desc));
+ return true;
+}
+
+bool x86_write_segment_descriptor(struct CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x68_segment_selector sel)
+{
+ target_ulong base;
+ uint32_t limit;
+
+ if (GDT_SEL == sel.ti) {
+ base = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_BASE);
+ limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
+ } else {
+ base = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_BASE);
+ limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_LDTR_LIMIT);
+ }
+
+ if (sel.index * 8 >= limit) {
+ printf("%s: gdt limit\n", __func__);
+ return false;
+ }
+ vmx_write_mem(cpu, base + sel.index * 8, desc, sizeof(*desc));
+ return true;
+}
+
+bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
+ int gate)
+{
+ target_ulong base = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_BASE);
+ uint32_t limit = rvmcs(cpu->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
+
+ memset(idt_desc, 0, sizeof(*idt_desc));
+ if (gate * 8 >= limit) {
+ printf("%s: idt limit\n", __func__);
+ return false;
+ }
+
+ vmx_read_mem(cpu, idt_desc, base + gate * 8, sizeof(*idt_desc));
+ return true;
+}
+
+bool x86_is_protected(struct CPUState *cpu)
+{
+ uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
+ return cr0 & CR0_PE;
+}
+
+bool x86_is_real(struct CPUState *cpu)
+{
+ return !x86_is_protected(cpu);
+}
+
+bool x86_is_v8086(struct CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ return x86_is_protected(cpu) && (RFLAGS(env) & RFLAGS_VM);
+}
+
+bool x86_is_long_mode(struct CPUState *cpu)
+{
+ return rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER) & MSR_EFER_LMA;
+}
+
+bool x86_is_long64_mode(struct CPUState *cpu)
+{
+ struct vmx_segment desc;
+ vmx_read_segment_descriptor(cpu, &desc, R_CS);
+
+ return x86_is_long_mode(cpu) && ((desc.ar >> 13) & 1);
+}
+
+bool x86_is_paging_mode(struct CPUState *cpu)
+{
+ uint64_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
+ return cr0 & CR0_PG;
+}
+
+bool x86_is_pae_enabled(struct CPUState *cpu)
+{
+ uint64_t cr4 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR4);
+ return cr4 & CR4_PAE;
+}
+
+target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, X86Seg seg)
+{
+ return vmx_read_segment_base(cpu, seg) + addr;
+}
+
+target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
+ X86Seg seg)
+{
+ switch (size) {
+ case 2:
+ addr = (uint16_t)addr;
+ break;
+ case 4:
+ addr = (uint32_t)addr;
+ break;
+ default:
+ break;
+ }
+ return linear_addr(cpu, addr, seg);
+}
+
+target_ulong linear_rip(struct CPUState *cpu, target_ulong rip)
+{
+ return linear_addr(cpu, rip, R_CS);
+}
diff --git a/target/i386/hvf/x86.h b/target/i386/hvf/x86.h
new file mode 100644
index 0000000000..103ec0976c
--- /dev/null
+++ b/target/i386/hvf/x86.h
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Veertu Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HVF_X86_H
+#define HVF_X86_H 1
+
+typedef struct x86_register {
+ union {
+ struct {
+ uint64_t rrx; /* full 64 bit */
+ };
+ struct {
+ uint32_t erx; /* low 32 bit part */
+ uint32_t hi32_unused1;
+ };
+ struct {
+ uint16_t rx; /* low 16 bit part */
+ uint16_t hi16_unused1;
+ uint32_t hi32_unused2;
+ };
+ struct {
+ uint8_t lx; /* low 8 bit part */
+ uint8_t hx; /* high 8 bit */
+ uint16_t hi16_unused2;
+ uint32_t hi32_unused3;
+ };
+ };
+} __attribute__ ((__packed__)) x86_register;
+
+typedef enum x86_rflags {
+ RFLAGS_CF = (1L << 0),
+ RFLAGS_PF = (1L << 2),
+ RFLAGS_AF = (1L << 4),
+ RFLAGS_ZF = (1L << 6),
+ RFLAGS_SF = (1L << 7),
+ RFLAGS_TF = (1L << 8),
+ RFLAGS_IF = (1L << 9),
+ RFLAGS_DF = (1L << 10),
+ RFLAGS_OF = (1L << 11),
+ RFLAGS_IOPL = (3L << 12),
+ RFLAGS_NT = (1L << 14),
+ RFLAGS_RF = (1L << 16),
+ RFLAGS_VM = (1L << 17),
+ RFLAGS_AC = (1L << 18),
+ RFLAGS_VIF = (1L << 19),
+ RFLAGS_VIP = (1L << 20),
+ RFLAGS_ID = (1L << 21),
+} x86_rflags;
+
+/* rflags register */
+typedef struct x86_reg_flags {
+ union {
+ struct {
+ uint64_t rflags;
+ };
+ struct {
+ uint32_t eflags;
+ uint32_t hi32_unused1;
+ };
+ struct {
+ uint32_t cf:1;
+ uint32_t unused1:1;
+ uint32_t pf:1;
+ uint32_t unused2:1;
+ uint32_t af:1;
+ uint32_t unused3:1;
+ uint32_t zf:1;
+ uint32_t sf:1;
+ uint32_t tf:1;
+ uint32_t ief:1;
+ uint32_t df:1;
+ uint32_t of:1;
+ uint32_t iopl:2;
+ uint32_t nt:1;
+ uint32_t unused4:1;
+ uint32_t rf:1;
+ uint32_t vm:1;
+ uint32_t ac:1;
+ uint32_t vif:1;
+ uint32_t vip:1;
+ uint32_t id:1;
+ uint32_t unused5:10;
+ uint32_t hi32_unused2;
+ };
+ };
+} __attribute__ ((__packed__)) x86_reg_flags;
+
+typedef enum x86_reg_cr0 {
+ CR0_PE = (1L << 0),
+ CR0_MP = (1L << 1),
+ CR0_EM = (1L << 2),
+ CR0_TS = (1L << 3),
+ CR0_ET = (1L << 4),
+ CR0_NE = (1L << 5),
+ CR0_WP = (1L << 16),
+ CR0_AM = (1L << 18),
+ CR0_NW = (1L << 29),
+ CR0_CD = (1L << 30),
+ CR0_PG = (1L << 31),
+} x86_reg_cr0;
+
+typedef enum x86_reg_cr4 {
+ CR4_VME = (1L << 0),
+ CR4_PVI = (1L << 1),
+ CR4_TSD = (1L << 2),
+ CR4_DE = (1L << 3),
+ CR4_PSE = (1L << 4),
+ CR4_PAE = (1L << 5),
+ CR4_MSE = (1L << 6),
+ CR4_PGE = (1L << 7),
+ CR4_PCE = (1L << 8),
+ CR4_OSFXSR = (1L << 9),
+ CR4_OSXMMEXCPT = (1L << 10),
+ CR4_VMXE = (1L << 13),
+ CR4_SMXE = (1L << 14),
+ CR4_FSGSBASE = (1L << 16),
+ CR4_PCIDE = (1L << 17),
+ CR4_OSXSAVE = (1L << 18),
+ CR4_SMEP = (1L << 20),
+} x86_reg_cr4;
+
+/* 16 bit Task State Segment */
+typedef struct x86_tss_segment16 {
+ uint16_t link;
+ uint16_t sp0;
+ uint16_t ss0;
+ uint32_t sp1;
+ uint16_t ss1;
+ uint32_t sp2;
+ uint16_t ss2;
+ uint16_t ip;
+ uint16_t flags;
+ uint16_t ax;
+ uint16_t cx;
+ uint16_t dx;
+ uint16_t bx;
+ uint16_t sp;
+ uint16_t bp;
+ uint16_t si;
+ uint16_t di;
+ uint16_t es;
+ uint16_t cs;
+ uint16_t ss;
+ uint16_t ds;
+ uint16_t ldtr;
+} __attribute__((packed)) x86_tss_segment16;
+
+/* 32 bit Task State Segment */
+typedef struct x86_tss_segment32 {
+ uint32_t prev_tss;
+ uint32_t esp0;
+ uint32_t ss0;
+ uint32_t esp1;
+ uint32_t ss1;
+ uint32_t esp2;
+ uint32_t ss2;
+ uint32_t cr3;
+ uint32_t eip;
+ uint32_t eflags;
+ uint32_t eax;
+ uint32_t ecx;
+ uint32_t edx;
+ uint32_t ebx;
+ uint32_t esp;
+ uint32_t ebp;
+ uint32_t esi;
+ uint32_t edi;
+ uint32_t es;
+ uint32_t cs;
+ uint32_t ss;
+ uint32_t ds;
+ uint32_t fs;
+ uint32_t gs;
+ uint32_t ldt;
+ uint16_t trap;
+ uint16_t iomap_base;
+} __attribute__ ((__packed__)) x86_tss_segment32;
+
+/* 64 bit Task State Segment */
+typedef struct x86_tss_segment64 {
+ uint32_t unused;
+ uint64_t rsp0;
+ uint64_t rsp1;
+ uint64_t rsp2;
+ uint64_t unused1;
+ uint64_t ist1;
+ uint64_t ist2;
+ uint64_t ist3;
+ uint64_t ist4;
+ uint64_t ist5;
+ uint64_t ist6;
+ uint64_t ist7;
+ uint64_t unused2;
+ uint16_t unused3;
+ uint16_t iomap_base;
+} __attribute__ ((__packed__)) x86_tss_segment64;
+
+/* segment descriptors */
+typedef struct x86_segment_descriptor {
+ uint64_t limit0:16;
+ uint64_t base0:16;
+ uint64_t base1:8;
+ uint64_t type:4;
+ uint64_t s:1;
+ uint64_t dpl:2;
+ uint64_t p:1;
+ uint64_t limit1:4;
+ uint64_t avl:1;
+ uint64_t l:1;
+ uint64_t db:1;
+ uint64_t g:1;
+ uint64_t base2:8;
+} __attribute__ ((__packed__)) x86_segment_descriptor;
+
+static inline uint32_t x86_segment_base(x86_segment_descriptor *desc)
+{
+ return (uint32_t)((desc->base2 << 24) | (desc->base1 << 16) | desc->base0);
+}
+
+static inline void x86_set_segment_base(x86_segment_descriptor *desc,
+ uint32_t base)
+{
+ desc->base2 = base >> 24;
+ desc->base1 = (base >> 16) & 0xff;
+ desc->base0 = base & 0xffff;
+}
+
+static inline uint32_t x86_segment_limit(x86_segment_descriptor *desc)
+{
+ uint32_t limit = (uint32_t)((desc->limit1 << 16) | desc->limit0);
+ if (desc->g) {
+ return (limit << 12) | 0xfff;
+ }
+ return limit;
+}
+
+static inline void x86_set_segment_limit(x86_segment_descriptor *desc,
+ uint32_t limit)
+{
+ desc->limit0 = limit & 0xffff;
+ desc->limit1 = limit >> 16;
+}
+
+typedef struct x86_call_gate {
+ uint64_t offset0:16;
+ uint64_t selector:16;
+ uint64_t param_count:4;
+ uint64_t reserved:3;
+ uint64_t type:4;
+ uint64_t dpl:1;
+ uint64_t p:1;
+ uint64_t offset1:16;
+} __attribute__ ((__packed__)) x86_call_gate;
+
+static inline uint32_t x86_call_gate_offset(x86_call_gate *gate)
+{
+ return (uint32_t)((gate->offset1 << 16) | gate->offset0);
+}
+
+#define LDT_SEL 0
+#define GDT_SEL 1
+
+typedef struct x68_segment_selector {
+ union {
+ uint16_t sel;
+ struct {
+ uint16_t rpl:3;
+ uint16_t ti:1;
+ uint16_t index:12;
+ };
+ };
+} __attribute__ ((__packed__)) x68_segment_selector;
+
+typedef struct lazy_flags {
+ target_ulong result;
+ target_ulong auxbits;
+} lazy_flags;
+
+/* Emulator state used by the HVF instruction decoder/emulator */
+struct HVFX86EmulatorState {
+ int interruptable;
+ uint64_t fetch_rip;
+ uint64_t rip;
+ struct x86_register regs[16];
+ struct x86_reg_flags rflags;
+ struct lazy_flags lflags;
+ uint8_t mmio_buf[4096];
+};
+
+/* useful register access macros */
+#define RIP(cpu) (cpu->hvf_emul->rip)
+#define EIP(cpu) ((uint32_t)cpu->hvf_emul->rip)
+#define RFLAGS(cpu) (cpu->hvf_emul->rflags.rflags)
+#define EFLAGS(cpu) (cpu->hvf_emul->rflags.eflags)
+
+#define RRX(cpu, reg) (cpu->hvf_emul->regs[reg].rrx)
+#define RAX(cpu) RRX(cpu, R_EAX)
+#define RCX(cpu) RRX(cpu, R_ECX)
+#define RDX(cpu) RRX(cpu, R_EDX)
+#define RBX(cpu) RRX(cpu, R_EBX)
+#define RSP(cpu) RRX(cpu, R_ESP)
+#define RBP(cpu) RRX(cpu, R_EBP)
+#define RSI(cpu) RRX(cpu, R_ESI)
+#define RDI(cpu) RRX(cpu, R_EDI)
+#define R8(cpu) RRX(cpu, R_R8)
+#define R9(cpu) RRX(cpu, R_R9)
+#define R10(cpu) RRX(cpu, R_R10)
+#define R11(cpu) RRX(cpu, R_R11)
+#define R12(cpu) RRX(cpu, R_R12)
+#define R13(cpu) RRX(cpu, R_R13)
+#define R14(cpu) RRX(cpu, R_R14)
+#define R15(cpu) RRX(cpu, R_R15)
+
+#define ERX(cpu, reg) (cpu->hvf_emul->regs[reg].erx)
+#define EAX(cpu) ERX(cpu, R_EAX)
+#define ECX(cpu) ERX(cpu, R_ECX)
+#define EDX(cpu) ERX(cpu, R_EDX)
+#define EBX(cpu) ERX(cpu, R_EBX)
+#define ESP(cpu) ERX(cpu, R_ESP)
+#define EBP(cpu) ERX(cpu, R_EBP)
+#define ESI(cpu) ERX(cpu, R_ESI)
+#define EDI(cpu) ERX(cpu, R_EDI)
+
+#define RX(cpu, reg) (cpu->hvf_emul->regs[reg].rx)
+#define AX(cpu) RX(cpu, R_EAX)
+#define CX(cpu) RX(cpu, R_ECX)
+#define DX(cpu) RX(cpu, R_EDX)
+#define BP(cpu) RX(cpu, R_EBP)
+#define SP(cpu) RX(cpu, R_ESP)
+#define BX(cpu) RX(cpu, R_EBX)
+#define SI(cpu) RX(cpu, R_ESI)
+#define DI(cpu) RX(cpu, R_EDI)
+
+#define RL(cpu, reg) (cpu->hvf_emul->regs[reg].lx)
+#define AL(cpu) RL(cpu, R_EAX)
+#define CL(cpu) RL(cpu, R_ECX)
+#define DL(cpu) RL(cpu, R_EDX)
+#define BL(cpu) RL(cpu, R_EBX)
+
+#define RH(cpu, reg) (cpu->hvf_emul->regs[reg].hx)
+#define AH(cpu) RH(cpu, R_EAX)
+#define CH(cpu) RH(cpu, R_ECX)
+#define DH(cpu) RH(cpu, R_EDX)
+#define BH(cpu) RH(cpu, R_EBX)
+
+/* deal with GDT/LDT descriptors in memory */
+bool x86_read_segment_descriptor(struct CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x68_segment_selector sel);
+bool x86_write_segment_descriptor(struct CPUState *cpu,
+ struct x86_segment_descriptor *desc,
+ x68_segment_selector sel);
+
+bool x86_read_call_gate(struct CPUState *cpu, struct x86_call_gate *idt_desc,
+ int gate);
+
+/* helpers */
+bool x86_is_protected(struct CPUState *cpu);
+bool x86_is_real(struct CPUState *cpu);
+bool x86_is_v8086(struct CPUState *cpu);
+bool x86_is_long_mode(struct CPUState *cpu);
+bool x86_is_long64_mode(struct CPUState *cpu);
+bool x86_is_paging_mode(struct CPUState *cpu);
+bool x86_is_pae_enabled(struct CPUState *cpu);
+
+enum X86Seg;
+target_ulong linear_addr(struct CPUState *cpu, target_ulong addr, enum X86Seg seg);
+target_ulong linear_addr_size(struct CPUState *cpu, target_ulong addr, int size,
+ enum X86Seg seg);
+target_ulong linear_rip(struct CPUState *cpu, target_ulong rip);
+
+static inline uint64_t rdtscp(void)
+{
+ uint64_t tsc;
+ __asm__ __volatile__("rdtscp; " /* serializing read of tsc */
+ "shl $32,%%rdx; " /* shift higher 32 bits stored in rdx up */
+ "or %%rdx,%%rax" /* and or onto rax */
+ : "=a"(tsc) /* output to tsc variable */
+ :
+ : "%rcx", "%rdx"); /* rcx and rdx are clobbered */
+
+ return tsc;
+}
+
+#endif
diff --git a/target/i386/hvf/x86_cpuid.c b/target/i386/hvf/x86_cpuid.c
new file mode 100644
index 0000000000..9874a46e92
--- /dev/null
+++ b/target/i386/hvf/x86_cpuid.c
@@ -0,0 +1,166 @@
+/*
+ * i386 CPUID helper functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * cpuid
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "x86.h"
+#include "vmx.h"
+#include "sysemu/hvf.h"
+
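+/* Read a host extended control register (XCR) with the XGETBV instruction. */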
+static uint64_t xgetbv(uint32_t xcr)
+{
+ uint32_t eax, edx;
+
+ __asm__ volatile ("xgetbv"
+ : "=a" (eax), "=d" (edx)
+ : "c" (xcr));
+
+ return (((uint64_t)edx) << 32) | eax;
+}
+
+static bool vmx_mpx_supported(void)
+{
+ uint64_t cap_exit, cap_entry;
+
+ hv_vmx_read_capability(HV_VMX_CAP_ENTRY, &cap_entry);
+ hv_vmx_read_capability(HV_VMX_CAP_EXIT, &cap_exit);
+
+ return ((cap_exit & (1 << 23)) && (cap_entry & (1 << 16)));
+}
+
+uint32_t hvf_get_supported_cpuid(uint32_t func, uint32_t idx,
+ int reg)
+{
+ uint64_t cap;
+ uint32_t eax, ebx, ecx, edx;
+
+ host_cpuid(func, idx, &eax, &ebx, &ecx, &edx);
+
+ switch (func) {
+ case 0:
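+        /* Clamp the highest supported basic CPUID leaf to 0xD (XSAVE). */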
+ eax = eax < (uint32_t)0xd ? eax : (uint32_t)0xd;
+ break;
+ case 1:
+ edx &= CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
+ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
+ CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX |
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS;
+ ecx &= CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 |
+ CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID |
+ CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_MOVBE |
+ CPUID_EXT_POPCNT | CPUID_EXT_AES | CPUID_EXT_XSAVE |
+ CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND;
+ ecx |= CPUID_EXT_HYPERVISOR;
+ break;
+ case 6:
+ eax = CPUID_6_EAX_ARAT;
+ ebx = 0;
+ ecx = 0;
+ edx = 0;
+ break;
+ case 7:
+ if (idx == 0) {
+ ebx &= CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 |
+ CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 |
+ CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_RTM |
+ CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA |
+ CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512PF |
+ CPUID_7_0_EBX_AVX512ER | CPUID_7_0_EBX_AVX512CD |
+ CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB |
+ CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_SHA_NI |
+ CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL |
+ CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_MPX;
+
+ if (!vmx_mpx_supported()) {
+ ebx &= ~CPUID_7_0_EBX_MPX;
+ }
+ hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
+ if (!(cap & CPU_BASED2_INVPCID)) {
+ ebx &= ~CPUID_7_0_EBX_INVPCID;
+ }
+
+ ecx &= CPUID_7_0_ECX_AVX512BMI | CPUID_7_0_ECX_AVX512_VPOPCNTDQ;
+ edx &= CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS;
+ } else {
+ ebx = 0;
+ ecx = 0;
+ edx = 0;
+ }
+ eax = 0;
+ break;
+ case 0xD:
+ if (idx == 0) {
+ uint64_t host_xcr0 = xgetbv(0);
+ uint64_t supp_xcr0 = host_xcr0 & (XSTATE_FP_MASK | XSTATE_SSE_MASK |
+ XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK |
+ XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK |
+ XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK);
+ eax &= supp_xcr0;
+ if (!vmx_mpx_supported()) {
+ eax &= ~(XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK);
+ }
+ } else if (idx == 1) {
+ hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2, &cap);
+ eax &= CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1;
+ if (!(cap & CPU_BASED2_XSAVES_XRSTORS)) {
+ eax &= ~CPUID_XSAVE_XSAVES;
+ }
+ }
+ break;
+ case 0x80000001:
+ /* LM only if HVF in 64-bit mode */
+ edx &= CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
+ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
+ CPUID_EXT2_SYSCALL | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
+ CPUID_PAT | CPUID_PSE36 | CPUID_EXT2_MMXEXT | CPUID_MMX |
+ CPUID_FXSR | CPUID_EXT2_FXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_3DNOWEXT |
+ CPUID_EXT2_3DNOW | CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX;
+ hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, &cap);
+ if (!(cap & CPU_BASED_TSC_OFFSET)) {
+ edx &= ~CPUID_EXT2_RDTSCP;
+ }
+ ecx &= CPUID_EXT3_LAHF_LM | CPUID_EXT3_CMP_LEG | CPUID_EXT3_CR8LEG |
+ CPUID_EXT3_ABM | CPUID_EXT3_SSE4A | CPUID_EXT3_MISALIGNSSE |
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_OSVW | CPUID_EXT3_XOP |
+ CPUID_EXT3_FMA4 | CPUID_EXT3_TBM;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (reg) {
+ case R_EAX:
+ return eax;
+ case R_EBX:
+ return ebx;
+ case R_ECX:
+ return ecx;
+ case R_EDX:
+ return edx;
+ default:
+ return 0;
+ }
+}
diff --git a/target/i386/hvf/x86_decode.c b/target/i386/hvf/x86_decode.c
new file mode 100644
index 0000000000..bf93e8207d
--- /dev/null
+++ b/target/i386/hvf/x86_decode.c
@@ -0,0 +1,2186 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu-common.h"
+#include "panic.h"
+#include "x86_decode.h"
+#include "string.h"
+#include "vmx.h"
+#include "x86_mmu.h"
+#include "x86_descr.h"
+
+#define OPCODE_ESCAPE 0xf
+
+static void decode_invalid(CPUX86State *env, struct x86_decode *decode)
+{
+ printf("%llx: failed to decode instruction ", env->hvf_emul->fetch_rip -
+ decode->len);
+ for (int i = 0; i < decode->opcode_len; i++) {
+ printf("%x ", decode->opcode[i]);
+ }
+ printf("\n");
+ VM_PANIC("decoder failed\n");
+}
+
+uint64_t sign(uint64_t val, int size)
+{
+ switch (size) {
+ case 1:
+ val = (int8_t)val;
+ break;
+ case 2:
+ val = (int16_t)val;
+ break;
+ case 4:
+ val = (int32_t)val;
+ break;
+ case 8:
+ val = (int64_t)val;
+ break;
+ default:
+ VM_PANIC_EX("%s invalid size %d\n", __func__, size);
+ break;
+ }
+ return val;
+}
+
+static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode,
+ int size)
+{
+ target_ulong val = 0;
+
+ switch (size) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ VM_PANIC_EX("%s invalid size %d\n", __func__, size);
+ break;
+ }
+ target_ulong va = linear_rip(ENV_GET_CPU(env), RIP(env)) + decode->len;
+ vmx_read_mem(ENV_GET_CPU(env), &val, va, size);
+ decode->len += size;
+
+ return val;
+}
+
+static inline uint8_t decode_byte(CPUX86State *env, struct x86_decode *decode)
+{
+ return (uint8_t)decode_bytes(env, decode, 1);
+}
+
+static inline uint16_t decode_word(CPUX86State *env, struct x86_decode *decode)
+{
+ return (uint16_t)decode_bytes(env, decode, 2);
+}
+
+static inline uint32_t decode_dword(CPUX86State *env, struct x86_decode *decode)
+{
+ return (uint32_t)decode_bytes(env, decode, 4);
+}
+
+static inline uint64_t decode_qword(CPUX86State *env, struct x86_decode *decode)
+{
+ return decode_bytes(env, decode, 8);
+}
+
+static void decode_modrm_rm(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_RM;
+}
+
+static void decode_modrm_reg(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_REG;
+ op->reg = decode->modrm.reg;
+ op->ptr = get_reg_ref(env, op->reg, decode->rex.r, decode->operand_size);
+}
+
+static void decode_rax(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_REG;
+ op->reg = R_EAX;
+ op->ptr = get_reg_ref(env, op->reg, 0, decode->operand_size);
+}
+
+static inline void decode_immediate(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *var, int size)
+{
+ var->type = X86_VAR_IMMEDIATE;
+ var->size = size;
+ switch (size) {
+ case 1:
+ var->val = decode_byte(env, decode);
+ break;
+ case 2:
+ var->val = decode_word(env, decode);
+ break;
+ case 4:
+ var->val = decode_dword(env, decode);
+ break;
+ case 8:
+ var->val = decode_qword(env, decode);
+ break;
+ default:
+ VM_PANIC_EX("bad size %d\n", size);
+ }
+}
+
+static void decode_imm8(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, 1);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm8_signed(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, 1);
+ op->val = sign(op->val, 1);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm16(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, 2);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+
+static void decode_imm(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ if (8 == decode->operand_size) {
+ decode_immediate(env, decode, op, 4);
+ op->val = sign(op->val, decode->operand_size);
+ } else {
+ decode_immediate(env, decode, op, decode->operand_size);
+ }
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm_signed(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ decode_immediate(env, decode, op, decode->operand_size);
+ op->val = sign(op->val, decode->operand_size);
+ op->type = X86_VAR_IMMEDIATE;
+}
+
+static void decode_imm_1(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_IMMEDIATE;
+ op->val = 1;
+}
+
+static void decode_imm_0(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_IMMEDIATE;
+ op->val = 0;
+}
+
+
+static void decode_pushseg(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];
+
+ decode->op[0].type = X86_VAR_REG;
+ switch (op) {
+ case 0xe:
+ decode->op[0].reg = R_CS;
+ break;
+ case 0x16:
+ decode->op[0].reg = R_SS;
+ break;
+ case 0x1e:
+ decode->op[0].reg = R_DS;
+ break;
+ case 0x06:
+ decode->op[0].reg = R_ES;
+ break;
+ case 0xa0:
+ decode->op[0].reg = R_FS;
+ break;
+ case 0xa8:
+ decode->op[0].reg = R_GS;
+ break;
+ }
+}
+
+static void decode_popseg(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t op = (decode->opcode_len > 1) ? decode->opcode[1] : decode->opcode[0];
+
+ decode->op[0].type = X86_VAR_REG;
+ switch (op) {
+ case 0xf:
+ decode->op[0].reg = R_CS;
+ break;
+ case 0x17:
+ decode->op[0].reg = R_SS;
+ break;
+ case 0x1f:
+ decode->op[0].reg = R_DS;
+ break;
+ case 0x07:
+ decode->op[0].reg = R_ES;
+ break;
+ case 0xa1:
+ decode->op[0].reg = R_FS;
+ break;
+ case 0xa9:
+ decode->op[0].reg = R_GS;
+ break;
+ }
+}
+
+static void decode_incgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x40;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,
+ decode->operand_size);
+}
+
+static void decode_decgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x48;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,
+ decode->operand_size);
+}
+
+static void decode_incgroup2(CPUX86State *env, struct x86_decode *decode)
+{
+ if (!decode->modrm.reg) {
+ decode->cmd = X86_DECODE_CMD_INC;
+ } else if (1 == decode->modrm.reg) {
+ decode->cmd = X86_DECODE_CMD_DEC;
+ }
+}
+
+static void decode_pushgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x50;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,
+ decode->operand_size);
+}
+
+static void decode_popgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x58;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,
+ decode->operand_size);
+}
+
+static void decode_jxx(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->displacement = decode_bytes(env, decode, decode->operand_size);
+ decode->displacement_size = decode->operand_size;
+}
+
+static void decode_farjmp(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_IMMEDIATE;
+ decode->op[0].val = decode_bytes(env, decode, decode->operand_size);
+ decode->displacement = decode_word(env, decode);
+}
+
+static void decode_addgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_ADD,
+ X86_DECODE_CMD_OR,
+ X86_DECODE_CMD_ADC,
+ X86_DECODE_CMD_SBB,
+ X86_DECODE_CMD_AND,
+ X86_DECODE_CMD_SUB,
+ X86_DECODE_CMD_XOR,
+ X86_DECODE_CMD_CMP
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_rotgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_ROL,
+ X86_DECODE_CMD_ROR,
+ X86_DECODE_CMD_RCL,
+ X86_DECODE_CMD_RCR,
+ X86_DECODE_CMD_SHL,
+ X86_DECODE_CMD_SHR,
+ X86_DECODE_CMD_SHL,
+ X86_DECODE_CMD_SAR
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_f7group(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_TST,
+ X86_DECODE_CMD_TST,
+ X86_DECODE_CMD_NOT,
+ X86_DECODE_CMD_NEG,
+ X86_DECODE_CMD_MUL,
+ X86_DECODE_CMD_IMUL_1,
+ X86_DECODE_CMD_DIV,
+ X86_DECODE_CMD_IDIV
+ };
+ decode->cmd = group[decode->modrm.reg];
+ decode_modrm_rm(env, decode, &decode->op[0]);
+
+ switch (decode->modrm.reg) {
+ case 0:
+ case 1:
+ decode_imm(env, decode, &decode->op[1]);
+ break;
+ case 2:
+ break;
+ case 3:
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = 0;
+ break;
+ default:
+ break;
+ }
+}
+
+static void decode_xchgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0x90;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,
+ decode->operand_size);
+}
+
+static void decode_movgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0xb8;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,
+ decode->operand_size);
+ decode_immediate(env, decode, &decode->op[1], decode->operand_size);
+}
+
+static void fetch_moffs(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_OFFSET;
+ op->ptr = decode_bytes(env, decode, decode->addressing_size);
+}
+
+static void decode_movgroup8(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[0] - 0xb0;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,
+ decode->operand_size);
+ decode_immediate(env, decode, &decode->op[1], decode->operand_size);
+}
+
+static void decode_rcx(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X86_VAR_REG;
+ op->reg = R_ECX;
+ op->ptr = get_reg_ref(env, op->reg, decode->rex.b, decode->operand_size);
+}
+
+struct decode_tbl {
+ uint8_t opcode;
+ enum x86_decode_cmd cmd;
+ uint8_t operand_size;
+ bool is_modrm;
+ void (*decode_op1)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op1);
+ void (*decode_op2)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op2);
+ void (*decode_op3)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op3);
+ void (*decode_op4)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op4);
+ void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode);
+ uint32_t flags_mask;
+};
+
+struct decode_x87_tbl {
+ uint8_t opcode;
+ uint8_t modrm_reg;
+ uint8_t modrm_mod;
+ enum x86_decode_cmd cmd;
+ uint8_t operand_size;
+ bool rev;
+ bool pop;
+ void (*decode_op1)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op1);
+ void (*decode_op2)(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op2);
+ void (*decode_postfix)(CPUX86State *env, struct x86_decode *decode);
+ uint32_t flags_mask;
+};
+
+struct decode_tbl invl_inst = {0x0, 0, 0, false, NULL, NULL, NULL, NULL,
+ decode_invalid};
+
+struct decode_tbl _decode_tbl1[255];
+struct decode_tbl _decode_tbl2[255];
+struct decode_x87_tbl _decode_tbl3[255];
+
+static void decode_x87_ins(CPUX86State *env, struct x86_decode *decode)
+{
+ struct decode_x87_tbl *decoder;
+
+ decode->is_fpu = true;
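+    /*
+     * Index the x87 table by the low nibble of the escape opcode, whether
+     * the modrm byte addresses a register (mod == 3) and the modrm reg
+     * field.
+     */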
+ int mode = decode->modrm.mod == 3 ? 1 : 0;
+ int index = ((decode->opcode[0] & 0xf) << 4) | (mode << 3) |
+ decode->modrm.reg;
+
+ decoder = &_decode_tbl3[index];
+
+ decode->cmd = decoder->cmd;
+ if (decoder->operand_size) {
+ decode->operand_size = decoder->operand_size;
+ }
+ decode->flags_mask = decoder->flags_mask;
+ decode->fpop_stack = decoder->pop;
+ decode->frev = decoder->rev;
+
+ if (decoder->decode_op1) {
+ decoder->decode_op1(env, decode, &decode->op[0]);
+ }
+ if (decoder->decode_op2) {
+ decoder->decode_op2(env, decode, &decode->op[1]);
+ }
+ if (decoder->decode_postfix) {
+ decoder->decode_postfix(env, decode);
+ }
+
+ VM_PANIC_ON_EX(!decode->cmd, "x87 opcode %x %x (%x %x) not decoded\n",
+ decode->opcode[0], decode->modrm.modrm, decoder->modrm_reg,
+ decoder->modrm_mod);
+}
+
+static void decode_ffgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_INC,
+ X86_DECODE_CMD_DEC,
+ X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_PUSH,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL
+ };
+ decode->cmd = group[decode->modrm.reg];
+ if (decode->modrm.reg > 2) {
+ decode->flags_mask = 0;
+ }
+}
+
+static void decode_sldtgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_SLDT,
+ X86_DECODE_CMD_STR,
+ X86_DECODE_CMD_LLDT,
+ X86_DECODE_CMD_LTR,
+ X86_DECODE_CMD_VERR,
+ X86_DECODE_CMD_VERW,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL
+ };
+ decode->cmd = group[decode->modrm.reg];
+ printf("%llx: decode_sldtgroup: %d\n", env->hvf_emul->fetch_rip,
+ decode->modrm.reg);
+}
+
+static void decode_lidtgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_SGDT,
+ X86_DECODE_CMD_SIDT,
+ X86_DECODE_CMD_LGDT,
+ X86_DECODE_CMD_LIDT,
+ X86_DECODE_CMD_SMSW,
+ X86_DECODE_CMD_LMSW,
+ X86_DECODE_CMD_LMSW,
+ X86_DECODE_CMD_INVLPG
+ };
+ decode->cmd = group[decode->modrm.reg];
+ if (0xf9 == decode->modrm.modrm) {
+ decode->opcode[decode->len++] = decode->modrm.modrm;
+ decode->cmd = X86_DECODE_CMD_RDTSCP;
+ }
+}
+
+static void decode_btgroup(CPUX86State *env, struct x86_decode *decode)
+{
+ enum x86_decode_cmd group[] = {
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_INVL,
+ X86_DECODE_CMD_BT,
+ X86_DECODE_CMD_BTS,
+ X86_DECODE_CMD_BTR,
+ X86_DECODE_CMD_BTC
+ };
+ decode->cmd = group[decode->modrm.reg];
+}
+
+static void decode_x87_general(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->is_fpu = true;
+}
+
+static void decode_x87_modrm_floatp(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_FLOATP;
+}
+
+static void decode_x87_modrm_intp(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_INTP;
+}
+
+static void decode_x87_modrm_bytep(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_BYTEP;
+}
+
+static void decode_x87_modrm_st0(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_REG;
+ op->reg = 0;
+}
+
+static void decode_decode_x87_modrm_st0(CPUX86State *env,
+ struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ op->type = X87_VAR_REG;
+ op->reg = decode->modrm.modrm & 7;
+}
+
+
+static void decode_aegroup(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->is_fpu = true;
+ switch (decode->modrm.reg) {
+ case 0:
+ decode->cmd = X86_DECODE_CMD_FXSAVE;
+ decode_x87_modrm_bytep(env, decode, &decode->op[0]);
+ break;
+ case 1:
+ decode_x87_modrm_bytep(env, decode, &decode->op[0]);
+ decode->cmd = X86_DECODE_CMD_FXRSTOR;
+ break;
+ case 5:
+ if (decode->modrm.modrm == 0xe8) {
+ decode->cmd = X86_DECODE_CMD_LFENCE;
+ } else {
+ VM_PANIC("xrstor");
+ }
+ break;
+ case 6:
+ VM_PANIC_ON(decode->modrm.modrm != 0xf0);
+ decode->cmd = X86_DECODE_CMD_MFENCE;
+ break;
+ case 7:
+ if (decode->modrm.modrm == 0xf8) {
+ decode->cmd = X86_DECODE_CMD_SFENCE;
+ } else {
+ decode->cmd = X86_DECODE_CMD_CLFLUSH;
+ }
+ break;
+ default:
+ VM_PANIC_EX("0xae: reg %d\n", decode->modrm.reg);
+ break;
+ }
+}
+
+static void decode_bswap(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = decode->opcode[1] - 0xc8;
+ decode->op[0].ptr = get_reg_ref(env, decode->op[0].reg, decode->rex.b,
+ decode->operand_size);
+}
+
+static void decode_d9_4(CPUX86State *env, struct x86_decode *decode)
+{
+ switch (decode->modrm.modrm) {
+ case 0xe0:
+ /* FCHS */
+ decode->cmd = X86_DECODE_CMD_FCHS;
+ break;
+ case 0xe1:
+ decode->cmd = X86_DECODE_CMD_FABS;
+ break;
+ case 0xe4:
+ VM_PANIC("FTST");
+ break;
+ case 0xe5:
+ /* FXAM */
+ decode->cmd = X86_DECODE_CMD_FXAM;
+ break;
+ default:
+ VM_PANIC("FLDENV");
+ break;
+ }
+}
+
+static void decode_db_4(CPUX86State *env, struct x86_decode *decode)
+{
+ switch (decode->modrm.modrm) {
+ case 0xe0:
+ VM_PANIC_EX("unhandled FNENI: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ case 0xe1:
+ VM_PANIC_EX("unhandled FNDISI: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ case 0xe2:
+ VM_PANIC_EX("unhandled FCLEX: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ case 0xe3:
+ decode->cmd = X86_DECODE_CMD_FNINIT;
+ break;
+ case 0xe4:
+ decode->cmd = X86_DECODE_CMD_FNSETPM;
+ break;
+ default:
+ VM_PANIC_EX("unhandled fpu opcode: %x %x\n", decode->opcode[0],
+ decode->modrm.modrm);
+ break;
+ }
+}
+
+
+#define RFLAGS_MASK_NONE 0
+#define RFLAGS_MASK_OSZAPC (RFLAGS_OF | RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | \
+ RFLAGS_PF | RFLAGS_CF)
+#define RFLAGS_MASK_LAHF (RFLAGS_SF | RFLAGS_ZF | RFLAGS_AF | RFLAGS_PF | \
+ RFLAGS_CF)
+#define RFLAGS_MASK_CF (RFLAGS_CF)
+#define RFLAGS_MASK_IF (RFLAGS_IF)
+#define RFLAGS_MASK_TF (RFLAGS_TF)
+#define RFLAGS_MASK_DF (RFLAGS_DF)
+#define RFLAGS_MASK_ZF (RFLAGS_ZF)
+
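+/*
+ * One-byte opcode table: opcode, command, operand-size override
+ * (0 = use the decoded default), has-modrm flag, up to four operand
+ * decoders, a postfix decoder and the rflags mask the instruction touches.
+ */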
+struct decode_tbl _1op_inst[] = {
+ {0x0, X86_DECODE_CMD_ADD, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x1, X86_DECODE_CMD_ADD, 0, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2, X86_DECODE_CMD_ADD, 1, true, decode_modrm_reg, decode_modrm_rm, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x3, X86_DECODE_CMD_ADD, 0, true, decode_modrm_reg, decode_modrm_rm, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x4, X86_DECODE_CMD_ADD, 1, false, decode_rax, decode_imm8, NULL, NULL,
+ NULL, RFLAGS_MASK_OSZAPC},
+ {0x5, X86_DECODE_CMD_ADD, 0, false, decode_rax, decode_imm, NULL, NULL,
+ NULL, RFLAGS_MASK_OSZAPC},
+ {0x6, X86_DECODE_CMD_PUSH_SEG, 0, false, false, NULL, NULL, NULL,
+ decode_pushseg, RFLAGS_MASK_NONE},
+ {0x7, X86_DECODE_CMD_POP_SEG, 0, false, false, NULL, NULL, NULL,
+ decode_popseg, RFLAGS_MASK_NONE},
+ {0x8, X86_DECODE_CMD_OR, 1, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x9, X86_DECODE_CMD_OR, 0, true, decode_modrm_rm, decode_modrm_reg, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xa, X86_DECODE_CMD_OR, 1, true, decode_modrm_reg, decode_modrm_rm, NULL,
+ NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xb, X86_DECODE_CMD_OR, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xc, X86_DECODE_CMD_OR, 1, false, decode_rax, decode_imm8,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xd, X86_DECODE_CMD_OR, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xe, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
+ {0xf, X86_DECODE_CMD_POP_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
+
+ {0x10, X86_DECODE_CMD_ADC, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x11, X86_DECODE_CMD_ADC, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x12, X86_DECODE_CMD_ADC, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x13, X86_DECODE_CMD_ADC, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x14, X86_DECODE_CMD_ADC, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x15, X86_DECODE_CMD_ADC, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x16, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
+ {0x17, X86_DECODE_CMD_POP_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
+
+ {0x18, X86_DECODE_CMD_SBB, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x19, X86_DECODE_CMD_SBB, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x1a, X86_DECODE_CMD_SBB, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x1b, X86_DECODE_CMD_SBB, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x1c, X86_DECODE_CMD_SBB, 1, false, decode_rax, decode_imm8,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x1d, X86_DECODE_CMD_SBB, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x1e, X86_DECODE_CMD_PUSH_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
+ {0x1f, X86_DECODE_CMD_POP_SEG, 0, false, false,
+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
+
+ {0x20, X86_DECODE_CMD_AND, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x21, X86_DECODE_CMD_AND, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x22, X86_DECODE_CMD_AND, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x23, X86_DECODE_CMD_AND, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x24, X86_DECODE_CMD_AND, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x25, X86_DECODE_CMD_AND, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x28, X86_DECODE_CMD_SUB, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x29, X86_DECODE_CMD_SUB, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2a, X86_DECODE_CMD_SUB, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2b, X86_DECODE_CMD_SUB, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2c, X86_DECODE_CMD_SUB, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2d, X86_DECODE_CMD_SUB, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x2f, X86_DECODE_CMD_DAS, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x30, X86_DECODE_CMD_XOR, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x31, X86_DECODE_CMD_XOR, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x32, X86_DECODE_CMD_XOR, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x33, X86_DECODE_CMD_XOR, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x34, X86_DECODE_CMD_XOR, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x35, X86_DECODE_CMD_XOR, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x38, X86_DECODE_CMD_CMP, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x39, X86_DECODE_CMD_CMP, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x3a, X86_DECODE_CMD_CMP, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x3b, X86_DECODE_CMD_CMP, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x3c, X86_DECODE_CMD_CMP, 1, false, decode_rax, decode_imm8,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x3d, X86_DECODE_CMD_CMP, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x3f, X86_DECODE_CMD_AAS, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x40, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x41, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x42, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x43, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x44, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x45, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x46, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+ {0x47, X86_DECODE_CMD_INC, 0, false,
+ NULL, NULL, NULL, NULL, decode_incgroup, RFLAGS_MASK_OSZAPC},
+
+ {0x48, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x49, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4a, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4b, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4c, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4d, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4e, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+ {0x4f, X86_DECODE_CMD_DEC, 0, false,
+ NULL, NULL, NULL, NULL, decode_decgroup, RFLAGS_MASK_OSZAPC},
+
+ {0x50, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x51, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x52, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x53, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x54, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x55, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x56, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+ {0x57, X86_DECODE_CMD_PUSH, 0, false,
+ NULL, NULL, NULL, NULL, decode_pushgroup, RFLAGS_MASK_NONE},
+
+ {0x58, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x59, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5a, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5b, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5c, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5d, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5e, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+ {0x5f, X86_DECODE_CMD_POP, 0, false,
+ NULL, NULL, NULL, NULL, decode_popgroup, RFLAGS_MASK_NONE},
+
+ {0x60, X86_DECODE_CMD_PUSHA, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x61, X86_DECODE_CMD_POPA, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0x68, X86_DECODE_CMD_PUSH, 0, false, decode_imm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x6a, X86_DECODE_CMD_PUSH, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x69, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg,
+ decode_modrm_rm, decode_imm, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x6b, X86_DECODE_CMD_IMUL_3, 0, true, decode_modrm_reg, decode_modrm_rm,
+ decode_imm8_signed, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0x6c, X86_DECODE_CMD_INS, 1, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x6d, X86_DECODE_CMD_INS, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x6e, X86_DECODE_CMD_OUTS, 1, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x6f, X86_DECODE_CMD_OUTS, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0x70, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x71, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x72, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x73, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x74, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x75, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x76, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x77, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x78, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x79, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7a, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7b, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7c, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7d, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7e, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x7f, X86_DECODE_CMD_JXX, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+
+ {0x80, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
+ {0x81, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm,
+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
+ {0x82, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
+ {0x83, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8_signed,
+ NULL, NULL, decode_addgroup, RFLAGS_MASK_OSZAPC},
+ {0x84, X86_DECODE_CMD_TST, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x85, X86_DECODE_CMD_TST, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0x86, X86_DECODE_CMD_XCHG, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x87, X86_DECODE_CMD_XCHG, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x88, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x89, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8a, X86_DECODE_CMD_MOV, 1, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8b, X86_DECODE_CMD_MOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8c, X86_DECODE_CMD_MOV_FROM_SEG, 0, true, decode_modrm_rm,
+ decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8d, X86_DECODE_CMD_LEA, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8e, X86_DECODE_CMD_MOV_TO_SEG, 0, true, decode_modrm_reg,
+ decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x8f, X86_DECODE_CMD_POP, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0x90, X86_DECODE_CMD_NOP, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x91, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x92, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x93, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x94, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x95, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x96, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+ {0x97, X86_DECODE_CMD_XCHG, 0, false, NULL, decode_rax,
+ NULL, NULL, decode_xchgroup, RFLAGS_MASK_NONE},
+
+ {0x98, X86_DECODE_CMD_CBW, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x99, X86_DECODE_CMD_CWD, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0x9a, X86_DECODE_CMD_CALL_FAR, 0, false, NULL,
+ NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},
+
+ {0x9c, X86_DECODE_CMD_PUSHF, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ /*{0x9d, X86_DECODE_CMD_POPF, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_POPF},*/
+ {0x9e, X86_DECODE_CMD_SAHF, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9f, X86_DECODE_CMD_LAHF, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_LAHF},
+
+ {0xa0, X86_DECODE_CMD_MOV, 1, false, decode_rax, fetch_moffs,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa1, X86_DECODE_CMD_MOV, 0, false, decode_rax, fetch_moffs,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa2, X86_DECODE_CMD_MOV, 1, false, fetch_moffs, decode_rax,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa3, X86_DECODE_CMD_MOV, 0, false, fetch_moffs, decode_rax,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xa4, X86_DECODE_CMD_MOVS, 1, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa5, X86_DECODE_CMD_MOVS, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa6, X86_DECODE_CMD_CMPS, 1, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xa7, X86_DECODE_CMD_CMPS, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xaa, X86_DECODE_CMD_STOS, 1, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xab, X86_DECODE_CMD_STOS, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xac, X86_DECODE_CMD_LODS, 1, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xad, X86_DECODE_CMD_LODS, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xae, X86_DECODE_CMD_SCAS, 1, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xaf, X86_DECODE_CMD_SCAS, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xa8, X86_DECODE_CMD_TST, 1, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xa9, X86_DECODE_CMD_TST, 0, false, decode_rax, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xb0, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb1, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb2, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb3, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb4, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb5, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb6, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+ {0xb7, X86_DECODE_CMD_MOV, 1, false, NULL,
+ NULL, NULL, NULL, decode_movgroup8, RFLAGS_MASK_NONE},
+
+ {0xb8, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xb9, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xba, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xbb, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xbc, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xbd, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xbe, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+ {0xbf, X86_DECODE_CMD_MOV, 0, false, NULL,
+ NULL, NULL, NULL, decode_movgroup, RFLAGS_MASK_NONE},
+
+ {0xc0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+ {0xc1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+
+ {0xc2, X86_DECODE_RET_NEAR, 0, false, decode_imm16,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xc3, X86_DECODE_RET_NEAR, 0, false, NULL,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xc4, X86_DECODE_CMD_LES, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xc5, X86_DECODE_CMD_LDS, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xc6, X86_DECODE_CMD_MOV, 1, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xc7, X86_DECODE_CMD_MOV, 0, true, decode_modrm_rm, decode_imm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xc8, X86_DECODE_CMD_ENTER, 0, false, decode_imm16, decode_imm8,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xc9, X86_DECODE_CMD_LEAVE, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xca, X86_DECODE_RET_FAR, 0, false, decode_imm16, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xcb, X86_DECODE_RET_FAR, 0, false, decode_imm_0, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xcd, X86_DECODE_CMD_INT, 0, false, decode_imm8, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ /*{0xcf, X86_DECODE_CMD_IRET, 0, false, NULL, NULL,
+ NULL, NULL, NULL, RFLAGS_MASK_IRET},*/
+
+ {0xd0, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_imm_1,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+ {0xd1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm_1,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+ {0xd2, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm, decode_rcx,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+ {0xd3, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_rcx,
+ NULL, NULL, decode_rotgroup, RFLAGS_MASK_OSZAPC},
+
+ {0xd4, X86_DECODE_CMD_AAM, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xd5, X86_DECODE_CMD_AAD, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xd7, X86_DECODE_CMD_XLAT, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xd8, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xd9, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xda, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xdb, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xdc, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xdd, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xde, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+ {0xdf, X86_DECODE_CMD_INVL, 0, true, NULL,
+ NULL, NULL, NULL, decode_x87_ins, RFLAGS_MASK_NONE},
+
+ {0xe0, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe1, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe2, X86_DECODE_CMD_LOOP, 0, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xe3, X86_DECODE_CMD_JCXZ, 1, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+
+ {0xe4, X86_DECODE_CMD_IN, 1, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe5, X86_DECODE_CMD_IN, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe6, X86_DECODE_CMD_OUT, 1, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe7, X86_DECODE_CMD_OUT, 0, false, decode_imm8,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe8, X86_DECODE_CMD_CALL_NEAR, 0, false, decode_imm_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xe9, X86_DECODE_CMD_JMP_NEAR, 0, false, decode_imm_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xea, X86_DECODE_CMD_JMP_FAR, 0, false,
+ NULL, NULL, NULL, NULL, decode_farjmp, RFLAGS_MASK_NONE},
+ {0xeb, X86_DECODE_CMD_JMP_NEAR, 1, false, decode_imm8_signed,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xec, X86_DECODE_CMD_IN, 1, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xed, X86_DECODE_CMD_IN, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xee, X86_DECODE_CMD_OUT, 1, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xef, X86_DECODE_CMD_OUT, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xf4, X86_DECODE_CMD_HLT, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xf5, X86_DECODE_CMD_CMC, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},
+
+ {0xf6, X86_DECODE_CMD_INVL, 1, true,
+ NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},
+ {0xf7, X86_DECODE_CMD_INVL, 0, true,
+ NULL, NULL, NULL, NULL, decode_f7group, RFLAGS_MASK_OSZAPC},
+
+ {0xf8, X86_DECODE_CMD_CLC, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},
+ {0xf9, X86_DECODE_CMD_STC, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_CF},
+
+ {0xfa, X86_DECODE_CMD_CLI, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},
+ {0xfb, X86_DECODE_CMD_STI, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_IF},
+ {0xfc, X86_DECODE_CMD_CLD, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},
+ {0xfd, X86_DECODE_CMD_STD, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_DF},
+ {0xfe, X86_DECODE_CMD_INVL, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_incgroup2, RFLAGS_MASK_OSZAPC},
+ {0xff, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_ffgroup, RFLAGS_MASK_OSZAPC},
+};
+
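+/* Two-byte opcode table: opcodes that follow the 0x0f escape byte. */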
+struct decode_tbl _2op_inst[] = {
+ {0x0, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_sldtgroup, RFLAGS_MASK_NONE},
+ {0x1, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_lidtgroup, RFLAGS_MASK_NONE},
+ {0x6, X86_DECODE_CMD_CLTS, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_TF},
+ {0x9, X86_DECODE_CMD_WBINVD, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x18, X86_DECODE_CMD_PREFETCH, 0, true,
+ NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},
+ {0x1f, X86_DECODE_CMD_NOP, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x20, X86_DECODE_CMD_MOV_FROM_CR, 0, true, decode_modrm_rm,
+ decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x21, X86_DECODE_CMD_MOV_FROM_DR, 0, true, decode_modrm_rm,
+ decode_modrm_reg, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x22, X86_DECODE_CMD_MOV_TO_CR, 0, true, decode_modrm_reg,
+ decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x23, X86_DECODE_CMD_MOV_TO_DR, 0, true, decode_modrm_reg,
+ decode_modrm_rm, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x30, X86_DECODE_CMD_WRMSR, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x31, X86_DECODE_CMD_RDTSC, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x32, X86_DECODE_CMD_RDMSR, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x40, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x41, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x42, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x43, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x44, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x45, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x46, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x47, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x48, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x49, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4a, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4b, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4c, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4d, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4e, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x4f, X86_DECODE_CMD_CMOV, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x77, X86_DECODE_CMD_EMMS, 0, false,
+ NULL, NULL, NULL, NULL, decode_x87_general, RFLAGS_MASK_NONE},
+ {0x82, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x83, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x84, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x85, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x86, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x87, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x88, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x89, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8a, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8b, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8c, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8d, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8e, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x8f, X86_DECODE_CMD_JXX, 0, false,
+ NULL, NULL, NULL, NULL, decode_jxx, RFLAGS_MASK_NONE},
+ {0x90, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x91, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x92, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x93, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x94, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x95, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x96, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x97, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x98, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x99, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9a, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9b, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9c, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9d, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9e, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0x9f, X86_DECODE_CMD_SETXX, 1, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xb0, X86_DECODE_CMD_CMPXCHG, 1, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xb1, X86_DECODE_CMD_CMPXCHG, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xb6, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xb7, X86_DECODE_CMD_MOVZX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xb8, X86_DECODE_CMD_POPCNT, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xbe, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xbf, X86_DECODE_CMD_MOVSX, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa0, X86_DECODE_CMD_PUSH_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
+ {0xa1, X86_DECODE_CMD_POP_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
+ {0xa2, X86_DECODE_CMD_CPUID, 0, false,
+ NULL, NULL, NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xa3, X86_DECODE_CMD_BT, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_CF},
+ {0xa4, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xa5, X86_DECODE_CMD_SHLD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xa8, X86_DECODE_CMD_PUSH_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_pushseg, RFLAGS_MASK_NONE},
+ {0xa9, X86_DECODE_CMD_POP_SEG, 0, false, NULL,
+ NULL, NULL, NULL, decode_popseg, RFLAGS_MASK_NONE},
+ {0xab, X86_DECODE_CMD_BTS, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_CF},
+ {0xac, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_imm8, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xad, X86_DECODE_CMD_SHRD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ decode_rcx, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xae, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, decode_aegroup, RFLAGS_MASK_NONE},
+
+ {0xaf, X86_DECODE_CMD_IMUL_2, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xb2, X86_DECODE_CMD_LSS, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xb3, X86_DECODE_CMD_BTR, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xba, X86_DECODE_CMD_INVL, 0, true, decode_modrm_rm, decode_imm8,
+ NULL, NULL, decode_btgroup, RFLAGS_MASK_OSZAPC},
+ {0xbb, X86_DECODE_CMD_BTC, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xbc, X86_DECODE_CMD_BSF, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+ {0xbd, X86_DECODE_CMD_BSR, 0, true, decode_modrm_reg, decode_modrm_rm,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xc1, X86_DECODE_CMD_XADD, 0, true, decode_modrm_rm, decode_modrm_reg,
+ NULL, NULL, NULL, RFLAGS_MASK_OSZAPC},
+
+ {0xc7, X86_DECODE_CMD_CMPXCHG8B, 0, true, decode_modrm_rm,
+ NULL, NULL, NULL, NULL, RFLAGS_MASK_ZF},
+
+ {0xc8, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xc9, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xca, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xcb, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xcc, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xcd, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xce, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+ {0xcf, X86_DECODE_CMD_BSWAP, 0, false,
+ NULL, NULL, NULL, NULL, decode_bswap, RFLAGS_MASK_NONE},
+};
+
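+/* Catch-all x87 entry; init_decoder() seeds every x87 table slot with it before the specific entries below are installed. */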
+struct decode_x87_tbl invl_inst_x87 = {0x0, 0, 0, 0, 0, false, false, NULL,
+ NULL, decode_invalid, 0};
+
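+/* x87 escape opcodes 0xd8-0xdf, keyed by the opcode, the modrm.reg field and whether modrm.mod selects a register form (3) or a memory form (0). */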
+struct decode_x87_tbl _x87_inst[] = {
+ {0xd8, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,
+ decode_x87_modrm_st0, decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false, decode_x87_modrm_st0,
+ decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 4, 3, X86_DECODE_CMD_FSUB, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 5, 3, X86_DECODE_CMD_FSUB, 10, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 6, 3, X86_DECODE_CMD_FDIV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 7, 3, X86_DECODE_CMD_FDIV, 10, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd8, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+
+ {0xd9, 0, 3, X86_DECODE_CMD_FLD, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 0, 0, X86_DECODE_CMD_FLD, 4, false, false,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 1, 0, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 2, 3, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 2, 0, X86_DECODE_CMD_FST, 4, false, false,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 3, 3, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 3, 0, X86_DECODE_CMD_FST, 4, false, true,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 4, 3, X86_DECODE_CMD_INVL, 10, false, false,
+ decode_x87_modrm_st0, NULL, decode_d9_4, RFLAGS_MASK_NONE},
+ {0xd9, 4, 0, X86_DECODE_CMD_INVL, 4, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 5, 3, X86_DECODE_CMD_FLDxx, 10, false, false, NULL, NULL, NULL,
+ RFLAGS_MASK_NONE},
+ {0xd9, 5, 0, X86_DECODE_CMD_FLDCW, 2, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xd9, 7, 3, X86_DECODE_CMD_FNSTCW, 2, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xd9, 7, 0, X86_DECODE_CMD_FNSTCW, 2, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xda, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xda, 0, 0, X86_DECODE_CMD_FADD, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xda, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xda, 1, 0, X86_DECODE_CMD_FMUL, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xda, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xda, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xda, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
+ RFLAGS_MASK_NONE},
+ {0xda, 4, 0, X86_DECODE_CMD_FSUB, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xda, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true, decode_x87_modrm_st0,
+ decode_decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xda, 5, 0, X86_DECODE_CMD_FSUB, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xda, 6, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
+ RFLAGS_MASK_NONE},
+ {0xda, 6, 0, X86_DECODE_CMD_FDIV, 4, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xda, 7, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
+ RFLAGS_MASK_NONE},
+ {0xda, 7, 0, X86_DECODE_CMD_FDIV, 4, true, false, decode_x87_modrm_st0,
+ decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+
+ {0xdb, 0, 3, X86_DECODE_CMD_FCMOV, 10, false, false, decode_x87_modrm_st0,
+ decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 0, 0, X86_DECODE_CMD_FLD, 4, false, false,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 1, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 2, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 2, 0, X86_DECODE_CMD_FST, 4, false, false,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 3, 3, X86_DECODE_CMD_FCMOV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 3, 0, X86_DECODE_CMD_FST, 4, false, true,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 4, 3, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL,
+ decode_db_4, RFLAGS_MASK_NONE},
+ {0xdb, 4, 0, X86_DECODE_CMD_INVL, 10, false, false, NULL, NULL, NULL,
+ RFLAGS_MASK_NONE},
+ {0xdb, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 5, 0, X86_DECODE_CMD_FLD, 10, false, false,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdb, 7, 0, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xdc, 0, 3, X86_DECODE_CMD_FADD, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 0, 0, X86_DECODE_CMD_FADD, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 1, 3, X86_DECODE_CMD_FMUL, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 1, 0, X86_DECODE_CMD_FMUL, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 4, 3, X86_DECODE_CMD_FSUB, 10, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 4, 0, X86_DECODE_CMD_FSUB, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 5, 3, X86_DECODE_CMD_FSUB, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 5, 0, X86_DECODE_CMD_FSUB, 8, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 6, 3, X86_DECODE_CMD_FDIV, 10, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 6, 0, X86_DECODE_CMD_FDIV, 8, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 7, 3, X86_DECODE_CMD_FDIV, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdc, 7, 0, X86_DECODE_CMD_FDIV, 8, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_floatp, NULL, RFLAGS_MASK_NONE},
+
+ {0xdd, 0, 0, X86_DECODE_CMD_FLD, 8, false, false,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 2, 3, X86_DECODE_CMD_FST, 10, false, false,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 2, 0, X86_DECODE_CMD_FST, 8, false, false,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 3, 3, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_st0, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 3, 0, X86_DECODE_CMD_FST, 8, false, true,
+ decode_x87_modrm_floatp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 4, 3, X86_DECODE_CMD_FUCOM, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 4, 0, X86_DECODE_CMD_FRSTOR, 8, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 5, 3, X86_DECODE_CMD_FUCOM, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 7, 0, X86_DECODE_CMD_FNSTSW, 0, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdd, 7, 3, X86_DECODE_CMD_FNSTSW, 0, false, false,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+
+ {0xde, 0, 3, X86_DECODE_CMD_FADD, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 0, 0, X86_DECODE_CMD_FADD, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xde, 1, 3, X86_DECODE_CMD_FMUL, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 1, 0, X86_DECODE_CMD_FMUL, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xde, 4, 3, X86_DECODE_CMD_FSUB, 10, true, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 4, 0, X86_DECODE_CMD_FSUB, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xde, 5, 3, X86_DECODE_CMD_FSUB, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 5, 0, X86_DECODE_CMD_FSUB, 2, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xde, 6, 3, X86_DECODE_CMD_FDIV, 10, true, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 6, 0, X86_DECODE_CMD_FDIV, 2, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+ {0xde, 7, 3, X86_DECODE_CMD_FDIV, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xde, 7, 0, X86_DECODE_CMD_FDIV, 2, true, false,
+ decode_x87_modrm_st0, decode_x87_modrm_intp, NULL, RFLAGS_MASK_NONE},
+
+ {0xdf, 0, 0, X86_DECODE_CMD_FLD, 2, false, false,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 1, 3, X86_DECODE_CMD_FXCH, 10, false, false,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 2, 3, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 2, 0, X86_DECODE_CMD_FST, 2, false, false,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 3, 3, X86_DECODE_CMD_FST, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 3, 0, X86_DECODE_CMD_FST, 2, false, true,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 4, 3, X86_DECODE_CMD_FNSTSW, 2, false, true,
+ decode_x87_modrm_bytep, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 5, 3, X86_DECODE_CMD_FUCOMI, 10, false, true,
+ decode_x87_modrm_st0, decode_x87_modrm_st0, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 5, 0, X86_DECODE_CMD_FLD, 8, false, false,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+ {0xdf, 7, 0, X86_DECODE_CMD_FST, 8, false, true,
+ decode_x87_modrm_intp, NULL, NULL, RFLAGS_MASK_NONE},
+};
+
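+/* Effective address for 16-bit ModRM memory operands: BX+SI, BX+DI, BP+SI, BP+DI, SI, DI, BP or BX, with SS as the default segment for the BP-based forms; mod == 0 with rm == 6 is a plain disp16. */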
+void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ target_ulong ptr = 0;
+ X86Seg seg = R_DS;
+
+ if (!decode->modrm.mod && 6 == decode->modrm.rm) {
+ op->ptr = (uint16_t)decode->displacement;
+ goto calc_addr;
+ }
+
+ if (decode->displacement_size) {
+ ptr = sign(decode->displacement, decode->displacement_size);
+ }
+
+ switch (decode->modrm.rm) {
+ case 0:
+ ptr += BX(env) + SI(env);
+ break;
+ case 1:
+ ptr += BX(env) + DI(env);
+ break;
+ case 2:
+ ptr += BP(env) + SI(env);
+ seg = R_SS;
+ break;
+ case 3:
+ ptr += BP(env) + DI(env);
+ seg = R_SS;
+ break;
+ case 4:
+ ptr += SI(env);
+ break;
+ case 5:
+ ptr += DI(env);
+ break;
+ case 6:
+ ptr += BP(env);
+ seg = R_SS;
+ break;
+ case 7:
+ ptr += BX(env);
+ break;
+ }
+calc_addr:
+ if (X86_DECODE_CMD_LEA == decode->cmd) {
+ op->ptr = (uint16_t)ptr;
+ } else {
+ op->ptr = decode_linear_addr(env, decode, (uint16_t)ptr, seg);
+ }
+}
+
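+/* Return a pointer into the CPU state for the selected register; for byte operands without an extended (REX) encoding, registers 4-7 refer to AH/CH/DH/BH. */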
+target_ulong get_reg_ref(CPUX86State *env, int reg, int is_extended, int size)
+{
+ target_ulong ptr = 0;
+ int which = 0;
+
+ if (is_extended) {
+ reg |= R_R8;
+ }
+
+
+ switch (size) {
+ case 1:
+ if (is_extended || reg < 4) {
+ which = 1;
+ ptr = (target_ulong)&RL(env, reg);
+ } else {
+ which = 2;
+ ptr = (target_ulong)&RH(env, reg - 4);
+ }
+ break;
+ default:
+ which = 3;
+ ptr = (target_ulong)&RRX(env, reg);
+ break;
+ }
+ return ptr;
+}
+
+target_ulong get_reg_val(CPUX86State *env, int reg, int is_extended, int size)
+{
+ target_ulong val = 0;
+ memcpy(&val, (void *)get_reg_ref(env, reg, is_extended, size), size);
+ return val;
+}
+
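+/* Evaluate a SIB byte: base + (index << scale); an index of rSP means no index, a base of rBP with mod == 0 means no base, and rSP/rBP bases default to the SS segment. */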
+static target_ulong get_sib_val(CPUX86State *env, struct x86_decode *decode,
+ X86Seg *sel)
+{
+ target_ulong base = 0;
+ target_ulong scaled_index = 0;
+ int addr_size = decode->addressing_size;
+ int base_reg = decode->sib.base;
+ int index_reg = decode->sib.index;
+
+ *sel = R_DS;
+
+ if (decode->modrm.mod || base_reg != R_EBP) {
+ if (decode->rex.b) {
+ base_reg |= R_R8;
+ }
+ if (base_reg == R_ESP || base_reg == R_EBP) {
+ *sel = R_SS;
+ }
+ base = get_reg_val(env, decode->sib.base, decode->rex.b, addr_size);
+ }
+
+ if (decode->rex.x) {
+ index_reg |= R_R8;
+ }
+
+ if (index_reg != R_ESP) {
+ scaled_index = get_reg_val(env, index_reg, decode->rex.x, addr_size) <<
+ decode->sib.scale;
+ }
+ return base + scaled_index;
+}
+
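+/* Effective address for 32-bit ModRM memory operands; mod == 0 with rm == 5 encodes an absolute disp32, or RIP-relative addressing in long mode. */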
+void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ X86Seg seg = R_DS;
+ target_ulong ptr = 0;
+ int addr_size = decode->addressing_size;
+
+ if (decode->displacement_size) {
+ ptr = sign(decode->displacement, decode->displacement_size);
+ }
+
+ if (4 == decode->modrm.rm) {
+ ptr += get_sib_val(env, decode, &seg);
+ } else if (!decode->modrm.mod && 5 == decode->modrm.rm) {
+ if (x86_is_long_mode(ENV_GET_CPU(env))) {
+ ptr += RIP(env) + decode->len;
+ } else {
+ ptr = decode->displacement;
+ }
+ } else {
+ if (decode->modrm.rm == R_EBP || decode->modrm.rm == R_ESP) {
+ seg = R_SS;
+ }
+ ptr += get_reg_val(env, decode->modrm.rm, decode->rex.b, addr_size);
+ }
+
+ if (X86_DECODE_CMD_LEA == decode->cmd) {
+ op->ptr = (uint32_t)ptr;
+ } else {
+ op->ptr = decode_linear_addr(env, decode, (uint32_t)ptr, seg);
+ }
+}
+
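+/* Effective address for 64-bit ModRM memory operands; mod == 0 with rm == 5 is RIP-relative. */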
+void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ X86Seg seg = R_DS;
+ int32_t offset = 0;
+ int mod = decode->modrm.mod;
+ int rm = decode->modrm.rm;
+ target_ulong ptr;
+ int src = decode->modrm.rm;
+
+ if (decode->displacement_size) {
+ offset = sign(decode->displacement, decode->displacement_size);
+ }
+
+ if (4 == rm) {
+ ptr = get_sib_val(env, decode, &seg) + offset;
+ } else if (0 == mod && 5 == rm) {
+ ptr = RIP(env) + decode->len + (int32_t) offset;
+ } else {
+ ptr = get_reg_val(env, src, decode->rex.b, 8) + (int64_t) offset;
+ }
+
+ if (X86_DECODE_CMD_LEA == decode->cmd) {
+ op->ptr = ptr;
+ } else {
+ op->ptr = decode_linear_addr(env, decode, ptr, seg);
+ }
+}
+
+
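+/* Decode a ModRM operand: a register when mod == 3, otherwise a memory operand computed for the current addressing size. */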
+void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op)
+{
+ if (3 == decode->modrm.mod) {
+ op->reg = decode->modrm.reg;
+ op->type = X86_VAR_REG;
+ op->ptr = get_reg_ref(env, decode->modrm.rm, decode->rex.b,
+ decode->operand_size);
+ return;
+ }
+
+ switch (decode->addressing_size) {
+ case 2:
+ calc_modrm_operand16(env, decode, op);
+ break;
+ case 4:
+ calc_modrm_operand32(env, decode, op);
+ break;
+ case 8:
+ calc_modrm_operand64(env, decode, op);
+ break;
+ default:
+ VM_PANIC_EX("unsupported address size %d\n", decode->addressing_size);
+ break;
+ }
+}
+
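+/* Consume legacy prefixes and, in long mode, a REX prefix; the first non-prefix byte is pushed back by decrementing decode->len. */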
+static void decode_prefix(CPUX86State *env, struct x86_decode *decode)
+{
+ while (1) {
+ uint8_t byte = decode_byte(env, decode);
+ switch (byte) {
+ case PREFIX_LOCK:
+ decode->lock = byte;
+ break;
+ case PREFIX_REPN:
+ case PREFIX_REP:
+ decode->rep = byte;
+ break;
+ case PREFIX_CS_SEG_OVEERIDE:
+ case PREFIX_SS_SEG_OVEERIDE:
+ case PREFIX_DS_SEG_OVEERIDE:
+ case PREFIX_ES_SEG_OVEERIDE:
+ case PREFIX_FS_SEG_OVEERIDE:
+ case PREFIX_GS_SEG_OVEERIDE:
+ decode->segment_override = byte;
+ break;
+ case PREFIX_OP_SIZE_OVERRIDE:
+ decode->op_size_override = byte;
+ break;
+ case PREFIX_ADDR_SIZE_OVERRIDE:
+ decode->addr_size_override = byte;
+ break;
+ case PREFIX_REX ... (PREFIX_REX + 0xf):
+ if (x86_is_long_mode(ENV_GET_CPU(env))) {
+ decode->rex.rex = byte;
+ break;
+ }
+ /* fall through when not in long mode */
+ default:
+ decode->len--;
+ return;
+ }
+ }
+}
+
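+/* Default address size follows the CPU mode (16-bit in real/v8086 mode, CS.D in protected mode, 64-bit in long mode); the 0x67 prefix selects the alternate size. */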
+void set_addressing_size(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->addressing_size = -1;
+ if (x86_is_real(ENV_GET_CPU(env)) || x86_is_v8086(ENV_GET_CPU(env))) {
+ if (decode->addr_size_override) {
+ decode->addressing_size = 4;
+ } else {
+ decode->addressing_size = 2;
+ }
+ } else if (!x86_is_long_mode(ENV_GET_CPU(env))) {
+ /* protected */
+ struct vmx_segment cs;
+ vmx_read_segment_descriptor(ENV_GET_CPU(env), &cs, R_CS);
+ /* check db */
+ if ((cs.ar >> 14) & 1) {
+ if (decode->addr_size_override) {
+ decode->addressing_size = 2;
+ } else {
+ decode->addressing_size = 4;
+ }
+ } else {
+ if (decode->addr_size_override) {
+ decode->addressing_size = 4;
+ } else {
+ decode->addressing_size = 2;
+ }
+ }
+ } else {
+ /* long */
+ if (decode->addr_size_override) {
+ decode->addressing_size = 4;
+ } else {
+ decode->addressing_size = 8;
+ }
+ }
+}
+
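+/* Default operand size likewise follows the CPU mode; the 0x66 prefix switches between 16 and 32 bits, and REX.W selects 64 bits in long mode. */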
+void set_operand_size(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->operand_size = -1;
+ if (x86_is_real(ENV_GET_CPU(env)) || x86_is_v8086(ENV_GET_CPU(env))) {
+ if (decode->op_size_override) {
+ decode->operand_size = 4;
+ } else {
+ decode->operand_size = 2;
+ }
+ } else if (!x86_is_long_mode(ENV_GET_CPU(env))) {
+ /* protected */
+ struct vmx_segment cs;
+ vmx_read_segment_descriptor(ENV_GET_CPU(env), &cs, R_CS);
+ /* check db */
+ if ((cs.ar >> 14) & 1) {
+ if (decode->op_size_override) {
+ decode->operand_size = 2;
+ } else {
+ decode->operand_size = 4;
+ }
+ } else {
+ if (decode->op_size_override) {
+ decode->operand_size = 4;
+ } else {
+ decode->operand_size = 2;
+ }
+ }
+ } else {
+ /* long */
+ if (decode->op_size_override) {
+ decode->operand_size = 2;
+ } else {
+ decode->operand_size = 4;
+ }
+
+ if (decode->rex.w) {
+ decode->operand_size = 8;
+ }
+ }
+}
+
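+/* A SIB byte is present only for 32/64-bit addressing when mod != 3 and rm == 4. */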
+static void decode_sib(CPUX86State *env, struct x86_decode *decode)
+{
+ if ((decode->modrm.mod != 3) && (4 == decode->modrm.rm) &&
+ (decode->addressing_size != 2)) {
+ decode->sib.sib = decode_byte(env, decode);
+ decode->sib_present = true;
+ }
+}
+
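+/* Displacement size in bytes, indexed by modrm.mod (row) and modrm.rm (column); the -1 entry in the 32/64-bit table marks the SIB case that needs a further check on sib.base. */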
+/* 16 bit modrm */
+int disp16_tbl[4][8] = {
+ {0, 0, 0, 0, 0, 0, 2, 0},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ {2, 2, 2, 2, 2, 2, 2, 2},
+ {0, 0, 0, 0, 0, 0, 0, 0}
+};
+
+/* 32/64-bit modrm */
+int disp32_tbl[4][8] = {
+ {0, 0, 0, 0, -1, 4, 0, 0},
+ {1, 1, 1, 1, 1, 1, 1, 1},
+ {4, 4, 4, 4, 4, 4, 4, 4},
+ {0, 0, 0, 0, 0, 0, 0, 0}
+};
+
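+/* Fetch whatever displacement the ModRM/SIB encoding calls for. */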
+static inline void decode_displacement(CPUX86State *env, struct x86_decode *decode)
+{
+ int addressing_size = decode->addressing_size;
+ int mod = decode->modrm.mod;
+ int rm = decode->modrm.rm;
+
+ decode->displacement_size = 0;
+ switch (addressing_size) {
+ case 2:
+ decode->displacement_size = disp16_tbl[mod][rm];
+ if (decode->displacement_size) {
+ decode->displacement = (uint16_t)decode_bytes(env, decode,
+ decode->displacement_size);
+ }
+ break;
+ case 4:
+ case 8:
+ if (-1 == disp32_tbl[mod][rm]) {
+ if (5 == decode->sib.base) {
+ decode->displacement_size = 4;
+ }
+ } else {
+ decode->displacement_size = disp32_tbl[mod][rm];
+ }
+
+ if (decode->displacement_size) {
+ decode->displacement = (uint32_t)decode_bytes(env, decode,
+ decode->displacement_size);
+ }
+ break;
+ }
+}
+
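+/* Read the ModRM byte, then any SIB byte and displacement that follow it. */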
+static inline void decode_modrm(CPUX86State *env, struct x86_decode *decode)
+{
+ decode->modrm.modrm = decode_byte(env, decode);
+ decode->is_modrm = true;
+
+ decode_sib(env, decode);
+ decode_displacement(env, decode);
+}
+
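+/* Apply a table entry: record the command, operand size and flags mask, then run the ModRM, per-operand and postfix decoders it specifies. */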
+static inline void decode_opcode_general(CPUX86State *env,
+ struct x86_decode *decode,
+ uint8_t opcode,
+ struct decode_tbl *inst_decoder)
+{
+ decode->cmd = inst_decoder->cmd;
+ if (inst_decoder->operand_size) {
+ decode->operand_size = inst_decoder->operand_size;
+ }
+ decode->flags_mask = inst_decoder->flags_mask;
+
+ if (inst_decoder->is_modrm) {
+ decode_modrm(env, decode);
+ }
+ if (inst_decoder->decode_op1) {
+ inst_decoder->decode_op1(env, decode, &decode->op[0]);
+ }
+ if (inst_decoder->decode_op2) {
+ inst_decoder->decode_op2(env, decode, &decode->op[1]);
+ }
+ if (inst_decoder->decode_op3) {
+ inst_decoder->decode_op3(env, decode, &decode->op[2]);
+ }
+ if (inst_decoder->decode_op4) {
+ inst_decoder->decode_op4(env, decode, &decode->op[3]);
+ }
+ if (inst_decoder->decode_postfix) {
+ inst_decoder->decode_postfix(env, decode);
+ }
+}
+
+static inline void decode_opcode_1(CPUX86State *env, struct x86_decode *decode,
+ uint8_t opcode)
+{
+ struct decode_tbl *inst_decoder = &_decode_tbl1[opcode];
+ decode_opcode_general(env, decode, opcode, inst_decoder);
+}
+
+
+static inline void decode_opcode_2(CPUX86State *env, struct x86_decode *decode,
+ uint8_t opcode)
+{
+ struct decode_tbl *inst_decoder = &_decode_tbl2[opcode];
+ decode_opcode_general(env, decode, opcode, inst_decoder);
+}
+
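+/* Dispatch on the first opcode byte, switching to the two-byte table after an OPCODE_ESCAPE (0x0f) byte. */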
+static void decode_opcodes(CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t opcode;
+
+ opcode = decode_byte(env, decode);
+ decode->opcode[decode->opcode_len++] = opcode;
+ if (opcode != OPCODE_ESCAPE) {
+ decode_opcode_1(env, decode, opcode);
+ } else {
+ opcode = decode_byte(env, decode);
+ decode->opcode[decode->opcode_len++] = opcode;
+ decode_opcode_2(env, decode, opcode);
+ }
+}
+
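+/* Top-level entry point: reset the decode state, read prefixes, settle the address and operand sizes, then decode the opcode; returns the instruction length in bytes. */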
+uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode)
+{
+ memset(decode, 0, sizeof(*decode));
+ decode_prefix(env, decode);
+ set_addressing_size(env, decode);
+ set_operand_size(env, decode);
+
+ decode_opcodes(env, decode);
+
+ return decode->len;
+}
+
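+/* Build the lookup tables once at startup: every slot starts out invalid and the defined entries are copied over it; the x87 table index packs the opcode low nibble, the low bit of modrm.mod and modrm.reg. */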
+void init_decoder(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(_decode_tbl1); i++) {
+ memcpy(&_decode_tbl1[i], &invl_inst, sizeof(invl_inst));
+ }
+ for (i = 0; i < ARRAY_SIZE(_decode_tbl2); i++) {
+ memcpy(&_decode_tbl2[i], &invl_inst, sizeof(invl_inst));
+ }
+ for (i = 0; i < ARRAY_SIZE(_decode_tbl3); i++) {
+ memcpy(&_decode_tbl3[i], &invl_inst_x87, sizeof(invl_inst_x87));
+ }
+ for (i = 0; i < ARRAY_SIZE(_1op_inst); i++) {
+ _decode_tbl1[_1op_inst[i].opcode] = _1op_inst[i];
+ }
+ for (i = 0; i < ARRAY_SIZE(_2op_inst); i++) {
+ _decode_tbl2[_2op_inst[i].opcode] = _2op_inst[i];
+ }
+ for (i = 0; i < ARRAY_SIZE(_x87_inst); i++) {
+ int index = ((_x87_inst[i].opcode & 0xf) << 4) |
+ ((_x87_inst[i].modrm_mod & 1) << 3) |
+ _x87_inst[i].modrm_reg;
+ _decode_tbl3[index] = _x87_inst[i];
+ }
+}
+
+
+const char *decode_cmd_to_string(enum x86_decode_cmd cmd)
+{
+ static const char *cmds[] = {"INVL", "PUSH", "PUSH_SEG", "POP", "POP_SEG",
+ "MOV", "MOVSX", "MOVZX", "CALL_NEAR", "CALL_NEAR_ABS_INDIRECT",
+ "CALL_FAR_ABS_INDIRECT", "CMD_CALL_FAR", "RET_NEAR", "RET_FAR", "ADD",
+ "OR", "ADC", "SBB", "AND", "SUB", "XOR", "CMP", "INC", "DEC", "TST",
+ "NOT", "NEG", "JMP_NEAR", "JMP_NEAR_ABS_INDIRECT", "JMP_FAR",
+ "JMP_FAR_ABS_INDIRECT", "LEA", "JXX", "JCXZ", "SETXX", "MOV_TO_SEG",
+ "MOV_FROM_SEG", "CLI", "STI", "CLD", "STD", "STC", "CLC", "OUT", "IN",
+ "INS", "OUTS", "LIDT", "SIDT", "LGDT", "SGDT", "SMSW", "LMSW",
+ "RDTSCP", "INVLPG", "MOV_TO_CR", "MOV_FROM_CR", "MOV_TO_DR",
+ "MOV_FROM_DR", "PUSHF", "POPF", "CPUID", "ROL", "ROR", "RCL", "RCR",
+ "SHL", "SAL", "SHR", "SHRD", "SHLD", "SAR", "DIV", "IDIV", "MUL",
+ "IMUL_3", "IMUL_2", "IMUL_1", "MOVS", "CMPS", "SCAS", "LODS", "STOS",
+ "BSWAP", "XCHG", "RDTSC", "RDMSR", "WRMSR", "ENTER", "LEAVE", "BT",
+ "BTS", "BTC", "BTR", "BSF", "BSR", "IRET", "INT", "POPA", "PUSHA",
+ "CWD", "CBW", "DAS", "AAD", "AAM", "AAS", "LOOP", "SLDT", "STR", "LLDT",
+ "LTR", "VERR", "VERW", "SAHF", "LAHF", "WBINVD", "LDS", "LSS", "LES",
+ "LGS", "LFS", "CMC", "XLAT", "NOP", "CMOV", "CLTS", "XADD", "HLT",
+ "CMPXCHG8B", "CMPXCHG", "POPCNT", "FNINIT", "FLD", "FLDxx", "FNSTCW",
+ "FNSTSW", "FNSETPM", "FSAVE", "FRSTOR", "FXSAVE", "FXRSTOR", "FDIV",
+ "FMUL", "FSUB", "FADD", "EMMS", "MFENCE", "SFENCE", "LFENCE",
+ "PREFETCH", "FST", "FABS", "FUCOM", "FUCOMI", "FLDCW",
+ "FXCH", "FCHS", "FCMOV", "FRNDINT", "FXAM", "LAST"};
+ return cmds[cmd];
+}
+
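+/* Turn an effective address into a linear address, honouring any segment-override prefix. */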
+target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
+ target_ulong addr, X86Seg seg)
+{
+ switch (decode->segment_override) {
+ case PREFIX_CS_SEG_OVEERIDE:
+ seg = R_CS;
+ break;
+ case PREFIX_SS_SEG_OVEERIDE:
+ seg = R_SS;
+ break;
+ case PREFIX_DS_SEG_OVEERIDE:
+ seg = R_DS;
+ break;
+ case PREFIX_ES_SEG_OVEERIDE:
+ seg = R_ES;
+ break;
+ case PREFIX_FS_SEG_OVEERIDE:
+ seg = R_FS;
+ break;
+ case PREFIX_GS_SEG_OVEERIDE:
+ seg = R_GS;
+ break;
+ default:
+ break;
+ }
+ return linear_addr_size(ENV_GET_CPU(env), addr, decode->addressing_size, seg);
+}
diff --git a/target/i386/hvf/x86_decode.h b/target/i386/hvf/x86_decode.h
new file mode 100644
index 0000000000..5ab6f31fa5
--- /dev/null
+++ b/target/i386/hvf/x86_decode.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HVF_X86_DECODE_H
+#define HVF_X86_DECODE_H 1
+
+#include "cpu.h"
+#include "x86.h"
+
+typedef enum x86_prefix {
+ /* group 1 */
+ PREFIX_LOCK = 0xf0,
+ PREFIX_REPN = 0xf2,
+ PREFIX_REP = 0xf3,
+ /* group 2 */
+ PREFIX_CS_SEG_OVEERIDE = 0x2e,
+ PREFIX_SS_SEG_OVEERIDE = 0x36,
+ PREFIX_DS_SEG_OVEERIDE = 0x3e,
+ PREFIX_ES_SEG_OVEERIDE = 0x26,
+ PREFIX_FS_SEG_OVEERIDE = 0x64,
+ PREFIX_GS_SEG_OVEERIDE = 0x65,
+ /* group 3 */
+ PREFIX_OP_SIZE_OVERRIDE = 0x66,
+ /* group 4 */
+ PREFIX_ADDR_SIZE_OVERRIDE = 0x67,
+
+ PREFIX_REX = 0x40,
+} x86_prefix;
+
+enum x86_decode_cmd {
+ X86_DECODE_CMD_INVL = 0,
+
+ X86_DECODE_CMD_PUSH,
+ X86_DECODE_CMD_PUSH_SEG,
+ X86_DECODE_CMD_POP,
+ X86_DECODE_CMD_POP_SEG,
+ X86_DECODE_CMD_MOV,
+ X86_DECODE_CMD_MOVSX,
+ X86_DECODE_CMD_MOVZX,
+ X86_DECODE_CMD_CALL_NEAR,
+ X86_DECODE_CMD_CALL_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_CALL_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_CALL_FAR,
+ X86_DECODE_RET_NEAR,
+ X86_DECODE_RET_FAR,
+ X86_DECODE_CMD_ADD,
+ X86_DECODE_CMD_OR,
+ X86_DECODE_CMD_ADC,
+ X86_DECODE_CMD_SBB,
+ X86_DECODE_CMD_AND,
+ X86_DECODE_CMD_SUB,
+ X86_DECODE_CMD_XOR,
+ X86_DECODE_CMD_CMP,
+ X86_DECODE_CMD_INC,
+ X86_DECODE_CMD_DEC,
+ X86_DECODE_CMD_TST,
+ X86_DECODE_CMD_NOT,
+ X86_DECODE_CMD_NEG,
+ X86_DECODE_CMD_JMP_NEAR,
+ X86_DECODE_CMD_JMP_NEAR_ABS_INDIRECT,
+ X86_DECODE_CMD_JMP_FAR,
+ X86_DECODE_CMD_JMP_FAR_ABS_INDIRECT,
+ X86_DECODE_CMD_LEA,
+ X86_DECODE_CMD_JXX,
+ X86_DECODE_CMD_JCXZ,
+ X86_DECODE_CMD_SETXX,
+ X86_DECODE_CMD_MOV_TO_SEG,
+ X86_DECODE_CMD_MOV_FROM_SEG,
+ X86_DECODE_CMD_CLI,
+ X86_DECODE_CMD_STI,
+ X86_DECODE_CMD_CLD,
+ X86_DECODE_CMD_STD,
+ X86_DECODE_CMD_STC,
+ X86_DECODE_CMD_CLC,
+ X86_DECODE_CMD_OUT,
+ X86_DECODE_CMD_IN,
+ X86_DECODE_CMD_INS,
+ X86_DECODE_CMD_OUTS,
+ X86_DECODE_CMD_LIDT,
+ X86_DECODE_CMD_SIDT,
+ X86_DECODE_CMD_LGDT,
+ X86_DECODE_CMD_SGDT,
+ X86_DECODE_CMD_SMSW,
+ X86_DECODE_CMD_LMSW,
+ X86_DECODE_CMD_RDTSCP,
+ X86_DECODE_CMD_INVLPG,
+ X86_DECODE_CMD_MOV_TO_CR,
+ X86_DECODE_CMD_MOV_FROM_CR,
+ X86_DECODE_CMD_MOV_TO_DR,
+ X86_DECODE_CMD_MOV_FROM_DR,
+ X86_DECODE_CMD_PUSHF,
+ X86_DECODE_CMD_POPF,
+ X86_DECODE_CMD_CPUID,
+ X86_DECODE_CMD_ROL,
+ X86_DECODE_CMD_ROR,
+ X86_DECODE_CMD_RCL,
+ X86_DECODE_CMD_RCR,
+ X86_DECODE_CMD_SHL,
+ X86_DECODE_CMD_SAL,
+ X86_DECODE_CMD_SHR,
+ X86_DECODE_CMD_SHRD,
+ X86_DECODE_CMD_SHLD,
+ X86_DECODE_CMD_SAR,
+ X86_DECODE_CMD_DIV,
+ X86_DECODE_CMD_IDIV,
+ X86_DECODE_CMD_MUL,
+ X86_DECODE_CMD_IMUL_3,
+ X86_DECODE_CMD_IMUL_2,
+ X86_DECODE_CMD_IMUL_1,
+ X86_DECODE_CMD_MOVS,
+ X86_DECODE_CMD_CMPS,
+ X86_DECODE_CMD_SCAS,
+ X86_DECODE_CMD_LODS,
+ X86_DECODE_CMD_STOS,
+ X86_DECODE_CMD_BSWAP,
+ X86_DECODE_CMD_XCHG,
+ X86_DECODE_CMD_RDTSC,
+ X86_DECODE_CMD_RDMSR,
+ X86_DECODE_CMD_WRMSR,
+ X86_DECODE_CMD_ENTER,
+ X86_DECODE_CMD_LEAVE,
+ X86_DECODE_CMD_BT,
+ X86_DECODE_CMD_BTS,
+ X86_DECODE_CMD_BTC,
+ X86_DECODE_CMD_BTR,
+ X86_DECODE_CMD_BSF,
+ X86_DECODE_CMD_BSR,
+ X86_DECODE_CMD_IRET,
+ X86_DECODE_CMD_INT,
+ X86_DECODE_CMD_POPA,
+ X86_DECODE_CMD_PUSHA,
+ X86_DECODE_CMD_CWD,
+ X86_DECODE_CMD_CBW,
+ X86_DECODE_CMD_DAS,
+ X86_DECODE_CMD_AAD,
+ X86_DECODE_CMD_AAM,
+ X86_DECODE_CMD_AAS,
+ X86_DECODE_CMD_LOOP,
+ X86_DECODE_CMD_SLDT,
+ X86_DECODE_CMD_STR,
+ X86_DECODE_CMD_LLDT,
+ X86_DECODE_CMD_LTR,
+ X86_DECODE_CMD_VERR,
+ X86_DECODE_CMD_VERW,
+ X86_DECODE_CMD_SAHF,
+ X86_DECODE_CMD_LAHF,
+ X86_DECODE_CMD_WBINVD,
+ X86_DECODE_CMD_LDS,
+ X86_DECODE_CMD_LSS,
+ X86_DECODE_CMD_LES,
+ X86_DECODE_XMD_LGS,
+ X86_DECODE_CMD_LFS,
+ X86_DECODE_CMD_CMC,
+ X86_DECODE_CMD_XLAT,
+ X86_DECODE_CMD_NOP,
+ X86_DECODE_CMD_CMOV,
+ X86_DECODE_CMD_CLTS,
+ X86_DECODE_CMD_XADD,
+ X86_DECODE_CMD_HLT,
+ X86_DECODE_CMD_CMPXCHG8B,
+ X86_DECODE_CMD_CMPXCHG,
+ X86_DECODE_CMD_POPCNT,
+
+ X86_DECODE_CMD_FNINIT,
+ X86_DECODE_CMD_FLD,
+ X86_DECODE_CMD_FLDxx,
+ X86_DECODE_CMD_FNSTCW,
+ X86_DECODE_CMD_FNSTSW,
+ X86_DECODE_CMD_FNSETPM,
+ X86_DECODE_CMD_FSAVE,
+ X86_DECODE_CMD_FRSTOR,
+ X86_DECODE_CMD_FXSAVE,
+ X86_DECODE_CMD_FXRSTOR,
+ X86_DECODE_CMD_FDIV,
+ X86_DECODE_CMD_FMUL,
+ X86_DECODE_CMD_FSUB,
+ X86_DECODE_CMD_FADD,
+ X86_DECODE_CMD_EMMS,
+ X86_DECODE_CMD_MFENCE,
+ X86_DECODE_CMD_SFENCE,
+ X86_DECODE_CMD_LFENCE,
+ X86_DECODE_CMD_PREFETCH,
+ X86_DECODE_CMD_CLFLUSH,
+ X86_DECODE_CMD_FST,
+ X86_DECODE_CMD_FABS,
+ X86_DECODE_CMD_FUCOM,
+ X86_DECODE_CMD_FUCOMI,
+ X86_DECODE_CMD_FLDCW,
+ X86_DECODE_CMD_FXCH,
+ X86_DECODE_CMD_FCHS,
+ X86_DECODE_CMD_FCMOV,
+ X86_DECODE_CMD_FRNDINT,
+ X86_DECODE_CMD_FXAM,
+
+ X86_DECODE_CMD_LAST,
+};
+
+const char *decode_cmd_to_string(enum x86_decode_cmd cmd);
+
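+/*
+ * ModRM, SIB and REX prefix bytes, accessible both as the raw byte and as
+ * bitfields (declared low bits first, so rm/base/b correspond to bits 2:0).
+ */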
+typedef struct x86_modrm {
+ union {
+ uint8_t modrm;
+ struct {
+ uint8_t rm:3;
+ uint8_t reg:3;
+ uint8_t mod:2;
+ };
+ };
+} __attribute__ ((__packed__)) x86_modrm;
+
+typedef struct x86_sib {
+ union {
+ uint8_t sib;
+ struct {
+ uint8_t base:3;
+ uint8_t index:3;
+ uint8_t scale:2;
+ };
+ };
+} __attribute__ ((__packed__)) x86_sib;
+
+typedef struct x86_rex {
+ union {
+ uint8_t rex;
+ struct {
+ uint8_t b:1;
+ uint8_t x:1;
+ uint8_t r:1;
+ uint8_t w:1;
+ uint8_t unused:4;
+ };
+ };
+} __attribute__ ((__packed__)) x86_rex;
+
+typedef enum x86_var_type {
+ X86_VAR_IMMEDIATE,
+ X86_VAR_OFFSET,
+ X86_VAR_REG,
+ X86_VAR_RM,
+
+ /* for floating point computations */
+ X87_VAR_REG,
+ X87_VAR_FLOATP,
+ X87_VAR_INTP,
+ X87_VAR_BYTEP,
+} x86_var_type;
+
+typedef struct x86_decode_op {
+ enum x86_var_type type;
+ int size;
+
+ int reg;
+ target_ulong val;
+
+ target_ulong ptr;
+} x86_decode_op;
+
+typedef struct x86_decode {
+ int len;
+ uint8_t opcode[4];
+ uint8_t opcode_len;
+ enum x86_decode_cmd cmd;
+ int addressing_size;
+ int operand_size;
+ int lock;
+ int rep;
+ int op_size_override;
+ int addr_size_override;
+ int segment_override;
+ int control_change_inst;
+ bool fwait;
+ bool fpop_stack;
+ bool frev;
+
+ uint32_t displacement;
+ uint8_t displacement_size;
+ struct x86_rex rex;
+ bool is_modrm;
+ bool sib_present;
+ struct x86_sib sib;
+ struct x86_modrm modrm;
+ struct x86_decode_op op[4];
+ bool is_fpu;
+ uint32_t flags_mask;
+
+} x86_decode;
+
+uint64_t sign(uint64_t val, int size);
+
+uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode);
+
+target_ulong get_reg_ref(CPUX86State *env, int reg, int is_extended, int size);
+target_ulong get_reg_val(CPUX86State *env, int reg, int is_extended, int size);
+void calc_modrm_operand(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+target_ulong decode_linear_addr(CPUX86State *env, struct x86_decode *decode,
+ target_ulong addr, enum X86Seg seg);
+
+void init_decoder(void);
+void calc_modrm_operand16(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+void calc_modrm_operand32(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+void calc_modrm_operand64(CPUX86State *env, struct x86_decode *decode,
+ struct x86_decode_op *op);
+void set_addressing_size(CPUX86State *env, struct x86_decode *decode);
+void set_operand_size(CPUX86State *env, struct x86_decode *decode);
+
+#endif
diff --git a/target/i386/hvf/x86_descr.c b/target/i386/hvf/x86_descr.c
new file mode 100644
index 0000000000..8c05c34f33
--- /dev/null
+++ b/target/i386/hvf/x86_descr.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu-common.h"
+#include "vmx.h"
+#include "x86_descr.h"
+
+#define VMX_SEGMENT_FIELD(seg) \
+ [R_##seg] = { \
+ .selector = VMCS_GUEST_##seg##_SELECTOR, \
+ .base = VMCS_GUEST_##seg##_BASE, \
+ .limit = VMCS_GUEST_##seg##_LIMIT, \
+ .ar_bytes = VMCS_GUEST_##seg##_ACCESS_RIGHTS, \
+}
+
+static const struct vmx_segment_field {
+ int selector;
+ int base;
+ int limit;
+ int ar_bytes;
+} vmx_segment_fields[] = {
+ VMX_SEGMENT_FIELD(ES),
+ VMX_SEGMENT_FIELD(CS),
+ VMX_SEGMENT_FIELD(SS),
+ VMX_SEGMENT_FIELD(DS),
+ VMX_SEGMENT_FIELD(FS),
+ VMX_SEGMENT_FIELD(GS),
+ VMX_SEGMENT_FIELD(LDTR),
+ VMX_SEGMENT_FIELD(TR),
+};
+
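+/*
+ * Accessors that read and write guest segment state (selector, base, limit,
+ * access rights) directly in the VMCS via the per-segment field table above.
+ */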
+uint32_t vmx_read_segment_limit(CPUState *cpu, X86Seg seg)
+{
+ return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
+}
+
+uint32_t vmx_read_segment_ar(CPUState *cpu, X86Seg seg)
+{
+ return (uint32_t)rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
+}
+
+uint64_t vmx_read_segment_base(CPUState *cpu, X86Seg seg)
+{
+ return rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
+}
+
+x68_segment_selector vmx_read_segment_selector(CPUState *cpu, X86Seg seg)
+{
+ x68_segment_selector sel;
+ sel.sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
+ return sel;
+}
+
+void vmx_write_segment_selector(struct CPUState *cpu, x68_segment_selector selector, X86Seg seg)
+{
+ wvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector, selector.sel);
+}
+
+void vmx_read_segment_descriptor(struct CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
+{
+ desc->sel = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].selector);
+ desc->base = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].base);
+ desc->limit = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].limit);
+ desc->ar = rvmcs(cpu->hvf_fd, vmx_segment_fields[seg].ar_bytes);
+}
+
+void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc, X86Seg seg)
+{
+ const struct vmx_segment_field *sf = &vmx_segment_fields[seg];
+
+ wvmcs(cpu->hvf_fd, sf->base, desc->base);
+ wvmcs(cpu->hvf_fd, sf->limit, desc->limit);
+ wvmcs(cpu->hvf_fd, sf->selector, desc->sel);
+ wvmcs(cpu->hvf_fd, sf->ar_bytes, desc->ar);
+}
+
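+/*
+ * Convert a memory-format segment descriptor into the VMCS access-rights
+ * layout; bit 16 is the "segment unusable" flag, set here whenever the
+ * selector is null.
+ */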
+void x86_segment_descriptor_to_vmx(struct CPUState *cpu, x68_segment_selector selector, struct x86_segment_descriptor *desc, struct vmx_segment *vmx_desc)
+{
+ vmx_desc->sel = selector.sel;
+ vmx_desc->base = x86_segment_base(desc);
+ vmx_desc->limit = x86_segment_limit(desc);
+
+ vmx_desc->ar = (selector.sel ? 0 : 1) << 16 |
+ desc->g << 15 |
+ desc->db << 14 |
+ desc->l << 13 |
+ desc->avl << 12 |
+ desc->p << 7 |
+ desc->dpl << 5 |
+ desc->s << 4 |
+ desc->type;
+}
+
+void vmx_segment_to_x86_descriptor(struct CPUState *cpu, struct vmx_segment *vmx_desc, struct x86_segment_descriptor *desc)
+{
+ x86_set_segment_limit(desc, vmx_desc->limit);
+ x86_set_segment_base(desc, vmx_desc->base);
+
+ desc->type = vmx_desc->ar & 15;
+ desc->s = (vmx_desc->ar >> 4) & 1;
+ desc->dpl = (vmx_desc->ar >> 5) & 3;
+ desc->p = (vmx_desc->ar >> 7) & 1;
+ desc->avl = (vmx_desc->ar >> 12) & 1;
+ desc->l = (vmx_desc->ar >> 13) & 1;
+ desc->db = (vmx_desc->ar >> 14) & 1;
+ desc->g = (vmx_desc->ar >> 15) & 1;
+}
+
diff --git a/target/i386/hvf/x86_descr.h b/target/i386/hvf/x86_descr.h
new file mode 100644
index 0000000000..25a2b1731c
--- /dev/null
+++ b/target/i386/hvf/x86_descr.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HVF_X86_DESCR_H
+#define HVF_X86_DESCR_H 1
+
+#include "x86.h"
+
+typedef struct vmx_segment {
+ uint16_t sel;
+ uint64_t base;
+ uint64_t limit;
+ uint64_t ar;
+} vmx_segment;
+
+/* deal with guest segment descriptors held in the VMCS */
+void vmx_read_segment_descriptor(struct CPUState *cpu,
+ struct vmx_segment *desc, enum X86Seg seg);
+void vmx_write_segment_descriptor(CPUState *cpu, struct vmx_segment *desc,
+ enum X86Seg seg);
+
+x68_segment_selector vmx_read_segment_selector(struct CPUState *cpu,
+ enum X86Seg seg);
+void vmx_write_segment_selector(struct CPUState *cpu,
+ x68_segment_selector selector,
+ enum X86Seg seg);
+
+uint64_t vmx_read_segment_base(struct CPUState *cpu, enum X86Seg seg);
+void vmx_write_segment_base(struct CPUState *cpu, enum X86Seg seg,
+ uint64_t base);
+
+void x86_segment_descriptor_to_vmx(struct CPUState *cpu,
+ x68_segment_selector selector,
+ struct x86_segment_descriptor *desc,
+ struct vmx_segment *vmx_desc);
+
+uint32_t vmx_read_segment_limit(CPUState *cpu, enum X86Seg seg);
+uint32_t vmx_read_segment_ar(CPUState *cpu, enum X86Seg seg);
+void vmx_segment_to_x86_descriptor(struct CPUState *cpu,
+ struct vmx_segment *vmx_desc,
+ struct x86_segment_descriptor *desc);
+
+#endif
diff --git a/target/i386/hvf/x86_emu.c b/target/i386/hvf/x86_emu.c
new file mode 100644
index 0000000000..3ea18edc68
--- /dev/null
+++ b/target/i386/hvf/x86_emu.c
@@ -0,0 +1,1483 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2001-2012 The Bochs Project
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+/////////////////////////////////////////////////////////////////////////
+
+#include "qemu/osdep.h"
+#include "panic.h"
+#include "qemu-common.h"
+#include "x86_decode.h"
+#include "x86.h"
+#include "x86_emu.h"
+#include "x86_mmu.h"
+#include "x86_flags.h"
+#include "vmcs.h"
+#include "vmx.h"
+
+void hvf_handle_io(struct CPUState *cpu, uint16_t port, void *data,
+ int direction, int size, uint32_t count);
+
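+/*
+ * Two-operand ALU helper: fetch both operands, splice 'cmd' textually between
+ * them, optionally write the result back to operand 0, and update the lazy
+ * flags through the size-suffixed FLAGS_FUNC macro.
+ */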
+#define EXEC_2OP_FLAGS_CMD(env, decode, cmd, FLAGS_FUNC, save_res) \
+{ \
+ fetch_operands(env, decode, 2, true, true, false); \
+ switch (decode->operand_size) { \
+ case 1: \
+ { \
+ uint8_t v1 = (uint8_t)decode->op[0].val; \
+ uint8_t v2 = (uint8_t)decode->op[1].val; \
+ uint8_t diff = v1 cmd v2; \
+ if (save_res) { \
+ write_val_ext(env, decode->op[0].ptr, diff, 1); \
+ } \
+ FLAGS_FUNC##8(env, v1, v2, diff); \
+ break; \
+ } \
+ case 2: \
+ { \
+ uint16_t v1 = (uint16_t)decode->op[0].val; \
+ uint16_t v2 = (uint16_t)decode->op[1].val; \
+ uint16_t diff = v1 cmd v2; \
+ if (save_res) { \
+ write_val_ext(env, decode->op[0].ptr, diff, 2); \
+ } \
+ FLAGS_FUNC##16(env, v1, v2, diff); \
+ break; \
+ } \
+ case 4: \
+ { \
+ uint32_t v1 = (uint32_t)decode->op[0].val; \
+ uint32_t v2 = (uint32_t)decode->op[1].val; \
+ uint32_t diff = v1 cmd v2; \
+ if (save_res) { \
+ write_val_ext(env, decode->op[0].ptr, diff, 4); \
+ } \
+ FLAGS_FUNC##32(env, v1, v2, diff); \
+ break; \
+ } \
+ default: \
+ VM_PANIC("bad size\n"); \
+ } \
+} \
+
+target_ulong read_reg(CPUX86State *env, int reg, int size)
+{
+ switch (size) {
+ case 1:
+ return env->hvf_emul->regs[reg].lx;
+ case 2:
+ return env->hvf_emul->regs[reg].rx;
+ case 4:
+ return env->hvf_emul->regs[reg].erx;
+ case 8:
+ return env->hvf_emul->regs[reg].rrx;
+ default:
+ abort();
+ }
+ return 0;
+}
+
+void write_reg(CPUX86State *env, int reg, target_ulong val, int size)
+{
+ switch (size) {
+ case 1:
+ env->hvf_emul->regs[reg].lx = val;
+ break;
+ case 2:
+ env->hvf_emul->regs[reg].rx = val;
+ break;
+ case 4:
+ env->hvf_emul->regs[reg].rrx = (uint32_t)val;
+ break;
+ case 8:
+ env->hvf_emul->regs[reg].rrx = val;
+ break;
+ default:
+ abort();
+ }
+}
+
+target_ulong read_val_from_reg(target_ulong reg_ptr, int size)
+{
+ target_ulong val;
+
+ switch (size) {
+ case 1:
+ val = *(uint8_t *)reg_ptr;
+ break;
+ case 2:
+ val = *(uint16_t *)reg_ptr;
+ break;
+ case 4:
+ val = *(uint32_t *)reg_ptr;
+ break;
+ case 8:
+ val = *(uint64_t *)reg_ptr;
+ break;
+ default:
+ abort();
+ }
+ return val;
+}
+
+void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size)
+{
+ switch (size) {
+ case 1:
+ *(uint8_t *)reg_ptr = val;
+ break;
+ case 2:
+ *(uint16_t *)reg_ptr = val;
+ break;
+ case 4:
+ *(uint64_t *)reg_ptr = (uint32_t)val;
+ break;
+ case 8:
+ *(uint64_t *)reg_ptr = val;
+ break;
+ default:
+ abort();
+ }
+}
+
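+/*
+ * Operand pointers produced by the decoder either point into the emulated
+ * register file (hvf_emul->regs, host memory) or hold a guest linear address;
+ * is_host_reg() tells the read/write helpers below which case applies.
+ */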
+static bool is_host_reg(struct CPUX86State *env, target_ulong ptr)
+{
+ return (ptr - (target_ulong)&env->hvf_emul->regs[0]) < sizeof(env->hvf_emul->regs);
+}
+
+void write_val_ext(struct CPUX86State *env, target_ulong ptr, target_ulong val, int size)
+{
+ if (is_host_reg(env, ptr)) {
+ write_val_to_reg(ptr, val, size);
+ return;
+ }
+ vmx_write_mem(ENV_GET_CPU(env), ptr, &val, size);
+}
+
+uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes)
+{
+ vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, ptr, bytes);
+ return env->hvf_emul->mmio_buf;
+}
+
+target_ulong read_val_ext(struct CPUX86State *env, target_ulong ptr, int size)
+{
+ target_ulong val;
+ uint8_t *mmio_ptr;
+
+ if (is_host_reg(env, ptr)) {
+ return read_val_from_reg(ptr, size);
+ }
+
+ mmio_ptr = read_mmio(env, ptr, size);
+ switch (size) {
+ case 1:
+ val = *(uint8_t *)mmio_ptr;
+ break;
+ case 2:
+ val = *(uint16_t *)mmio_ptr;
+ break;
+ case 4:
+ val = *(uint32_t *)mmio_ptr;
+ break;
+ case 8:
+ val = *(uint64_t *)mmio_ptr;
+ break;
+ default:
+ VM_PANIC("bad size\n");
+ break;
+ }
+ return val;
+}
+
+static void fetch_operands(struct CPUX86State *env, struct x86_decode *decode,
+ int n, bool val_op0, bool val_op1, bool val_op2)
+{
+ int i;
+ bool calc_val[3] = {val_op0, val_op1, val_op2};
+
+ for (i = 0; i < n; i++) {
+ switch (decode->op[i].type) {
+ case X86_VAR_IMMEDIATE:
+ break;
+ case X86_VAR_REG:
+ VM_PANIC_ON(!decode->op[i].ptr);
+ if (calc_val[i]) {
+ decode->op[i].val = read_val_from_reg(decode->op[i].ptr,
+ decode->operand_size);
+ }
+ break;
+ case X86_VAR_RM:
+ calc_modrm_operand(env, decode, &decode->op[i]);
+ if (calc_val[i]) {
+ decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
+ decode->operand_size);
+ }
+ break;
+ case X86_VAR_OFFSET:
+ decode->op[i].ptr = decode_linear_addr(env, decode,
+ decode->op[i].ptr,
+ R_DS);
+ if (calc_val[i]) {
+ decode->op[i].val = read_val_ext(env, decode->op[i].ptr,
+ decode->operand_size);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void exec_mov(struct CPUX86State *env, struct x86_decode *decode)
+{
+ fetch_operands(env, decode, 2, false, true, false);
+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
+ decode->operand_size);
+
+ RIP(env) += decode->len;
+}
+
+static void exec_add(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
+ RIP(env) += decode->len;
+}
+
+static void exec_or(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, |, SET_FLAGS_OSZAPC_LOGIC, true);
+ RIP(env) += decode->len;
+}
+
+static void exec_adc(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, +get_CF(env)+, SET_FLAGS_OSZAPC_ADD, true);
+ RIP(env) += decode->len;
+}
+
+static void exec_sbb(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, -get_CF(env)-, SET_FLAGS_OSZAPC_SUB, true);
+ RIP(env) += decode->len;
+}
+
+static void exec_and(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, true);
+ RIP(env) += decode->len;
+}
+
+static void exec_sub(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, true);
+ RIP(env) += decode->len;
+}
+
+static void exec_xor(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, ^, SET_FLAGS_OSZAPC_LOGIC, true);
+ RIP(env) += decode->len;
+}
+
+static void exec_neg(struct CPUX86State *env, struct x86_decode *decode)
+{
+ /*EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);*/
+ int32_t val;
+ fetch_operands(env, decode, 2, true, true, false);
+
+ val = 0 - sign(decode->op[1].val, decode->operand_size);
+ write_val_ext(env, decode->op[1].ptr, val, decode->operand_size);
+
+ if (4 == decode->operand_size) {
+ SET_FLAGS_OSZAPC_SUB32(env, 0, 0 - val, val);
+ } else if (2 == decode->operand_size) {
+ SET_FLAGS_OSZAPC_SUB16(env, 0, 0 - val, val);
+ } else if (1 == decode->operand_size) {
+ SET_FLAGS_OSZAPC_SUB8(env, 0, 0 - val, val);
+ } else {
+ VM_PANIC("bad op size\n");
+ }
+
+ /*lflags_to_rflags(env);*/
+ RIP(env) += decode->len;
+}
+
+static void exec_cmp(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
+ RIP(env) += decode->len;
+}
+
+static void exec_inc(struct CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = 0;
+
+ EXEC_2OP_FLAGS_CMD(env, decode, +1+, SET_FLAGS_OSZAP_ADD, true);
+
+ RIP(env) += decode->len;
+}
+
+static void exec_dec(struct CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = 0;
+
+ EXEC_2OP_FLAGS_CMD(env, decode, -1-, SET_FLAGS_OSZAP_SUB, true);
+ RIP(env) += decode->len;
+}
+
+static void exec_tst(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, &, SET_FLAGS_OSZAPC_LOGIC, false);
+ RIP(env) += decode->len;
+}
+
+static void exec_not(struct CPUX86State *env, struct x86_decode *decode)
+{
+ fetch_operands(env, decode, 1, true, false, false);
+
+ write_val_ext(env, decode->op[0].ptr, ~decode->op[0].val,
+ decode->operand_size);
+ RIP(env) += decode->len;
+}
+
+void exec_movzx(struct CPUX86State *env, struct x86_decode *decode)
+{
+ int src_op_size;
+ int op_size = decode->operand_size;
+
+ fetch_operands(env, decode, 1, false, false, false);
+
+ if (0xb6 == decode->opcode[1]) {
+ src_op_size = 1;
+ } else {
+ src_op_size = 2;
+ }
+ decode->operand_size = src_op_size;
+ calc_modrm_operand(env, decode, &decode->op[1]);
+ decode->op[1].val = read_val_ext(env, decode->op[1].ptr, src_op_size);
+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
+
+ RIP(env) += decode->len;
+}
+
+static void exec_out(struct CPUX86State *env, struct x86_decode *decode)
+{
+ switch (decode->opcode[0]) {
+ case 0xe6:
+ hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 1, 1, 1);
+ break;
+ case 0xe7:
+ hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &RAX(env), 1,
+ decode->operand_size, 1);
+ break;
+ case 0xee:
+ hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 1, 1, 1);
+ break;
+ case 0xef:
+ hvf_handle_io(ENV_GET_CPU(env), DX(env), &RAX(env), 1, decode->operand_size, 1);
+ break;
+ default:
+ VM_PANIC("Bad out opcode\n");
+ break;
+ }
+ RIP(env) += decode->len;
+}
+
+static void exec_in(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong val = 0;
+ switch (decode->opcode[0]) {
+ case 0xe4:
+ hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 0, 1, 1);
+ break;
+ case 0xe5:
+ hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &val, 0, decode->operand_size, 1);
+ if (decode->operand_size == 2) {
+ AX(env) = val;
+ } else {
+ RAX(env) = (uint32_t)val;
+ }
+ break;
+ case 0xec:
+ hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 0, 1, 1);
+ break;
+ case 0xed:
+ hvf_handle_io(ENV_GET_CPU(env), DX(env), &val, 0, decode->operand_size, 1);
+ if (decode->operand_size == 2) {
+ AX(env) = val;
+ } else {
+ RAX(env) = (uint32_t)val;
+ }
+
+ break;
+ default:
+ VM_PANIC("Bad in opcode\n");
+ break;
+ }
+
+ RIP(env) += decode->len;
+}
+
+static inline void string_increment_reg(struct CPUX86State *env, int reg,
+ struct x86_decode *decode)
+{
+ target_ulong val = read_reg(env, reg, decode->addressing_size);
+ if (env->hvf_emul->rflags.df) {
+ val -= decode->operand_size;
+ } else {
+ val += decode->operand_size;
+ }
+ write_reg(env, reg, val, decode->addressing_size);
+}
+
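+/*
+ * REP wrapper: repeat 'func' while the count register (sized by the address
+ * size) is non-zero, writing the updated count back each iteration.  For
+ * CMPS/SCAS the rep argument carries the prefix, so REPZ stops once ZF is
+ * clear and REPNZ stops once ZF is set; other string ops pass rep == 0.
+ */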
+static inline void string_rep(struct CPUX86State *env, struct x86_decode *decode,
+ void (*func)(struct CPUX86State *env,
+ struct x86_decode *ins), int rep)
+{
+ target_ulong rcx = read_reg(env, R_ECX, decode->addressing_size);
+ while (rcx--) {
+ func(env, decode);
+ write_reg(env, R_ECX, rcx, decode->addressing_size);
+ if ((PREFIX_REP == rep) && !get_ZF(env)) {
+ break;
+ }
+ if ((PREFIX_REPN == rep) && get_ZF(env)) {
+ break;
+ }
+ }
+}
+
+static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size,
+ R_ES);
+
+ hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 0,
+ decode->operand_size, 1);
+ vmx_write_mem(ENV_GET_CPU(env), addr, env->hvf_emul->mmio_buf, decode->operand_size);
+
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_ins(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_ins_single, 0);
+ } else {
+ exec_ins_single(env, decode);
+ }
+
+ RIP(env) += decode->len;
+}
+
+static void exec_outs_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+
+ vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, addr, decode->operand_size);
+ hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 1,
+ decode->operand_size, 1);
+
+ string_increment_reg(env, R_ESI, decode);
+}
+
+static void exec_outs(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_outs_single, 0);
+ } else {
+ exec_outs_single(env, decode);
+ }
+
+ RIP(env) += decode->len;
+}
+
+static void exec_movs_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong src_addr;
+ target_ulong dst_addr;
+ target_ulong val;
+
+ src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+ dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size,
+ R_ES);
+
+ val = read_val_ext(env, src_addr, decode->operand_size);
+ write_val_ext(env, dst_addr, val, decode->operand_size);
+
+ string_increment_reg(env, R_ESI, decode);
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_movs(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_movs_single, 0);
+ } else {
+ exec_movs_single(env, decode);
+ }
+
+ RIP(env) += decode->len;
+}
+
+static void exec_cmps_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong src_addr;
+ target_ulong dst_addr;
+
+ src_addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+ dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size,
+ R_ES);
+
+ decode->op[0].type = X86_VAR_IMMEDIATE;
+ decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size);
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ decode->op[1].val = read_val_ext(env, dst_addr, decode->operand_size);
+
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
+
+ string_increment_reg(env, R_ESI, decode);
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_cmps(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_cmps_single, decode->rep);
+ } else {
+ exec_cmps_single(env, decode);
+ }
+ RIP(env) += decode->len;
+}
+
+static void exec_stos_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr;
+ target_ulong val;
+
+ addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, R_ES);
+ val = read_reg(env, R_EAX, decode->operand_size);
+ vmx_write_mem(ENV_GET_CPU(env), addr, &val, decode->operand_size);
+
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_stos(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_stos_single, 0);
+ } else {
+ exec_stos_single(env, decode);
+ }
+
+ RIP(env) += decode->len;
+}
+
+static void exec_scas_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr;
+
+ addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, R_ES);
+ decode->op[1].type = X86_VAR_IMMEDIATE;
+ vmx_read_mem(ENV_GET_CPU(env), &decode->op[1].val, addr, decode->operand_size);
+
+ EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false);
+ string_increment_reg(env, R_EDI, decode);
+}
+
+static void exec_scas(struct CPUX86State *env, struct x86_decode *decode)
+{
+ decode->op[0].type = X86_VAR_REG;
+ decode->op[0].reg = R_EAX;
+ if (decode->rep) {
+ string_rep(env, decode, exec_scas_single, decode->rep);
+ } else {
+ exec_scas_single(env, decode);
+ }
+
+ RIP(env) += decode->len;
+}
+
+static void exec_lods_single(struct CPUX86State *env, struct x86_decode *decode)
+{
+ target_ulong addr;
+ target_ulong val = 0;
+
+ addr = decode_linear_addr(env, decode, RSI(env), R_DS);
+ vmx_read_mem(ENV_GET_CPU(env), &val, addr, decode->operand_size);
+ write_reg(env, R_EAX, val, decode->operand_size);
+
+ string_increment_reg(env, R_ESI, decode);
+}
+
+static void exec_lods(struct CPUX86State *env, struct x86_decode *decode)
+{
+ if (decode->rep) {
+ string_rep(env, decode, exec_lods_single, 0);
+ } else {
+ exec_lods_single(env, decode);
+ }
+
+ RIP(env) += decode->len;
+}
+
+#define MSR_IA32_UCODE_REV 0x00000017
+
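+/*
+ * RDMSR emulation: ECX selects the MSR and the 64-bit result is returned in
+ * EDX:EAX; MSRs that are not modelled read back as zero instead of faulting.
+ */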
+void simulate_rdmsr(struct CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint32_t msr = ECX(env);
+ uint64_t val = 0;
+
+ switch (msr) {
+ case MSR_IA32_TSC:
+ val = rdtscp() + rvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET);
+ break;
+ case MSR_IA32_APICBASE:
+ val = cpu_get_apic_base(X86_CPU(cpu)->apic_state);
+ break;
+ case MSR_IA32_UCODE_REV:
+ val = (0x100000000ULL << 32) | 0x100000000ULL;
+ break;
+ case MSR_EFER:
+ val = rvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER);
+ break;
+ case MSR_FSBASE:
+ val = rvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE);
+ break;
+ case MSR_GSBASE:
+ val = rvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE);
+ break;
+ case MSR_KERNELGSBASE:
+ val = rvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE);
+ break;
+ case MSR_STAR:
+ abort();
+ break;
+ case MSR_LSTAR:
+ abort();
+ break;
+ case MSR_CSTAR:
+ abort();
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ val = env->msr_ia32_misc_enable;
+ break;
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ val = env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ val = env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask;
+ break;
+ case MSR_MTRRfix64K_00000:
+ val = env->mtrr_fixed[0];
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1];
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ val = env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3];
+ break;
+ case MSR_MTRRdefType:
+ val = env->mtrr_deftype;
+ break;
+ default:
+ /* fprintf(stderr, "%s: unknown msr 0x%x\n", __func__, msr); */
+ val = 0;
+ break;
+ }
+
+ RAX(env) = (uint32_t)val;
+ RDX(env) = (uint32_t)(val >> 32);
+}
+
+static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode)
+{
+ simulate_rdmsr(ENV_GET_CPU(env));
+ RIP(env) += decode->len;
+}
+
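+/*
+ * WRMSR emulation: ECX selects the MSR and EDX:EAX supply the value; writes
+ * to MSRs that are not modelled are silently ignored.
+ */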
+void simulate_wrmsr(struct CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ uint32_t msr = ECX(env);
+ uint64_t data = ((uint64_t)EDX(env) << 32) | EAX(env);
+
+ switch (msr) {
+ case MSR_IA32_TSC:
+ /* if (!osx_is_sierra())
+ wvmcs(cpu->hvf_fd, VMCS_TSC_OFFSET, data - rdtscp());
+ hv_vm_sync_tsc(data);*/
+ break;
+ case MSR_IA32_APICBASE:
+ cpu_set_apic_base(X86_CPU(cpu)->apic_state, data);
+ break;
+ case MSR_FSBASE:
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_FS_BASE, data);
+ break;
+ case MSR_GSBASE:
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_GS_BASE, data);
+ break;
+ case MSR_KERNELGSBASE:
+ wvmcs(cpu->hvf_fd, VMCS_HOST_FS_BASE, data);
+ break;
+ case MSR_STAR:
+ abort();
+ break;
+ case MSR_LSTAR:
+ abort();
+ break;
+ case MSR_CSTAR:
+ abort();
+ break;
+ case MSR_EFER:
+ /*printf("new efer %llx\n", EFER(cpu));*/
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_IA32_EFER, data);
+ if (data & MSR_EFER_NXE) {
+ hv_vcpu_invalidate_tlb(cpu->hvf_fd);
+ }
+ break;
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ env->mtrr_var[(ECX(env) - MSR_MTRRphysBase(0)) / 2].base = data;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ env->mtrr_var[(ECX(env) - MSR_MTRRphysMask(0)) / 2].mask = data;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix64K_00000] = data;
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix16K_80000 + 1] = data;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[ECX(env) - MSR_MTRRfix4K_C0000 + 3] = data;
+ break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = data;
+ break;
+ default:
+ break;
+ }
+
+ /* Related to support known hypervisor interface */
+ /* if (g_hypervisor_iface)
+ g_hypervisor_iface->wrmsr_handler(cpu, msr, data);
+
+ printf("write msr %llx\n", RCX(cpu));*/
+}
+
+static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode)
+{
+ simulate_wrmsr(ENV_GET_CPU(env));
+ RIP(env) += decode->len;
+}
+
+/*
+ * flag:
+ * 0 - bt, 1 - btc, 2 - bts, 3 - btr
+ */
+static void do_bt(struct CPUX86State *env, struct x86_decode *decode, int flag)
+{
+ int32_t displacement;
+ uint8_t index;
+ bool cf;
+ int mask = (4 == decode->operand_size) ? 0x1f : 0xf;
+
+ VM_PANIC_ON(decode->rex.rex);
+
+ fetch_operands(env, decode, 2, false, true, false);
+ index = decode->op[1].val & mask;
+
+ if (decode->op[0].type != X86_VAR_REG) {
+ if (4 == decode->operand_size) {
+ displacement = ((int32_t) (decode->op[1].val & 0xffffffe0)) / 32;
+ decode->op[0].ptr += 4 * displacement;
+ } else if (2 == decode->operand_size) {
+ displacement = ((int16_t) (decode->op[1].val & 0xfff0)) / 16;
+ decode->op[0].ptr += 2 * displacement;
+ } else {
+ VM_PANIC("bt 64bit\n");
+ }
+ }
+ decode->op[0].val = read_val_ext(env, decode->op[0].ptr,
+ decode->operand_size);
+ cf = (decode->op[0].val >> index) & 0x01;
+
+ switch (flag) {
+ case 0:
+ set_CF(env, cf);
+ return;
+ case 1:
+ decode->op[0].val ^= (1u << index);
+ break;
+ case 2:
+ decode->op[0].val |= (1u << index);
+ break;
+ case 3:
+ decode->op[0].val &= ~(1u << index);
+ break;
+ }
+ write_val_ext(env, decode->op[0].ptr, decode->op[0].val,
+ decode->operand_size);
+ set_CF(env, cf);
+}
+
+static void exec_bt(struct CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 0);
+ RIP(env) += decode->len;
+}
+
+static void exec_btc(struct CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 1);
+ RIP(env) += decode->len;
+}
+
+static void exec_btr(struct CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 3);
+ RIP(env) += decode->len;
+}
+
+static void exec_bts(struct CPUX86State *env, struct x86_decode *decode)
+{
+ do_bt(env, decode, 2);
+ RIP(env) += decode->len;
+}
+
+void exec_shl(struct CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+ int of = 0, cf = 0;
+
+ fetch_operands(env, decode, 2, true, true, false);
+
+ count = decode->op[1].val;
+ count &= 0x1f; /* count is masked to 5 bits */
+ if (!count) {
+ goto exit;
+ }
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint8_t res = 0;
+ if (count <= 8) {
+ res = (decode->op[0].val << count);
+ cf = (decode->op[0].val >> (8 - count)) & 0x1;
+ of = cf ^ (res >> 7);
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 1);
+ SET_FLAGS_OSZAPC_LOGIC8(env, 0, 0, res);
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 2:
+ {
+ uint16_t res = 0;
+
+ /* from bochs */
+ if (count <= 16) {
+ res = (decode->op[0].val << count);
+ cf = (decode->op[0].val >> (16 - count)) & 0x1;
+ of = cf ^ (res >> 15); /* of = cf ^ result15 */
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 2);
+ SET_FLAGS_OSZAPC_LOGIC16(env, 0, 0, res);
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 4:
+ {
+ uint32_t res = decode->op[0].val << count;
+
+ write_val_ext(env, decode->op[0].ptr, res, 4);
+ SET_FLAGS_OSZAPC_LOGIC32(env, 0, 0, res);
+ cf = (decode->op[0].val >> (32 - count)) & 0x1;
+ of = cf ^ (res >> 31); /* of = cf ^ result31 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ default:
+ abort();
+ }
+
+exit:
+ /* lflags_to_rflags(env); */
+ RIP(env) += decode->len;
+}
+
+void exec_movsx(CPUX86State *env, struct x86_decode *decode)
+{
+ int src_op_size;
+ int op_size = decode->operand_size;
+
+ fetch_operands(env, decode, 2, false, false, false);
+
+ if (0xbe == decode->opcode[1]) {
+ src_op_size = 1;
+ } else {
+ src_op_size = 2;
+ }
+
+ decode->operand_size = src_op_size;
+ calc_modrm_operand(env, decode, &decode->op[1]);
+ decode->op[1].val = sign(read_val_ext(env, decode->op[1].ptr, src_op_size),
+ src_op_size);
+
+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val, op_size);
+
+ RIP(env) += decode->len;
+}
+
+void exec_ror(struct CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint32_t bit6, bit7;
+ uint8_t res;
+
+ if ((count & 0x07) == 0) {
+ if (count & 0x18) {
+ bit6 = ((uint8_t)decode->op[0].val >> 6) & 1;
+ bit7 = ((uint8_t)decode->op[0].val >> 7) & 1;
+ SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
+ }
+ } else {
+ count &= 0x7; /* use only bottom 3 bits */
+ res = ((uint8_t)decode->op[0].val >> count) |
+ ((uint8_t)decode->op[0].val << (8 - count));
+ write_val_ext(env, decode->op[0].ptr, res, 1);
+ bit6 = (res >> 6) & 1;
+ bit7 = (res >> 7) & 1;
+ /* set eflags: ROR count affects the following flags: C, O */
+ SET_FLAGS_OxxxxC(env, bit6 ^ bit7, bit7);
+ }
+ break;
+ }
+ case 2:
+ {
+ uint32_t bit14, bit15;
+ uint16_t res;
+
+ if ((count & 0x0f) == 0) {
+ if (count & 0x10) {
+ bit14 = ((uint16_t)decode->op[0].val >> 14) & 1;
+ bit15 = ((uint16_t)decode->op[0].val >> 15) & 1;
+ /* of = result14 ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
+ }
+ } else {
+ count &= 0x0f; /* use only 4 LSB's */
+ res = ((uint16_t)decode->op[0].val >> count) |
+ ((uint16_t)decode->op[0].val << (16 - count));
+ write_val_ext(env, decode->op[0].ptr, res, 2);
+
+ bit14 = (res >> 14) & 1;
+ bit15 = (res >> 15) & 1;
+ /* of = result14 ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit14 ^ bit15, bit15);
+ }
+ break;
+ }
+ case 4:
+ {
+ uint32_t bit31, bit30;
+ uint32_t res;
+
+ count &= 0x1f;
+ if (count) {
+ res = ((uint32_t)decode->op[0].val >> count) |
+ ((uint32_t)decode->op[0].val << (32 - count));
+ write_val_ext(env, decode->op[0].ptr, res, 4);
+
+ bit31 = (res >> 31) & 1;
+ bit30 = (res >> 30) & 1;
+ /* of = result30 ^ result31 */
+ SET_FLAGS_OxxxxC(env, bit30 ^ bit31, bit31);
+ }
+ break;
+ }
+ }
+ RIP(env) += decode->len;
+}
+
+void exec_rol(struct CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint32_t bit0, bit7;
+ uint8_t res;
+
+ if ((count & 0x07) == 0) {
+ if (count & 0x18) {
+ bit0 = ((uint8_t)decode->op[0].val & 1);
+ bit7 = ((uint8_t)decode->op[0].val >> 7);
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
+ }
+ } else {
+ count &= 0x7; /* use only lowest 3 bits */
+ res = ((uint8_t)decode->op[0].val << count) |
+ ((uint8_t)decode->op[0].val >> (8 - count));
+
+ write_val_ext(env, decode->op[0].ptr, res, 1);
+ /* set eflags:
+ * ROL count affects the following flags: C, O
+ */
+ bit0 = (res & 1);
+ bit7 = (res >> 7);
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit7, bit0);
+ }
+ break;
+ }
+ case 2:
+ {
+ uint32_t bit0, bit15;
+ uint16_t res;
+
+ if ((count & 0x0f) == 0) {
+ if (count & 0x10) {
+ bit0 = ((uint16_t)decode->op[0].val & 0x1);
+ bit15 = ((uint16_t)decode->op[0].val >> 15);
+ /* of = cf ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
+ }
+ } else {
+ count &= 0x0f; /* only use bottom 4 bits */
+ res = ((uint16_t)decode->op[0].val << count) |
+ ((uint16_t)decode->op[0].val >> (16 - count));
+
+ write_val_ext(env, decode->op[0].ptr, res, 2);
+ bit0 = (res & 0x1);
+ bit15 = (res >> 15);
+ /* of = cf ^ result15 */
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit15, bit0);
+ }
+ break;
+ }
+ case 4:
+ {
+ uint32_t bit0, bit31;
+ uint32_t res;
+
+ count &= 0x1f;
+ if (count) {
+ res = ((uint32_t)decode->op[0].val << count) |
+ ((uint32_t)decode->op[0].val >> (32 - count));
+
+ write_val_ext(env, decode->op[0].ptr, res, 4);
+ bit0 = (res & 0x1);
+ bit31 = (res >> 31);
+ /* of = cf ^ result31 */
+ SET_FLAGS_OxxxxC(env, bit0 ^ bit31, bit0);
+ }
+ break;
+ }
+ }
+ RIP(env) += decode->len;
+}
+
+void exec_rcl(struct CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+ int of = 0, cf = 0;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val & 0x1f;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint8_t op1_8 = decode->op[0].val;
+ uint8_t res;
+ count %= 9;
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_8 << 1) | get_CF(env);
+ } else {
+ res = (op1_8 << count) | (get_CF(env) << (count - 1)) |
+ (op1_8 >> (9 - count));
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 1);
+
+ cf = (op1_8 >> (8 - count)) & 0x01;
+ of = cf ^ (res >> 7); /* of = cf ^ result7 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 2:
+ {
+ uint16_t res;
+ uint16_t op1_16 = decode->op[0].val;
+
+ count %= 17;
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_16 << 1) | get_CF(env);
+ } else if (count == 16) {
+ res = (get_CF(env) << 15) | (op1_16 >> 1);
+ } else { /* 2..15 */
+ res = (op1_16 << count) | (get_CF(env) << (count - 1)) |
+ (op1_16 >> (17 - count));
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 2);
+
+ cf = (op1_16 >> (16 - count)) & 0x1;
+ of = cf ^ (res >> 15); /* of = cf ^ result15 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 4:
+ {
+ uint32_t res;
+ uint32_t op1_32 = decode->op[0].val;
+
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_32 << 1) | get_CF(env);
+ } else {
+ res = (op1_32 << count) | (get_CF(env) << (count - 1)) |
+ (op1_32 >> (33 - count));
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 4);
+
+ cf = (op1_32 >> (32 - count)) & 0x1;
+ of = cf ^ (res >> 31); /* of = cf ^ result31 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ }
+ RIP(env) += decode->len;
+}
+
+void exec_rcr(struct CPUX86State *env, struct x86_decode *decode)
+{
+ uint8_t count;
+ int of = 0, cf = 0;
+
+ fetch_operands(env, decode, 2, true, true, false);
+ count = decode->op[1].val & 0x1f;
+
+ switch (decode->operand_size) {
+ case 1:
+ {
+ uint8_t op1_8 = decode->op[0].val;
+ uint8_t res;
+
+ count %= 9;
+ if (!count) {
+ break;
+ }
+ res = (op1_8 >> count) | (get_CF(env) << (8 - count)) |
+ (op1_8 << (9 - count));
+
+ write_val_ext(env, decode->op[0].ptr, res, 1);
+
+ cf = (op1_8 >> (count - 1)) & 0x1;
+ of = (((res << 1) ^ res) >> 7) & 0x1; /* of = result6 ^ result7 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 2:
+ {
+ uint16_t op1_16 = decode->op[0].val;
+ uint16_t res;
+
+ count %= 17;
+ if (!count) {
+ break;
+ }
+ res = (op1_16 >> count) | (get_CF(env) << (16 - count)) |
+ (op1_16 << (17 - count));
+
+ write_val_ext(env, decode->op[0].ptr, res, 2);
+
+ cf = (op1_16 >> (count - 1)) & 0x1;
+ of = ((uint16_t)((res << 1) ^ res) >> 15) & 0x1; /* of = result15 ^
+ result14 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ case 4:
+ {
+ uint32_t res;
+ uint32_t op1_32 = decode->op[0].val;
+
+ if (!count) {
+ break;
+ }
+
+ if (1 == count) {
+ res = (op1_32 >> 1) | (get_CF(env) << 31);
+ } else {
+ res = (op1_32 >> count) | (get_CF(env) << (32 - count)) |
+ (op1_32 << (33 - count));
+ }
+
+ write_val_ext(env, decode->op[0].ptr, res, 4);
+
+ cf = (op1_32 >> (count - 1)) & 0x1;
+ of = ((res << 1) ^ res) >> 31; /* of = result30 ^ result31 */
+ SET_FLAGS_OxxxxC(env, of, cf);
+ break;
+ }
+ }
+ RIP(env) += decode->len;
+}
+
+static void exec_xchg(struct CPUX86State *env, struct x86_decode *decode)
+{
+ fetch_operands(env, decode, 2, true, true, false);
+
+ write_val_ext(env, decode->op[0].ptr, decode->op[1].val,
+ decode->operand_size);
+ write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
+ decode->operand_size);
+
+ RIP(env) += decode->len;
+}
+
+static void exec_xadd(struct CPUX86State *env, struct x86_decode *decode)
+{
+ EXEC_2OP_FLAGS_CMD(env, decode, +, SET_FLAGS_OSZAPC_ADD, true);
+ write_val_ext(env, decode->op[1].ptr, decode->op[0].val,
+ decode->operand_size);
+
+ RIP(env) += decode->len;
+}
+
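+/*
+ * Decode commands implemented by the software emulator.  init_cmd_handler()
+ * re-indexes this table by command value so exec_instruction() can dispatch
+ * directly on ins->cmd.
+ */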
+static struct cmd_handler {
+ enum x86_decode_cmd cmd;
+ void (*handler)(struct CPUX86State *env, struct x86_decode *ins);
+} handlers[] = {
+ {X86_DECODE_CMD_INVL, NULL,},
+ {X86_DECODE_CMD_MOV, exec_mov},
+ {X86_DECODE_CMD_ADD, exec_add},
+ {X86_DECODE_CMD_OR, exec_or},
+ {X86_DECODE_CMD_ADC, exec_adc},
+ {X86_DECODE_CMD_SBB, exec_sbb},
+ {X86_DECODE_CMD_AND, exec_and},
+ {X86_DECODE_CMD_SUB, exec_sub},
+ {X86_DECODE_CMD_NEG, exec_neg},
+ {X86_DECODE_CMD_XOR, exec_xor},
+ {X86_DECODE_CMD_CMP, exec_cmp},
+ {X86_DECODE_CMD_INC, exec_inc},
+ {X86_DECODE_CMD_DEC, exec_dec},
+ {X86_DECODE_CMD_TST, exec_tst},
+ {X86_DECODE_CMD_NOT, exec_not},
+ {X86_DECODE_CMD_MOVZX, exec_movzx},
+ {X86_DECODE_CMD_OUT, exec_out},
+ {X86_DECODE_CMD_IN, exec_in},
+ {X86_DECODE_CMD_INS, exec_ins},
+ {X86_DECODE_CMD_OUTS, exec_outs},
+ {X86_DECODE_CMD_RDMSR, exec_rdmsr},
+ {X86_DECODE_CMD_WRMSR, exec_wrmsr},
+ {X86_DECODE_CMD_BT, exec_bt},
+ {X86_DECODE_CMD_BTR, exec_btr},
+ {X86_DECODE_CMD_BTC, exec_btc},
+ {X86_DECODE_CMD_BTS, exec_bts},
+ {X86_DECODE_CMD_SHL, exec_shl},
+ {X86_DECODE_CMD_ROL, exec_rol},
+ {X86_DECODE_CMD_ROR, exec_ror},
+ {X86_DECODE_CMD_RCR, exec_rcr},
+ {X86_DECODE_CMD_RCL, exec_rcl},
+ /*{X86_DECODE_CMD_CPUID, exec_cpuid},*/
+ {X86_DECODE_CMD_MOVS, exec_movs},
+ {X86_DECODE_CMD_CMPS, exec_cmps},
+ {X86_DECODE_CMD_STOS, exec_stos},
+ {X86_DECODE_CMD_SCAS, exec_scas},
+ {X86_DECODE_CMD_LODS, exec_lods},
+ {X86_DECODE_CMD_MOVSX, exec_movsx},
+ {X86_DECODE_CMD_XCHG, exec_xchg},
+ {X86_DECODE_CMD_XADD, exec_xadd},
+};
+
+static struct cmd_handler _cmd_handler[X86_DECODE_CMD_LAST];
+
+static void init_cmd_handler(void)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ _cmd_handler[handlers[i].cmd] = handlers[i];
+ }
+}
+
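+/*
+ * load_regs()/store_regs() copy the general-purpose registers, RFLAGS and RIP
+ * between the HVF vCPU state and the emulator's register copy around
+ * instruction emulation.
+ */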
+void load_regs(struct CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ int i = 0;
+ RRX(env, R_EAX) = rreg(cpu->hvf_fd, HV_X86_RAX);
+ RRX(env, R_EBX) = rreg(cpu->hvf_fd, HV_X86_RBX);
+ RRX(env, R_ECX) = rreg(cpu->hvf_fd, HV_X86_RCX);
+ RRX(env, R_EDX) = rreg(cpu->hvf_fd, HV_X86_RDX);
+ RRX(env, R_ESI) = rreg(cpu->hvf_fd, HV_X86_RSI);
+ RRX(env, R_EDI) = rreg(cpu->hvf_fd, HV_X86_RDI);
+ RRX(env, R_ESP) = rreg(cpu->hvf_fd, HV_X86_RSP);
+ RRX(env, R_EBP) = rreg(cpu->hvf_fd, HV_X86_RBP);
+ for (i = 8; i < 16; i++) {
+ RRX(env, i) = rreg(cpu->hvf_fd, HV_X86_RAX + i);
+ }
+
+ RFLAGS(env) = rreg(cpu->hvf_fd, HV_X86_RFLAGS);
+ rflags_to_lflags(env);
+ RIP(env) = rreg(cpu->hvf_fd, HV_X86_RIP);
+}
+
+void store_regs(struct CPUState *cpu)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ int i = 0;
+ wreg(cpu->hvf_fd, HV_X86_RAX, RAX(env));
+ wreg(cpu->hvf_fd, HV_X86_RBX, RBX(env));
+ wreg(cpu->hvf_fd, HV_X86_RCX, RCX(env));
+ wreg(cpu->hvf_fd, HV_X86_RDX, RDX(env));
+ wreg(cpu->hvf_fd, HV_X86_RSI, RSI(env));
+ wreg(cpu->hvf_fd, HV_X86_RDI, RDI(env));
+ wreg(cpu->hvf_fd, HV_X86_RBP, RBP(env));
+ wreg(cpu->hvf_fd, HV_X86_RSP, RSP(env));
+ for (i = 8; i < 16; i++) {
+ wreg(cpu->hvf_fd, HV_X86_RAX + i, RRX(env, i));
+ }
+
+ lflags_to_rflags(env);
+ wreg(cpu->hvf_fd, HV_X86_RFLAGS, RFLAGS(env));
+ macvm_set_rip(cpu, RIP(env));
+}
+
+bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins)
+{
+ /*if (hvf_vcpu_id(cpu))
+ printf("%d, %llx: exec_instruction %s\n", hvf_vcpu_id(cpu), RIP(cpu),
+ decode_cmd_to_string(ins->cmd));*/
+
+ if (!_cmd_handler[ins->cmd].handler) {
+ printf("Unimplemented handler (%llx) for %d (%x %x) \n", RIP(env),
+ ins->cmd, ins->opcode[0],
+ ins->opcode_len > 1 ? ins->opcode[1] : 0);
+ RIP(env) += ins->len;
+ return true;
+ }
+
+ _cmd_handler[ins->cmd].handler(env, ins);
+ return true;
+}
+
+void init_emu(void)
+{
+ init_cmd_handler();
+}
diff --git a/target/i386/hvf/x86_emu.h b/target/i386/hvf/x86_emu.h
new file mode 100644
index 0000000000..fbb4832576
--- /dev/null
+++ b/target/i386/hvf/x86_emu.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HVF_X86_EMU_H
+#define HVF_X86_EMU_H
+
+#include "x86.h"
+#include "x86_decode.h"
+#include "cpu.h"
+
+void init_emu(void);
+bool exec_instruction(struct CPUX86State *env, struct x86_decode *ins);
+
+void load_regs(struct CPUState *cpu);
+void store_regs(struct CPUState *cpu);
+
+void simulate_rdmsr(struct CPUState *cpu);
+void simulate_wrmsr(struct CPUState *cpu);
+
+target_ulong read_reg(CPUX86State *env, int reg, int size);
+void write_reg(CPUX86State *env, int reg, target_ulong val, int size);
+target_ulong read_val_from_reg(target_ulong reg_ptr, int size);
+void write_val_to_reg(target_ulong reg_ptr, target_ulong val, int size);
+void write_val_ext(struct CPUX86State *env, target_ulong ptr, target_ulong val, int size);
+uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes);
+target_ulong read_val_ext(struct CPUX86State *env, target_ulong ptr, int size);
+
+void exec_movzx(struct CPUX86State *env, struct x86_decode *decode);
+void exec_shl(struct CPUX86State *env, struct x86_decode *decode);
+void exec_movsx(struct CPUX86State *env, struct x86_decode *decode);
+void exec_ror(struct CPUX86State *env, struct x86_decode *decode);
+void exec_rol(struct CPUX86State *env, struct x86_decode *decode);
+void exec_rcl(struct CPUX86State *env, struct x86_decode *decode);
+void exec_rcr(struct CPUX86State *env, struct x86_decode *decode);
+#endif
diff --git a/target/i386/hvf/x86_flags.c b/target/i386/hvf/x86_flags.c
new file mode 100644
index 0000000000..ee6d33f861
--- /dev/null
+++ b/target/i386/hvf/x86_flags.c
@@ -0,0 +1,315 @@
+/////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2001-2012 The Bochs Project
+// Copyright (C) 2017 Google Inc.
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+/////////////////////////////////////////////////////////////////////////
+/*
+ * flags functions
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu-common.h"
+#include "panic.h"
+#include "cpu.h"
+#include "x86_flags.h"
+#include "x86.h"
+
+
+/* this is basically Bochs code */
+
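+/*
+ * Lazy flags: lflags.result caches the last ALU result (used to derive ZF, SF
+ * and PF), while lflags.auxbits packs the remaining state: CF in bit 31, the
+ * carry out of the next-to-top bit in bit 30 (so OF = bit 31 ^ bit 30), AF in
+ * bit 3, the parity delta byte in bits 15:8 and the sign delta in bit 0.
+ */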
+#define LF_SIGN_BIT 31
+
+#define LF_BIT_SD (0) /* lazy Sign Flag Delta */
+#define LF_BIT_AF (3) /* lazy Adjust flag */
+#define LF_BIT_PDB (8) /* lazy Parity Delta Byte (8 bits) */
+#define LF_BIT_CF (31) /* lazy Carry Flag */
+#define LF_BIT_PO (30) /* lazy Partial Overflow = CF ^ OF */
+
+#define LF_MASK_SD (0x01 << LF_BIT_SD)
+#define LF_MASK_AF (0x01 << LF_BIT_AF)
+#define LF_MASK_PDB (0xFF << LF_BIT_PDB)
+#define LF_MASK_CF (0x01 << LF_BIT_CF)
+#define LF_MASK_PO (0x01 << LF_BIT_PO)
+
+#define ADD_COUT_VEC(op1, op2, result) \
+ (((op1) & (op2)) | (((op1) | (op2)) & (~(result))))
+
+#define SUB_COUT_VEC(op1, op2, result) \
+ (((~(op1)) & (op2)) | (((~(op1)) ^ (op2)) & (result)))
+
+#define GET_ADD_OVERFLOW(op1, op2, result, mask) \
+ ((((op1) ^ (result)) & ((op2) ^ (result))) & (mask))
+
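+/*
+ * ADD_COUT_VEC/SUB_COUT_VEC compute the per-bit carry-out vector of an
+ * addition/subtraction; the SET_FLAGS_* macros below fold that vector into
+ * lflags.auxbits so CF, OF and AF can be rederived on demand.
+ */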
+/* ******************* */
+/* OSZAPC */
+/* ******************* */
+
+/* size, carries, result */
+#define SET_FLAGS_OSZAPC_SIZE(size, lf_carries, lf_result) { \
+ target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
+ (((lf_carries) >> (size - 2)) << LF_BIT_PO); \
+ env->hvf_emul->lflags.result = (target_ulong)(int##size##_t)(lf_result); \
+ if ((size) == 32) { \
+ temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
+ } else if ((size) == 16) { \
+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
+ } else if ((size) == 8) { \
+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
+ } else { \
+ VM_PANIC("unimplemented"); \
+ } \
+ env->hvf_emul->lflags.auxbits = (target_ulong)(uint32_t)temp; \
+}
+
+/* carries, result */
+#define SET_FLAGS_OSZAPC_8(carries, result) \
+ SET_FLAGS_OSZAPC_SIZE(8, carries, result)
+#define SET_FLAGS_OSZAPC_16(carries, result) \
+ SET_FLAGS_OSZAPC_SIZE(16, carries, result)
+#define SET_FLAGS_OSZAPC_32(carries, result) \
+ SET_FLAGS_OSZAPC_SIZE(32, carries, result)
+
+/* ******************* */
+/* OSZAP */
+/* ******************* */
+/* size, carries, result */
+#define SET_FLAGS_OSZAP_SIZE(size, lf_carries, lf_result) { \
+ target_ulong temp = ((lf_carries) & (LF_MASK_AF)) | \
+ (((lf_carries) >> (size - 2)) << LF_BIT_PO); \
+ if ((size) == 32) { \
+ temp = ((lf_carries) & ~(LF_MASK_PDB | LF_MASK_SD)); \
+ } else if ((size) == 16) { \
+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 16); \
+ } else if ((size) == 8) { \
+ temp = ((lf_carries) & (LF_MASK_AF)) | ((lf_carries) << 24); \
+ } else { \
+ VM_PANIC("unimplemented"); \
+ } \
+ env->hvf_emul->lflags.result = (target_ulong)(int##size##_t)(lf_result); \
+ target_ulong delta_c = (env->hvf_emul->lflags.auxbits ^ temp) & LF_MASK_CF; \
+ delta_c ^= (delta_c >> 1); \
+ env->hvf_emul->lflags.auxbits = (target_ulong)(uint32_t)(temp ^ delta_c); \
+}
+
+/* carries, result */
+#define SET_FLAGS_OSZAP_8(carries, result) \
+ SET_FLAGS_OSZAP_SIZE(8, carries, result)
+#define SET_FLAGS_OSZAP_16(carries, result) \
+ SET_FLAGS_OSZAP_SIZE(16, carries, result)
+#define SET_FLAGS_OSZAP_32(carries, result) \
+ SET_FLAGS_OSZAP_SIZE(32, carries, result)
+
+void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf)
+{
+ uint32_t temp_po = new_of ^ new_cf;
+ env->hvf_emul->lflags.auxbits &= ~(LF_MASK_PO | LF_MASK_CF);
+ env->hvf_emul->lflags.auxbits |= (temp_po << LF_BIT_PO) |
+ (new_cf << LF_BIT_CF);
+}
+
+void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAPC_32(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAPC_16(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAPC_8(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAPC_32(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAPC_16(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAPC_8(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAP_32(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAP_16(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAP_8(SUB_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAP_32(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAP_16(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAP_8(ADD_COUT_VEC(v1, v2, diff), diff);
+}
+
+void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff)
+{
+ SET_FLAGS_OSZAPC_32(0, diff);
+}
+
+void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff)
+{
+ SET_FLAGS_OSZAPC_16(0, diff);
+}
+
+void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff)
+{
+ SET_FLAGS_OSZAPC_8(0, diff);
+}
+
+bool get_PF(CPUX86State *env)
+{
+ uint32_t temp = (255 & env->hvf_emul->lflags.result);
+ temp = temp ^ (255 & (env->hvf_emul->lflags.auxbits >> LF_BIT_PDB));
+ temp = (temp ^ (temp >> 4)) & 0x0F;
+ return (0x9669U >> temp) & 1;
+}
+
+void set_PF(CPUX86State *env, bool val)
+{
+ uint32_t temp = (255 & env->hvf_emul->lflags.result) ^ (!val);
+ env->hvf_emul->lflags.auxbits &= ~(LF_MASK_PDB);
+ env->hvf_emul->lflags.auxbits |= (temp << LF_BIT_PDB);
+}
+
+bool get_OF(CPUX86State *env)
+{
+ return ((env->hvf_emul->lflags.auxbits + (1U << LF_BIT_PO)) >> LF_BIT_CF) & 1;
+}
+
+bool get_CF(CPUX86State *env)
+{
+ return (env->hvf_emul->lflags.auxbits >> LF_BIT_CF) & 1;
+}
+
+void set_OF(CPUX86State *env, bool val)
+{
+ bool old_cf = get_CF(env);
+ SET_FLAGS_OxxxxC(env, val, old_cf);
+}
+
+void set_CF(CPUX86State *env, bool val)
+{
+ bool old_of = get_OF(env);
+ SET_FLAGS_OxxxxC(env, old_of, val);
+}
+
+bool get_AF(CPUX86State *env)
+{
+ return (env->hvf_emul->lflags.auxbits >> LF_BIT_AF) & 1;
+}
+
+void set_AF(CPUX86State *env, bool val)
+{
+ env->hvf_emul->lflags.auxbits &= ~(LF_MASK_AF);
+ env->hvf_emul->lflags.auxbits |= val << LF_BIT_AF;
+}
+
+bool get_ZF(CPUX86State *env)
+{
+ return !env->hvf_emul->lflags.result;
+}
+
+void set_ZF(CPUX86State *env, bool val)
+{
+ if (val) {
+ env->hvf_emul->lflags.auxbits ^=
+ (((env->hvf_emul->lflags.result >> LF_SIGN_BIT) & 1) << LF_BIT_SD);
+ /* merge the parity bits into the Parity Delta Byte */
+ uint32_t temp_pdb = (255 & env->hvf_emul->lflags.result);
+ env->hvf_emul->lflags.auxbits ^= (temp_pdb << LF_BIT_PDB);
+ /* now zero the .result value */
+ env->hvf_emul->lflags.result = 0;
+ } else {
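+ /*
+ * Force a non-zero result without disturbing the other lazy flags:
+ * bit 8 lies above the parity byte and (assuming the usual layout)
+ * below the sign bit.
+ */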
+ env->hvf_emul->lflags.result |= (1 << 8);
+ }
+}
+
+bool get_SF(CPUX86State *env)
+{
+ return ((env->hvf_emul->lflags.result >> LF_SIGN_BIT) ^
+ (env->hvf_emul->lflags.auxbits >> LF_BIT_SD)) & 1;
+}
+
+void set_SF(CPUX86State *env, bool val)
+{
+ bool temp_sf = get_SF(env);
+ env->hvf_emul->lflags.auxbits ^= (temp_sf ^ val) << LF_BIT_SD;
+}
+
+void lflags_to_rflags(CPUX86State *env)
+{
+ env->hvf_emul->rflags.cf = get_CF(env);
+ env->hvf_emul->rflags.pf = get_PF(env);
+ env->hvf_emul->rflags.af = get_AF(env);
+ env->hvf_emul->rflags.zf = get_ZF(env);
+ env->hvf_emul->rflags.sf = get_SF(env);
+ env->hvf_emul->rflags.of = get_OF(env);
+}
+
+void rflags_to_lflags(CPUX86State *env)
+{
+ env->hvf_emul->lflags.auxbits = env->hvf_emul->lflags.result = 0;
+ set_OF(env, env->hvf_emul->rflags.of);
+ set_SF(env, env->hvf_emul->rflags.sf);
+ set_ZF(env, env->hvf_emul->rflags.zf);
+ set_AF(env, env->hvf_emul->rflags.af);
+ set_PF(env, env->hvf_emul->rflags.pf);
+ set_CF(env, env->hvf_emul->rflags.cf);
+}
diff --git a/target/i386/hvf/x86_flags.h b/target/i386/hvf/x86_flags.h
new file mode 100644
index 0000000000..8942745988
--- /dev/null
+++ b/target/i386/hvf/x86_flags.h
@@ -0,0 +1,80 @@
+/////////////////////////////////////////////////////////////////////////
+//
+// Copyright (C) 2001-2012 The Bochs Project
+// Copyright (C) 2017 Google Inc.
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+/////////////////////////////////////////////////////////////////////////
+/*
+ * x86 eflags functions
+ */
+#ifndef __X86_FLAGS_H__
+#define __X86_FLAGS_H__
+
+#include "cpu.h"
+void lflags_to_rflags(CPUX86State *env);
+void rflags_to_lflags(CPUX86State *env);
+
+bool get_PF(CPUX86State *env);
+void set_PF(CPUX86State *env, bool val);
+bool get_CF(CPUX86State *env);
+void set_CF(CPUX86State *env, bool val);
+bool get_AF(CPUX86State *env);
+void set_AF(CPUX86State *env, bool val);
+bool get_ZF(CPUX86State *env);
+void set_ZF(CPUX86State *env, bool val);
+bool get_SF(CPUX86State *env);
+void set_SF(CPUX86State *env, bool val);
+bool get_OF(CPUX86State *env);
+void set_OF(CPUX86State *env, bool val);
+
+void SET_FLAGS_OxxxxC(CPUX86State *env, uint32_t new_of, uint32_t new_cf);
+
+void SET_FLAGS_OSZAPC_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAPC_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAPC_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAPC_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAPC_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAPC_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAP_SUB32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAP_SUB16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAP_SUB8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAP_ADD32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAP_ADD16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAP_ADD8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+void SET_FLAGS_OSZAPC_LOGIC32(CPUX86State *env, uint32_t v1, uint32_t v2,
+ uint32_t diff);
+void SET_FLAGS_OSZAPC_LOGIC16(CPUX86State *env, uint16_t v1, uint16_t v2,
+ uint16_t diff);
+void SET_FLAGS_OSZAPC_LOGIC8(CPUX86State *env, uint8_t v1, uint8_t v2,
+ uint8_t diff);
+
+#endif /* __X86_FLAGS_H__ */
diff --git a/target/i386/hvf/x86_mmu.c b/target/i386/hvf/x86_mmu.c
new file mode 100644
index 0000000000..5c1f35acd0
--- /dev/null
+++ b/target/i386/hvf/x86_mmu.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "panic.h"
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "x86.h"
+#include "x86_mmu.h"
+#include "string.h"
+#include "vmcs.h"
+#include "vmx.h"
+
+#include "memory.h"
+#include "exec/address-spaces.h"
+
+#define pte_present(pte) (pte & PT_PRESENT)
+#define pte_write_access(pte) (pte & PT_WRITE)
+#define pte_user_access(pte) (pte & PT_USER)
+#define pte_exec_access(pte) (!(pte & PT_NX))
+
+#define pte_large_page(pte) (pte & PT_PS)
+#define pte_global_access(pte) (pte & PT_GLOBAL)
+
+#define PAE_CR3_MASK (~0x1fllu)
+#define LEGACY_CR3_MASK (0xffffffff)
+
+#define LEGACY_PTE_PAGE_MASK (0xffffffffllu << 12)
+#define PAE_PTE_PAGE_MASK ((-1llu << 12) & ((1llu << 52) - 1))
+#define PAE_PTE_LARGE_PAGE_MASK ((-1llu << (21)) & ((1llu << 52) - 1))
+
+struct gpt_translation {
+ target_ulong gva;
+ uint64_t gpa;
+ int err_code;
+ uint64_t pte[5];
+ bool write_access;
+ bool user_access;
+ bool exec_access;
+};
+
+static int gpt_top_level(struct CPUState *cpu, bool pae)
+{
+ if (!pae) {
+ return 2;
+ }
+ if (x86_is_long_mode(cpu)) {
+ return 4;
+ }
+
+ return 3;
+}
+
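+/*
+ * Each paging level indexes 9 bits of the virtual address with PAE/long
+ * mode page tables and 10 bits with legacy 2-level paging, starting at
+ * bit 12 for level 1.
+ */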
+static inline int gpt_entry(target_ulong addr, int level, bool pae)
+{
+ int level_shift = pae ? 9 : 10;
+ return (addr >> (level_shift * (level - 1) + 12)) & ((1 << level_shift) - 1);
+}
+
+static inline int pte_size(bool pae)
+{
+ return pae ? 8 : 4;
+}
+
+
+static bool get_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
+ int level, bool pae)
+{
+ int index;
+ uint64_t pte = 0;
+ uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
+ uint64_t gpa = pt->pte[level] & page_mask;
+
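+ /*
+ * In 32-bit PAE mode the PDPT is only 32-byte aligned (CR3[31:5]), so
+ * the top-level pointer is used here without applying the 4K page mask.
+ */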
+ if (level == 3 && !x86_is_long_mode(cpu)) {
+ gpa = pt->pte[level];
+ }
+
+ index = gpt_entry(pt->gva, level, pae);
+ address_space_rw(&address_space_memory, gpa + index * pte_size(pae),
+ MEMTXATTRS_UNSPECIFIED, (uint8_t *)&pte, pte_size(pae), 0);
+
+ pt->pte[level - 1] = pte;
+
+ return true;
+}
+
+/* test page table entry */
+static bool test_pt_entry(struct CPUState *cpu, struct gpt_translation *pt,
+ int level, bool *is_large, bool pae)
+{
+ uint64_t pte = pt->pte[level];
+
+ if (pt->write_access) {
+ pt->err_code |= MMU_PAGE_WT;
+ }
+ if (pt->user_access) {
+ pt->err_code |= MMU_PAGE_US;
+ }
+ if (pt->exec_access) {
+ pt->err_code |= MMU_PAGE_NX;
+ }
+
+ if (!pte_present(pte)) {
+ return false;
+ }
+
+ if (pae && !x86_is_long_mode(cpu) && 2 == level) {
+ goto exit;
+ }
+
+ if (1 == level && pte_large_page(pte)) {
+ pt->err_code |= MMU_PAGE_PT;
+ *is_large = true;
+ }
+ if (!level) {
+ pt->err_code |= MMU_PAGE_PT;
+ }
+
+ uint32_t cr0 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0);
+ /* check protection */
+ if (cr0 & CR0_WP) {
+ if (pt->write_access && !pte_write_access(pte)) {
+ return false;
+ }
+ }
+
+ if (pt->user_access && !pte_user_access(pte)) {
+ return false;
+ }
+
+ if (pae && pt->exec_access && !pte_exec_access(pte)) {
+ return false;
+ }
+
+exit:
+ /* TODO: check reserved bits */
+ return true;
+}
+
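+/*
+ * PSE-36 style 4MB page directory entry: bits 13..20 of the entry supply
+ * physical address bits 32..39, bits 22..31 supply bits 22..31.
+ */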
+static inline uint64_t pse_pte_to_page(uint64_t pte)
+{
+ return ((pte & 0x1fe000) << 19) | (pte & 0xffc00000);
+}
+
+static inline uint64_t large_page_gpa(struct gpt_translation *pt, bool pae)
+{
+ VM_PANIC_ON(!pte_large_page(pt->pte[1]))
+ /* 2MB large page */
+ if (pae) {
+ return (pt->pte[1] & PAE_PTE_LARGE_PAGE_MASK) | (pt->gva & 0x1fffff);
+ }
+
+ /* 4MB large page */
+ return pse_pte_to_page(pt->pte[1]) | (pt->gva & 0x3fffff);
+}
+
+
+
+static bool walk_gpt(struct CPUState *cpu, target_ulong addr, int err_code,
+ struct gpt_translation *pt, bool pae)
+{
+ int top_level, level;
+ bool is_large = false;
+ target_ulong cr3 = rvmcs(cpu->hvf_fd, VMCS_GUEST_CR3);
+ uint64_t page_mask = pae ? PAE_PTE_PAGE_MASK : LEGACY_PTE_PAGE_MASK;
+
+ memset(pt, 0, sizeof(*pt));
+ top_level = gpt_top_level(cpu, pae);
+
+ pt->pte[top_level] = pae ? (cr3 & PAE_CR3_MASK) : (cr3 & LEGACY_CR3_MASK);
+ pt->gva = addr;
+ pt->user_access = (err_code & MMU_PAGE_US);
+ pt->write_access = (err_code & MMU_PAGE_WT);
+ pt->exec_access = (err_code & MMU_PAGE_NX);
+
+ for (level = top_level; level > 0; level--) {
+ get_pt_entry(cpu, pt, level, pae);
+
+ if (!test_pt_entry(cpu, pt, level - 1, &is_large, pae)) {
+ return false;
+ }
+
+ if (is_large) {
+ break;
+ }
+ }
+
+ if (!is_large) {
+ pt->gpa = (pt->pte[0] & page_mask) | (pt->gva & 0xfff);
+ } else {
+ pt->gpa = large_page_gpa(pt, pae);
+ }
+
+ return true;
+}
+
+
+bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa)
+{
+ bool res;
+ struct gpt_translation pt;
+ int err_code = 0;
+
+ if (!x86_is_paging_mode(cpu)) {
+ *gpa = gva;
+ return true;
+ }
+
+ res = walk_gpt(cpu, gva, err_code, &pt, x86_is_pae_enabled(cpu));
+ if (res) {
+ *gpa = pt.gpa;
+ return true;
+ }
+
+ return false;
+}
+
+void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes)
+{
+ uint64_t gpa;
+
+ while (bytes > 0) {
+ /* copy at most up to the end of the current guest page */
+ int copy = MIN(bytes, 0x1000 - (gva & 0xfff));
+
+ if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
+ VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
+ } else {
+ address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
+ data, copy, 1);
+ }
+
+ bytes -= copy;
+ gva += copy;
+ data += copy;
+ }
+}
+
+void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes)
+{
+ uint64_t gpa;
+
+ while (bytes > 0) {
+ /* copy at most up to the end of the current guest page */
+ int copy = MIN(bytes, 0x1000 - (gva & 0xfff));
+
+ if (!mmu_gva_to_gpa(cpu, gva, &gpa)) {
+ VM_PANIC_EX("%s: mmu_gva_to_gpa %llx failed\n", __func__, gva);
+ }
+ address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
+ data, copy, 0);
+
+ bytes -= copy;
+ gva += copy;
+ data += copy;
+ }
+}
diff --git a/target/i386/hvf/x86_mmu.h b/target/i386/hvf/x86_mmu.h
new file mode 100644
index 0000000000..0bd1acc94f
--- /dev/null
+++ b/target/i386/hvf/x86_mmu.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __X86_MMU_H__
+#define __X86_MMU_H__
+
+#define PT_PRESENT (1 << 0)
+#define PT_WRITE (1 << 1)
+#define PT_USER (1 << 2)
+#define PT_WT (1 << 3)
+#define PT_CD (1 << 4)
+#define PT_ACCESSED (1 << 5)
+#define PT_DIRTY (1 << 6)
+#define PT_PS (1 << 7)
+#define PT_GLOBAL (1 << 8)
+#define PT_NX (1llu << 63)
+
+/* error codes */
+#define MMU_PAGE_PT (1 << 0)
+#define MMU_PAGE_WT (1 << 1)
+#define MMU_PAGE_US (1 << 2)
+#define MMU_PAGE_NX (1 << 3)
+
+bool mmu_gva_to_gpa(struct CPUState *cpu, target_ulong gva, uint64_t *gpa);
+
+void vmx_write_mem(struct CPUState *cpu, target_ulong gva, void *data, int bytes);
+void vmx_read_mem(struct CPUState *cpu, void *data, target_ulong gva, int bytes);
+
+#endif /* __X86_MMU_H__ */
diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c
new file mode 100644
index 0000000000..d7f665f8fa
--- /dev/null
+++ b/target/i386/hvf/x86_task.c
@@ -0,0 +1,191 @@
+// This software is licensed under the terms of the GNU General Public
+// License version 2, as published by the Free Software Foundation, and
+// may be copied, distributed, and modified under those terms.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+#include "qemu/osdep.h"
+#include "panic.h"
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+
+#include "sysemu/hvf.h"
+#include "hvf-i386.h"
+#include "vmcs.h"
+#include "vmx.h"
+#include "x86.h"
+#include "x86_descr.h"
+#include "x86_mmu.h"
+#include "x86_decode.h"
+#include "x86_emu.h"
+#include "x86_task.h"
+#include "x86hvf.h"
+
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+
+#include "exec/address-spaces.h"
+#include "exec/exec-all.h"
+#include "exec/ioport.h"
+#include "hw/i386/apic_internal.h"
+#include "hw/boards.h"
+#include "qemu/main-loop.h"
+#include "strings.h"
+#include "sysemu/accel.h"
+#include "sysemu/sysemu.h"
+#include "target/i386/cpu.h"
+
+// TODO: taskswitch handling
+static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ /* CR3 and the LDT selector are intentionally not saved */
+ tss->eip = EIP(env);
+ tss->eflags = EFLAGS(env);
+ tss->eax = EAX(env);
+ tss->ecx = ECX(env);
+ tss->edx = EDX(env);
+ tss->ebx = EBX(env);
+ tss->esp = ESP(env);
+ tss->ebp = EBP(env);
+ tss->esi = ESI(env);
+ tss->edi = EDI(env);
+
+ tss->es = vmx_read_segment_selector(cpu, R_ES).sel;
+ tss->cs = vmx_read_segment_selector(cpu, R_CS).sel;
+ tss->ss = vmx_read_segment_selector(cpu, R_SS).sel;
+ tss->ds = vmx_read_segment_selector(cpu, R_DS).sel;
+ tss->fs = vmx_read_segment_selector(cpu, R_FS).sel;
+ tss->gs = vmx_read_segment_selector(cpu, R_GS).sel;
+}
+
+static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);
+
+ RIP(env) = tss->eip;
+ EFLAGS(env) = tss->eflags | 2;
+
+ /* General purpose registers */
+ RAX(env) = tss->eax;
+ RCX(env) = tss->ecx;
+ RDX(env) = tss->edx;
+ RBX(env) = tss->ebx;
+ RSP(env) = tss->esp;
+ RBP(env) = tss->ebp;
+ RSI(env) = tss->esi;
+ RDI(env) = tss->edi;
+
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
+ vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
+}
+
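+/*
+ * 32-bit TSS switch: only the dynamic fields of the old TSS (EIP up to,
+ * but not including, the LDT selector) are written back; the new TSS is
+ * then read in full and its state loaded.
+ */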
+static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
+ uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
+{
+ struct x86_tss_segment32 tss_seg;
+ uint32_t new_tss_base = x86_segment_base(new_desc);
+ uint32_t eip_offset = offsetof(struct x86_tss_segment32, eip);
+ uint32_t ldt_sel_offset = offsetof(struct x86_tss_segment32, ldt);
+
+ vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));
+ save_state_to_tss32(cpu, &tss_seg);
+
+ vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset);
+ vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));
+
+ if (old_tss_sel.sel != 0xffff) {
+ tss_seg.prev_tss = old_tss_sel.sel;
+
+ vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss, sizeof(tss_seg.prev_tss));
+ }
+ load_state_from_tss32(cpu, &tss_seg);
+ return 0;
+}
+
+void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
+{
+ uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
+ if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
+ gate_type != VMCS_INTR_T_HWINTR &&
+ gate_type != VMCS_INTR_T_NMI)) {
+ int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
+ macvm_set_rip(cpu, rip + ins_len);
+ return;
+ }
+
+ load_regs(cpu);
+
+ struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
+ int ret;
+ x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
+ uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
+ uint32_t desc_limit;
+ struct x86_call_gate task_gate_desc;
+ struct vmx_segment vmx_seg;
+
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);
+ x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
+
+ if (reason == TSR_IDT_GATE && gate_valid) {
+ int dpl;
+
+ ret = x86_read_call_gate(cpu, &task_gate_desc, gate);
+
+ dpl = task_gate_desc.dpl;
+ x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
+ if (tss_sel.rpl > dpl || cs.rpl > dpl)
+ ;//DPRINTF("emulate_gp");
+ }
+
+ desc_limit = x86_segment_limit(&next_tss_desc);
+ if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) {
+ VM_PANIC("emulate_ts");
+ }
+
+ if (reason == TSR_IRET || reason == TSR_JMP) {
+ curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
+ x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
+ }
+
+ if (reason == TSR_IRET)
+ EFLAGS(env) &= ~RFLAGS_NT;
+
+ if (reason != TSR_CALL && reason != TSR_IDT_GATE)
+ old_tss_sel.sel = 0xffff;
+
+ if (reason != TSR_IRET) {
+ next_tss_desc.type |= (1 << 1); /* set busy flag */
+ x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
+ }
+
+ if (next_tss_desc.type & 8)
+ ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
+ else
+ //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
+ VM_PANIC("task_switch_16");
+
+ macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);
+ x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
+ vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);
+
+ store_regs(cpu);
+
+ hv_vcpu_invalidate_tlb(cpu->hvf_fd);
+ hv_vcpu_flush(cpu->hvf_fd);
+}
diff --git a/target/i386/hvf/x86_task.h b/target/i386/hvf/x86_task.h
new file mode 100644
index 0000000000..4f1b188d2e
--- /dev/null
+++ b/target/i386/hvf/x86_task.h
@@ -0,0 +1,18 @@
+/* This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 or
+ * (at your option) version 3 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HVF_TASK
+#define HVF_TASK
+void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
+ int reason, bool gate_valid, uint8_t gate, uint64_t gate_type);
+#endif
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
new file mode 100644
index 0000000000..71c0515073
--- /dev/null
+++ b/target/i386/hvf/x86hvf.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu-common.h"
+#include "x86hvf.h"
+#include "vmx.h"
+#include "vmcs.h"
+#include "cpu.h"
+#include "x86_descr.h"
+#include "x86_decode.h"
+
+#include "hw/i386/apic_internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <Hypervisor/hv.h>
+#include <Hypervisor/hv_vmx.h>
+#include <stdint.h>
+
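+/*
+ * Convert QEMU's SegmentCache flags into the VMCS guest-segment
+ * access-rights word: type in bits 3:0, then S, DPL, P, AVL, L, D/B and
+ * G in the positions the VMX layout expects.
+ */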
+void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
+ SegmentCache *qseg, bool is_tr)
+{
+ vmx_seg->sel = qseg->selector;
+ vmx_seg->base = qseg->base;
+ vmx_seg->limit = qseg->limit;
+
+ if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {
+ /* the TR register is usable after processor reset despite
+ * having a null selector */
+ vmx_seg->ar = 1 << 16;
+ return;
+ }
+ vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;
+ vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;
+ vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;
+ vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;
+ vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;
+ vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;
+ vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;
+ vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;
+}
+
+void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
+{
+ qseg->limit = vmx_seg->limit;
+ qseg->base = vmx_seg->base;
+ qseg->selector = vmx_seg->sel;
+ qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) |
+ (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) |
+ (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) |
+ (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) |
+ (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) |
+ (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) |
+ (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) |
+ (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);
+}
+
+void hvf_put_xsave(CPUState *cpu_state)
+{
+
+ struct X86XSaveArea *xsave;
+
+ xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf;
+
+ x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);
+
+ if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
+ abort();
+ }
+}
+
+void hvf_put_segments(CPUState *cpu_state)
+{
+ CPUX86State *env = &X86_CPU(cpu_state)->env;
+ struct vmx_segment seg;
+
+ wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
+ wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);
+
+ wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
+ wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);
+
+ /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
+ wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
+ vmx_update_tpr(cpu_state);
+ wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);
+
+ macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
+ macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_CS);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_DS);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_ES);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_SS);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_FS);
+
+ hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_GS);
+
+ hvf_set_segment(cpu_state, &seg, &env->tr, true);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_TR);
+
+ hvf_set_segment(cpu_state, &seg, &env->ldt, false);
+ vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);
+
+ hv_vcpu_flush(cpu_state->hvf_fd);
+}
+
+void hvf_put_msrs(CPUState *cpu_state)
+{
+ CPUX86State *env = &X86_CPU(cpu_state)->env;
+
+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
+ env->sysenter_cs);
+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
+ env->sysenter_esp);
+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
+ env->sysenter_eip);
+
+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);
+
+#ifdef TARGET_X86_64
+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
+#endif
+
+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
+ hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);
+
+ /* if (!osx_is_sierra())
+ wvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET, env->tsc - rdtscp());*/
+ hv_vm_sync_tsc(env->tsc);
+}
+
+
+void hvf_get_xsave(CPUState *cpu_state)
+{
+ struct X86XSaveArea *xsave;
+
+ xsave = X86_CPU(cpu_state)->env.kvm_xsave_buf;
+
+ if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) {
+ abort();
+ }
+
+ x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave);
+}
+
+void hvf_get_segments(CPUState *cpu_state)
+{
+ CPUX86State *env = &X86_CPU(cpu_state)->env;
+
+ struct vmx_segment seg;
+
+ env->interrupt_injected = -1;
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_CS);
+ hvf_get_segment(&env->segs[R_CS], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_DS);
+ hvf_get_segment(&env->segs[R_DS], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_ES);
+ hvf_get_segment(&env->segs[R_ES], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_FS);
+ hvf_get_segment(&env->segs[R_FS], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_GS);
+ hvf_get_segment(&env->segs[R_GS], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_SS);
+ hvf_get_segment(&env->segs[R_SS], &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_TR);
+ hvf_get_segment(&env->tr, &seg);
+
+ vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
+ hvf_get_segment(&env->ldt, &seg);
+
+ env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
+ env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
+ env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
+ env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);
+
+ env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
+ env->cr[2] = 0;
+ env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
+ env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);
+
+ env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
+}
+
+void hvf_get_msrs(CPUState *cpu_state)
+{
+ CPUX86State *env = &X86_CPU(cpu_state)->env;
+ uint64_t tmp;
+
+ hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
+ env->sysenter_cs = tmp;
+
+ hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
+ env->sysenter_esp = tmp;
+
+ hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
+ env->sysenter_eip = tmp;
+
+ hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);
+
+#ifdef TARGET_X86_64
+ hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
+ hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
+ hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
+ hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
+#endif
+
+ hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);
+
+ env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
+}
+
+int hvf_put_registers(CPUState *cpu_state)
+{
+ X86CPU *x86cpu = X86_CPU(cpu_state);
+ CPUX86State *env = &x86cpu->env;
+
+ wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
+ wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
+ wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
+ wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
+ wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
+ wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
+ wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
+ wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
+ wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
+ wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
+ wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
+ wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
+ wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
+ wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
+ wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
+ wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
+ wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
+ wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);
+
+ wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);
+
+ hvf_put_xsave(cpu_state);
+
+ hvf_put_segments(cpu_state);
+
+ hvf_put_msrs(cpu_state);
+
+ wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
+ wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
+ wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
+ wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
+ wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
+ wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
+ wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
+ wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);
+
+ return 0;
+}
+
+int hvf_get_registers(CPUState *cpu_state)
+{
+ X86CPU *x86cpu = X86_CPU(cpu_state);
+ CPUX86State *env = &x86cpu->env;
+
+
+ env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
+ env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
+ env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
+ env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
+ env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
+ env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
+ env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
+ env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
+ env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
+ env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
+ env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
+ env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
+ env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
+ env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
+ env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
+ env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);
+
+ env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
+ env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);
+
+ hvf_get_xsave(cpu_state);
+ env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);
+
+ hvf_get_segments(cpu_state);
+ hvf_get_msrs(cpu_state);
+
+ env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
+ env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
+ env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
+ env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
+ env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
+ env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
+ env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
+ env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);
+
+ return 0;
+}
+
+static void vmx_set_int_window_exiting(CPUState *cpu)
+{
+ uint64_t val;
+ val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
+ wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
+ VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
+}
+
+void vmx_clear_int_window_exiting(CPUState *cpu)
+{
+ uint64_t val;
+ val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
+ wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
+ ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
+}
+
+#define NMI_VEC 2
+
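+/*
+ * Re-inject a pending software interrupt, exception or NMI into the
+ * guest. Software interrupts and INT3/INTO exceptions additionally need
+ * the instruction length written to VMCS_ENTRY_INST_LENGTH.
+ */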
+bool hvf_inject_interrupts(CPUState *cpu_state)
+{
+ X86CPU *x86cpu = X86_CPU(cpu_state);
+ CPUX86State *env = &x86cpu->env;
+
+ uint8_t vector;
+ uint64_t intr_type;
+ bool have_event = true;
+ if (env->interrupt_injected != -1) {
+ vector = env->interrupt_injected;
+ intr_type = VMCS_INTR_T_SWINTR;
+ } else if (env->exception_injected != -1) {
+ vector = env->exception_injected;
+ if (vector == EXCP03_INT3 || vector == EXCP04_INTO) {
+ intr_type = VMCS_INTR_T_SWEXCEPTION;
+ } else {
+ intr_type = VMCS_INTR_T_HWEXCEPTION;
+ }
+ } else if (env->nmi_injected) {
+ vector = NMI_VEC;
+ intr_type = VMCS_INTR_T_NMI;
+ } else {
+ have_event = false;
+ }
+
+ uint64_t info = 0;
+ if (have_event) {
+ info = vector | intr_type | VMCS_INTR_VALID;
+ uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);
+ if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
+ vmx_clear_nmi_blocking(cpu_state);
+ }
+
+ if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) {
+ info &= ~(1 << 12); /* clear undefined bit */
+ if (intr_type == VMCS_INTR_T_SWINTR ||
+ intr_type == VMCS_INTR_T_SWEXCEPTION) {
+ wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
+ }
+
+ if (env->has_error_code) {
+ wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,
+ env->error_code);
+ }
+ /*printf("reinject %lx err %d\n", info, err);*/
+ wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
+ };
+ }
+
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
+ if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
+ cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | NMI_VEC;
+ wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
+ } else {
+ vmx_set_nmi_window_exiting(cpu_state);
+ }
+ }
+
+ if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
+ (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
+ int line = cpu_get_pic_interrupt(&x86cpu->env);
+ cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ if (line >= 0) {
+ wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
+ VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
+ }
+ }
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
+ vmx_set_int_window_exiting(cpu_state);
+ }
+ return (cpu_state->interrupt_request
+ & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
+}
+
+int hvf_process_events(CPUState *cpu_state)
+{
+ X86CPU *cpu = X86_CPU(cpu_state);
+ CPUX86State *env = &cpu->env;
+
+ EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
+
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
+ hvf_cpu_synchronize_state(cpu_state);
+ do_cpu_init(cpu);
+ }
+
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
+ cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(cpu->apic_state);
+ }
+ if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (EFLAGS(env) & IF_MASK)) ||
+ (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu_state->halted = 0;
+ }
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
+ hvf_cpu_synchronize_state(cpu_state);
+ do_cpu_sipi(cpu);
+ }
+ if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
+ cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ hvf_cpu_synchronize_state(cpu_state);
+ apic_handle_tpr_access_report(cpu->apic_state, env->eip,
+ env->tpr_access_type);
+ }
+ return cpu_state->halted;
+}
diff --git a/target/i386/hvf/x86hvf.h b/target/i386/hvf/x86hvf.h
new file mode 100644
index 0000000000..79539f7282
--- /dev/null
+++ b/target/i386/hvf/x86hvf.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 Veertu Inc,
+ * Copyright (C) 2017 Google Inc,
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef X86HVF_H
+#define X86HVF_H
+#include "cpu.h"
+#include "x86_descr.h"
+
+int hvf_process_events(CPUState *);
+int hvf_put_registers(CPUState *);
+int hvf_get_registers(CPUState *);
+bool hvf_inject_interrupts(CPUState *);
+void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
+ SegmentCache *qseg, bool is_tr);
+void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg);
+void hvf_put_xsave(CPUState *cpu_state);
+void hvf_put_segments(CPUState *cpu_state);
+void hvf_put_msrs(CPUState *cpu_state);
+void hvf_get_xsave(CPUState *cpu_state);
+void hvf_get_msrs(CPUState *cpu_state);
+void vmx_clear_int_window_exiting(CPUState *cpu);
+void hvf_get_segments(CPUState *cpu_state);
+void vmx_update_tpr(CPUState *cpu);
+void hvf_cpu_synchronize_state(CPUState *cpu_state);
+#endif
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index b1e32e95d3..6f69e2fcfd 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -662,8 +662,6 @@ static int hyperv_handle_properties(CPUState *cs)
env->features[FEAT_HYPERV_EAX] |= HV_VP_RUNTIME_AVAILABLE;
}
if (cpu->hyperv_synic) {
- int sint;
-
if (!has_msr_hv_synic ||
kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
@@ -671,10 +669,6 @@ static int hyperv_handle_properties(CPUState *cs)
}
env->features[FEAT_HYPERV_EAX] |= HV_SYNIC_AVAILABLE;
- env->msr_hv_synic_version = HV_SYNIC_VERSION;
- for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
- env->msr_hv_synic_sint[sint] = HV_SINT_MASKED;
- }
}
if (cpu->hyperv_stimer) {
if (!has_msr_hv_stimer) {
@@ -1044,8 +1038,6 @@ void kvm_arch_reset_vcpu(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- env->exception_injected = -1;
- env->interrupt_injected = -1;
env->xcr0 = 1;
if (kvm_irqchip_in_kernel()) {
env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
@@ -1053,6 +1045,13 @@ void kvm_arch_reset_vcpu(X86CPU *cpu)
} else {
env->mp_state = KVM_MP_STATE_RUNNABLE;
}
+
+ if (cpu->hyperv_synic) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+ env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
+ }
+ }
}
void kvm_arch_do_init_vcpu(X86CPU *cpu)
@@ -1122,7 +1121,7 @@ static int kvm_get_supported_msrs(KVMState *s)
break;
case MSR_IA32_XSS:
has_msr_xss = true;
- break;;
+ break;
case HV_X64_MSR_CRASH_CTL:
has_msr_hv_crash = true;
break;
@@ -1678,19 +1677,26 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
env->msr_global_ctrl);
}
- if (has_msr_hv_hypercall) {
- kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
- env->msr_hv_guest_os_id);
- kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
- env->msr_hv_hypercall);
+ /*
+ * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
+ * only sync them to KVM on the first cpu
+ */
+ if (current_cpu == first_cpu) {
+ if (has_msr_hv_hypercall) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
+ env->msr_hv_guest_os_id);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
+ env->msr_hv_hypercall);
+ }
+ if (cpu->hyperv_time) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
+ env->msr_hv_tsc);
+ }
}
if (cpu->hyperv_vapic) {
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
env->msr_hv_vapic);
}
- if (cpu->hyperv_time) {
- kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
- }
if (has_msr_hv_crash) {
int j;
@@ -1706,10 +1712,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
if (cpu->hyperv_synic) {
int j;
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
+
kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
env->msr_hv_synic_control);
- kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
- env->msr_hv_synic_version);
kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
env->msr_hv_synic_evt_page);
kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
@@ -2073,7 +2079,6 @@ static int kvm_get_msrs(X86CPU *cpu)
uint32_t msr;
kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
- kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
@@ -2277,9 +2282,6 @@ static int kvm_get_msrs(X86CPU *cpu)
case HV_X64_MSR_SCONTROL:
env->msr_hv_synic_control = msrs[i].data;
break;
- case HV_X64_MSR_SVERSION:
- env->msr_hv_synic_version = msrs[i].data;
- break;
case HV_X64_MSR_SIEFP:
env->msr_hv_synic_evt_page = msrs[i].data;
break;
diff --git a/target/i386/svm_helper.c b/target/i386/svm_helper.c
index f479239875..303106981c 100644
--- a/target/i386/svm_helper.c
+++ b/target/i386/svm_helper.c
@@ -584,9 +584,7 @@ void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
{
CPUState *cs = CPU(x86_env_get_cpu(env));
- if (retaddr) {
- cpu_restore_state(cs, retaddr);
- }
+ cpu_restore_state(cs, retaddr);
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
PRIx64 ", " TARGET_FMT_lx ")!\n",
diff --git a/target/i386/translate.c b/target/i386/translate.c
index 088a9d9766..0135415d92 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -689,7 +689,7 @@ static void gen_compute_eflags(DisasContext *s)
return;
}
- TCGV_UNUSED(zero);
+ zero = NULL;
dst = cpu_cc_dst;
src1 = cpu_cc_src;
src2 = cpu_cc_src2;
@@ -2050,9 +2050,8 @@ static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
/* Compute the address, with a minimum number of TCG ops. */
static TCGv gen_lea_modrm_1(AddressParts a)
{
- TCGv ea;
+ TCGv ea = NULL;
- TCGV_UNUSED(ea);
if (a.index >= 0) {
if (a.scale == 0) {
ea = cpu_regs[a.index];
@@ -2067,7 +2066,7 @@ static TCGv gen_lea_modrm_1(AddressParts a)
} else if (a.base >= 0) {
ea = cpu_regs[a.base];
}
- if (TCGV_IS_UNUSED(ea)) {
+ if (!ea) {
tcg_gen_movi_tl(cpu_A0, a.disp);
ea = cpu_A0;
} else if (a.disp != 0) {
@@ -3951,7 +3950,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
/* Re-use the carry-out from a previous round. */
- TCGV_UNUSED(carry_in);
+ carry_in = NULL;
carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
switch (s->cc_op) {
case CC_OP_ADCX:
@@ -3979,7 +3978,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
break;
}
/* If we can't reuse carry-out, get it out of EFLAGS. */
- if (TCGV_IS_UNUSED(carry_in)) {
+ if (!carry_in) {
if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
gen_compute_eflags(s);
}
@@ -4467,10 +4466,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
target_ulong pc_start = s->base.pc_next;
s->pc_start = s->pc = pc_start;
- prefixes = 0;
s->override = -1;
- rex_w = -1;
- rex_r = 0;
#ifdef TARGET_X86_64
s->rex_x = 0;
s->rex_b = 0;
@@ -4484,6 +4480,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
return s->pc;
}
+ prefixes = 0;
+ rex_w = -1;
+ rex_r = 0;
+
next_byte:
b = x86_ldub_code(env, s);
/* Collect prefixes. */
@@ -4547,9 +4547,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
/* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
otherwise the instruction is LES or LDS. */
+ s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
break;
}
- s->pc++;
/* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
@@ -7672,7 +7672,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
tcg_gen_mov_tl(a0, cpu_A0);
} else {
gen_op_mov_v_reg(ot, t0, rm);
- TCGV_UNUSED(a0);
+ a0 = NULL;
}
gen_op_mov_v_reg(ot, t1, reg);
tcg_gen_andi_tl(cpu_tmp0, t0, 3);
diff --git a/target/lm32/op_helper.c b/target/lm32/op_helper.c
index 2177c8ad12..30f670eee8 100644
--- a/target/lm32/op_helper.c
+++ b/target/lm32/op_helper.c
@@ -151,11 +151,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
ret = lm32_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (unlikely(ret)) {
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- cpu_loop_exit(cs);
+ /* now we have a real cpu fault */
+ cpu_loop_exit_restore(cs, retaddr);
}
}
#endif
diff --git a/target/lm32/translate.c b/target/lm32/translate.c
index b8b2b13e36..2e1c5e6d01 100644
--- a/target/lm32/translate.c
+++ b/target/lm32/translate.c
@@ -1156,8 +1156,6 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
qemu_log_lock();
qemu_log("\n");
log_target_disas(cs, pc_start, dc->pc - pc_start);
- qemu_log("\nisize=%d osize=%d\n",
- dc->pc - pc_start, tcg_op_buf_count());
qemu_log_unlock();
}
#endif
diff --git a/target/m68k/Makefile.objs b/target/m68k/Makefile.objs
index 39141ab93d..d143f20270 100644
--- a/target/m68k/Makefile.objs
+++ b/target/m68k/Makefile.objs
@@ -1,3 +1,4 @@
obj-y += m68k-semi.o
obj-y += translate.o op_helper.o helper.o cpu.o fpu_helper.o
obj-y += gdbstub.o
+obj-$(CONFIG_SOFTMMU) += monitor.o
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index 0a3dd83548..03126ba543 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -55,17 +55,17 @@ static void m68k_cpu_reset(CPUState *s)
mcc->parent_reset(s);
memset(env, 0, offsetof(CPUM68KState, end_reset_fields));
-#if !defined(CONFIG_USER_ONLY)
- env->sr = 0x2700;
+#ifdef CONFIG_SOFTMMU
+ cpu_m68k_set_sr(env, SR_S | SR_I);
+#else
+ cpu_m68k_set_sr(env, 0);
#endif
- m68k_switch_sp(env);
for (i = 0; i < 8; i++) {
env->fregs[i].d = nan;
}
cpu_m68k_set_fpcr(env, 0);
env->fpsr = 0;
- cpu_m68k_set_ccr(env, 0);
/* TODO: We should set PC from the interrupt vector. */
env->pc = 0;
}
@@ -134,9 +134,18 @@ static void m68020_cpu_initfn(Object *obj)
m68k_set_feature(env, M68K_FEATURE_CAS);
m68k_set_feature(env, M68K_FEATURE_BKPT);
m68k_set_feature(env, M68K_FEATURE_RTD);
+ m68k_set_feature(env, M68K_FEATURE_CHK2);
}
#define m68030_cpu_initfn m68020_cpu_initfn
-#define m68040_cpu_initfn m68020_cpu_initfn
+
+static void m68040_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68020_cpu_initfn(obj);
+ m68k_set_feature(env, M68K_FEATURE_M68040);
+}
static void m68060_cpu_initfn(Object *obj)
{
@@ -156,6 +165,7 @@ static void m68060_cpu_initfn(Object *obj)
m68k_set_feature(env, M68K_FEATURE_CAS);
m68k_set_feature(env, M68K_FEATURE_BKPT);
m68k_set_feature(env, M68K_FEATURE_RTD);
+ m68k_set_feature(env, M68K_FEATURE_CHK2);
}
static void m5208_cpu_initfn(Object *obj)
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index afae5f68ac..2985b039e1 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -45,6 +45,8 @@
#define EXCP_ADDRESS 3 /* Address error. */
#define EXCP_ILLEGAL 4 /* Illegal instruction. */
#define EXCP_DIV0 5 /* Divide by zero */
+#define EXCP_CHK 6 /* CHK, CHK2 Instructions */
+#define EXCP_TRAPCC 7 /* FTRAPcc, TRAPcc, TRAPV Instructions */
#define EXCP_PRIVILEGE 8 /* Privilege violation. */
#define EXCP_TRACE 9
#define EXCP_LINEA 10 /* Unimplemented line-A (MAC) opcode. */
@@ -53,6 +55,9 @@
#define EXCP_DEBEGBP 13 /* Breakpoint debug interrupt. */
#define EXCP_FORMAT 14 /* RTE format error. */
#define EXCP_UNINITIALIZED 15
+#define EXCP_SPURIOUS 24 /* Spurious interrupt */
+#define EXCP_INT_LEVEL_1 25 /* Level 1 Interrupt autovector */
+#define EXCP_INT_LEVEL_7 31 /* Level 7 Interrupt autovector */
#define EXCP_TRAP0 32 /* User trap #0. */
#define EXCP_TRAP15 47 /* User trap #15. */
#define EXCP_FP_BSUN 48 /* Branch Set on Unordered */
@@ -63,6 +68,9 @@
#define EXCP_FP_OVFL 53 /* Overflow */
#define EXCP_FP_SNAN 54 /* Signaling Not-A-Number */
#define EXCP_FP_UNIMP 55 /* Unimplemented Data type */
+#define EXCP_MMU_CONF 56 /* MMU Configuration Error */
+#define EXCP_MMU_ILLEGAL 57 /* MMU Illegal Operation Error */
+#define EXCP_MMU_ACCESS 58 /* MMU Access Level Violation Error */
#define EXCP_UNSUPPORTED 61
#define EXCP_RTE 0x100
@@ -81,7 +89,7 @@ typedef struct CPUM68KState {
/* SSP and USP. The current_sp is stored in aregs[7], the other here. */
int current_sp;
- uint32_t sp[2];
+ uint32_t sp[3];
/* Condition flags. */
uint32_t cc_op;
@@ -170,6 +178,7 @@ int cpu_m68k_signal_handler(int host_signum, void *pinfo,
void *puc);
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
+void cpu_m68k_set_sr(CPUM68KState *env, uint32_t);
void cpu_m68k_set_fpcr(CPUM68KState *env, uint32_t val);
@@ -182,7 +191,7 @@ void cpu_m68k_set_fpcr(CPUM68KState *env, uint32_t val);
*/
typedef enum {
/* Translator only -- use env->cc_op. */
- CC_OP_DYNAMIC = -1,
+ CC_OP_DYNAMIC,
/* Each flag bit computed into cc_[xcnvz]. */
CC_OP_FLAGS,
@@ -210,10 +219,79 @@ typedef enum {
#define SR_I 0x0700
#define SR_M 0x1000
#define SR_S 0x2000
-#define SR_T 0x8000
+#define SR_T_SHIFT 14
+#define SR_T 0xc000
#define M68K_SSP 0
#define M68K_USP 1
+#define M68K_ISP 2
+
+/* m68k Control Registers */
+
+/* ColdFire */
+/* Memory Management Control Registers */
+#define M68K_CR_ASID 0x003
+#define M68K_CR_ACR0 0x004
+#define M68K_CR_ACR1 0x005
+#define M68K_CR_ACR2 0x006
+#define M68K_CR_ACR3 0x007
+#define M68K_CR_MMUBAR 0x008
+
+/* Processor Miscellaneous Registers */
+#define M68K_CR_PC 0x80F
+
+/* Local Memory and Module Control Registers */
+#define M68K_CR_ROMBAR0 0xC00
+#define M68K_CR_ROMBAR1 0xC01
+#define M68K_CR_RAMBAR0 0xC04
+#define M68K_CR_RAMBAR1 0xC05
+#define M68K_CR_MPCR 0xC0C
+#define M68K_CR_EDRAMBAR 0xC0D
+#define M68K_CR_SECMBAR 0xC0E
+#define M68K_CR_MBAR 0xC0F
+
+/* Local Memory Address Permutation Control Registers */
+#define M68K_CR_PCR1U0 0xD02
+#define M68K_CR_PCR1L0 0xD03
+#define M68K_CR_PCR2U0 0xD04
+#define M68K_CR_PCR2L0 0xD05
+#define M68K_CR_PCR3U0 0xD06
+#define M68K_CR_PCR3L0 0xD07
+#define M68K_CR_PCR1U1 0xD0A
+#define M68K_CR_PCR1L1 0xD0B
+#define M68K_CR_PCR2U1 0xD0C
+#define M68K_CR_PCR2L1 0xD0D
+#define M68K_CR_PCR3U1 0xD0E
+#define M68K_CR_PCR3L1 0xD0F
+
+/* MC680x0 */
+/* MC680[1234]0/CPU32 */
+#define M68K_CR_SFC 0x000
+#define M68K_CR_DFC 0x001
+#define M68K_CR_USP 0x800
+#define M68K_CR_VBR 0x801 /* + Coldfire */
+
+/* MC680[234]0 */
+#define M68K_CR_CACR 0x002 /* + Coldfire */
+#define M68K_CR_CAAR 0x802 /* MC68020 and MC68030 only */
+#define M68K_CR_MSP 0x803
+#define M68K_CR_ISP 0x804
+
+/* MC68040/MC68LC040 */
+#define M68K_CR_TC 0x003
+#define M68K_CR_ITT0 0x004
+#define M68K_CR_ITT1 0x005
+#define M68K_CR_DTT0 0x006
+#define M68K_CR_DTT1 0x007
+#define M68K_CR_MMUSR 0x805
+#define M68K_CR_URP 0x806
+#define M68K_CR_SRP 0x807
+
+/* MC68EC040 */
+#define M68K_CR_IACR0 0x004
+#define M68K_CR_IACR1 0x005
+#define M68K_CR_DACR0 0x006
+#define M68K_CR_DACR1 0x007
#define M68K_FPIAR_SHIFT 0
#define M68K_FPIAR (1 << M68K_FPIAR_SHIFT)
@@ -296,6 +374,8 @@ enum m68k_features {
M68K_FEATURE_CAS,
M68K_FEATURE_BKPT,
M68K_FEATURE_RTD,
+ M68K_FEATURE_CHK2,
+ M68K_FEATURE_M68040, /* instructions specific to MC68040 */
};
static inline int m68k_feature(CPUM68KState *env, int feature)
diff --git a/target/m68k/gdbstub.c b/target/m68k/gdbstub.c
index c7f44c9bb3..99e5be8132 100644
--- a/target/m68k/gdbstub.c
+++ b/target/m68k/gdbstub.c
@@ -63,7 +63,7 @@ int m68k_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
} else {
switch (n) {
case 16:
- env->sr = tmp;
+ cpu_m68k_set_sr(env, tmp);
break;
case 17:
env->pc = tmp;
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
index 7e50ff5871..a999389e9a 100644
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -171,28 +171,84 @@ void m68k_cpu_init_gdb(M68kCPU *cpu)
/* TODO: Add [E]MAC registers. */
}
-void HELPER(movec)(CPUM68KState *env, uint32_t reg, uint32_t val)
+void HELPER(cf_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
{
M68kCPU *cpu = m68k_env_get_cpu(env);
switch (reg) {
- case 0x02: /* CACR */
+ case M68K_CR_CACR:
env->cacr = val;
m68k_switch_sp(env);
break;
- case 0x04: case 0x05: case 0x06: case 0x07: /* ACR[0-3] */
+ case M68K_CR_ACR0:
+ case M68K_CR_ACR1:
+ case M68K_CR_ACR2:
+ case M68K_CR_ACR3:
/* TODO: Implement Access Control Registers. */
break;
- case 0x801: /* VBR */
+ case M68K_CR_VBR:
env->vbr = val;
break;
/* TODO: Implement control registers. */
default:
- cpu_abort(CPU(cpu), "Unimplemented control register write 0x%x = 0x%x\n",
+ cpu_abort(CPU(cpu),
+ "Unimplemented control register write 0x%x = 0x%x\n",
reg, val);
}
}
+void HELPER(m68k_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val)
+{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
+ switch (reg) {
+ /* MC680[1234]0 */
+ case M68K_CR_VBR:
+ env->vbr = val;
+ return;
+ /* MC680[234]0 */
+ case M68K_CR_CACR:
+ env->cacr = val;
+ m68k_switch_sp(env);
+ return;
+ /* MC680[34]0 */
+ case M68K_CR_USP:
+ env->sp[M68K_USP] = val;
+ return;
+ case M68K_CR_MSP:
+ env->sp[M68K_SSP] = val;
+ return;
+ case M68K_CR_ISP:
+ env->sp[M68K_ISP] = val;
+ return;
+ }
+ cpu_abort(CPU(cpu), "Unimplemented control register write 0x%x = 0x%x\n",
+ reg, val);
+}
+
+uint32_t HELPER(m68k_movec_from)(CPUM68KState *env, uint32_t reg)
+{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
+ switch (reg) {
+ /* MC680[1234]0 */
+ case M68K_CR_VBR:
+ return env->vbr;
+ /* MC680[234]0 */
+ case M68K_CR_CACR:
+ return env->cacr;
+ /* MC680[34]0 */
+ case M68K_CR_USP:
+ return env->sp[M68K_USP];
+ case M68K_CR_MSP:
+ return env->sp[M68K_SSP];
+ case M68K_CR_ISP:
+ return env->sp[M68K_ISP];
+ }
+ cpu_abort(CPU(cpu), "Unimplemented control register read 0x%x\n",
+ reg);
+}
+
void HELPER(set_macsr)(CPUM68KState *env, uint32_t val)
{
uint32_t acc;
@@ -232,8 +288,20 @@ void m68k_switch_sp(CPUM68KState *env)
int new_sp;
env->sp[env->current_sp] = env->aregs[7];
- new_sp = (env->sr & SR_S && env->cacr & M68K_CACR_EUSP)
- ? M68K_SSP : M68K_USP;
+ if (m68k_feature(env, M68K_FEATURE_M68000)) {
+ if (env->sr & SR_S) {
+ if (env->sr & SR_M) {
+ new_sp = M68K_SSP;
+ } else {
+ new_sp = M68K_ISP;
+ }
+ } else {
+ new_sp = M68K_USP;
+ }
+ } else {
+ new_sp = (env->sr & SR_S && env->cacr & M68K_CACR_EUSP)
+ ? M68K_SSP : M68K_USP;
+ }
env->aregs[7] = env->sp[new_sp];
env->current_sp = new_sp;
}
@@ -316,13 +384,17 @@ uint32_t HELPER(sats)(uint32_t val, uint32_t v)
return val;
}
-void HELPER(set_sr)(CPUM68KState *env, uint32_t val)
+void cpu_m68k_set_sr(CPUM68KState *env, uint32_t sr)
{
- env->sr = val & 0xffe0;
- cpu_m68k_set_ccr(env, val);
+ env->sr = sr & 0xffe0;
+ cpu_m68k_set_ccr(env, sr);
m68k_switch_sp(env);
}
+void HELPER(set_sr)(CPUM68KState *env, uint32_t val)
+{
+ cpu_m68k_set_sr(env, val);
+}
/* MAC unit. */
/* FIXME: The MAC unit implementation is a bit of a mess. Some helpers
@@ -707,3 +779,10 @@ void HELPER(set_mac_extu)(CPUM68KState *env, uint32_t val, uint32_t acc)
res |= (uint64_t)(val & 0xffff0000) << 16;
env->macc[acc + 1] = res;
}
+
+#if defined(CONFIG_SOFTMMU)
+void HELPER(reset)(CPUM68KState *env)
+{
+ /* FIXME: reset all except CPU */
+}
+#endif
diff --git a/target/m68k/helper.h b/target/m68k/helper.h
index eebe52dae5..57f210aa14 100644
--- a/target/m68k/helper.h
+++ b/target/m68k/helper.h
@@ -8,7 +8,9 @@ DEF_HELPER_4(divsl, void, env, int, int, s32)
DEF_HELPER_4(divull, void, env, int, int, i32)
DEF_HELPER_4(divsll, void, env, int, int, s32)
DEF_HELPER_2(set_sr, void, env, i32)
-DEF_HELPER_3(movec, void, env, i32, i32)
+DEF_HELPER_3(cf_movec_to, void, env, i32, i32)
+DEF_HELPER_3(m68k_movec_to, void, env, i32, i32)
+DEF_HELPER_2(m68k_movec_from, i32, env, i32)
DEF_HELPER_4(cas2w, void, env, i32, i32, i32)
DEF_HELPER_4(cas2l, void, env, i32, i32, i32)
DEF_HELPER_4(cas2l_parallel, void, env, i32, i32, i32)
@@ -94,3 +96,10 @@ DEF_HELPER_FLAGS_4(bfchg_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
DEF_HELPER_FLAGS_4(bfclr_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
DEF_HELPER_FLAGS_4(bfset_mem, TCG_CALL_NO_WG, i32, env, i32, s32, i32)
DEF_HELPER_FLAGS_4(bfffo_mem, TCG_CALL_NO_WG, i64, env, i32, s32, i32)
+
+DEF_HELPER_3(chk, void, env, s32, s32)
+DEF_HELPER_4(chk2, void, env, s32, s32, s32)
+
+#if defined(CONFIG_SOFTMMU)
+DEF_HELPER_FLAGS_1(reset, TCG_CALL_NO_RWG, void, env)
+#endif
diff --git a/target/m68k/monitor.c b/target/m68k/monitor.c
new file mode 100644
index 0000000000..52781e85f0
--- /dev/null
+++ b/target/m68k/monitor.c
@@ -0,0 +1,40 @@
+/*
+ * QEMU monitor for m68k
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/hmp-target.h"
+
+static const MonitorDef monitor_defs[] = {
+ { "d0", offsetof(CPUM68KState, dregs[0]) },
+ { "d1", offsetof(CPUM68KState, dregs[1]) },
+ { "d2", offsetof(CPUM68KState, dregs[2]) },
+ { "d3", offsetof(CPUM68KState, dregs[3]) },
+ { "d4", offsetof(CPUM68KState, dregs[4]) },
+ { "d5", offsetof(CPUM68KState, dregs[5]) },
+ { "d6", offsetof(CPUM68KState, dregs[6]) },
+ { "d7", offsetof(CPUM68KState, dregs[7]) },
+ { "a0", offsetof(CPUM68KState, aregs[0]) },
+ { "a1", offsetof(CPUM68KState, aregs[1]) },
+ { "a2", offsetof(CPUM68KState, aregs[2]) },
+ { "a3", offsetof(CPUM68KState, aregs[3]) },
+ { "a4", offsetof(CPUM68KState, aregs[4]) },
+ { "a5", offsetof(CPUM68KState, aregs[5]) },
+ { "a6", offsetof(CPUM68KState, aregs[6]) },
+ { "a7", offsetof(CPUM68KState, aregs[7]) },
+ { "pc", offsetof(CPUM68KState, pc) },
+ { "sr", offsetof(CPUM68KState, sr) },
+ { "ssp", offsetof(CPUM68KState, sp[0]) },
+ { "usp", offsetof(CPUM68KState, sp[1]) },
+ { "isp", offsetof(CPUM68KState, sp[2]) },
+ { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+ return monitor_defs;
+}
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
index 63089511cb..c61ca9392f 100644
--- a/target/m68k/op_helper.c
+++ b/target/m68k/op_helper.c
@@ -46,15 +46,12 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
ret = m68k_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (unlikely(ret)) {
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- cpu_loop_exit(cs);
+ /* now we have a real cpu fault */
+ cpu_loop_exit_restore(cs, retaddr);
}
}
-static void do_rte(CPUM68KState *env)
+static void cf_rte(CPUM68KState *env)
{
uint32_t sp;
uint32_t fmt;
@@ -65,13 +62,158 @@ static void do_rte(CPUM68KState *env)
sp |= (fmt >> 28) & 3;
env->aregs[7] = sp + 8;
- helper_set_sr(env, fmt);
+ cpu_m68k_set_sr(env, fmt);
}
-static void do_interrupt_all(CPUM68KState *env, int is_hw)
+static void m68k_rte(CPUM68KState *env)
+{
+ uint32_t sp;
+ uint16_t fmt;
+ uint16_t sr;
+
+ sp = env->aregs[7];
+throwaway:
+ sr = cpu_lduw_kernel(env, sp);
+ sp += 2;
+ env->pc = cpu_ldl_kernel(env, sp);
+ sp += 4;
+ if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) {
+ /* all except 68000 */
+ fmt = cpu_lduw_kernel(env, sp);
+ sp += 2;
+ switch (fmt >> 12) {
+ case 0:
+ break;
+ case 1:
+ env->aregs[7] = sp;
+ cpu_m68k_set_sr(env, sr);
+ goto throwaway;
+ case 2:
+ case 3:
+ sp += 4;
+ break;
+ case 4:
+ sp += 8;
+ break;
+ case 7:
+ sp += 52;
+ break;
+ }
+ }
+ env->aregs[7] = sp;
+ cpu_m68k_set_sr(env, sr);
+}
+
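
m68k_rte() above pops SR and PC and then uses the top nibble of the format word to decide how much additional state to discard, looping back on a format-1 (throwaway) frame. A standalone sketch of just that decode, mirroring the switch in the hunk (sizes taken from the code above; illustration only):

#include <stdio.h>

/* Extra bytes skipped for each RTE frame format; -1 marks the format-1
 * throwaway frame that restarts the pop from the new stack pointer. */
static int rte_extra_bytes(unsigned format_word)
{
    switch (format_word >> 12) {
    case 0:
        return 0;
    case 1:
        return -1;  /* throwaway frame */
    case 2:
    case 3:
        return 4;
    case 4:
        return 8;
    case 7:
        return 52;
    default:
        return -2;  /* not handled by the hunk above */
    }
}

int main(void)
{
    unsigned fmts[] = { 0x0064, 0x1064, 0x2064, 0x7064 };
    for (int i = 0; i < 4; i++) {
        printf("format %u -> %d extra bytes\n", fmts[i] >> 12,
               rte_extra_bytes(fmts[i]));
    }
    return 0;
}
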
+static const char *m68k_exception_name(int index)
+{
+ switch (index) {
+ case EXCP_ACCESS:
+ return "Access Fault";
+ case EXCP_ADDRESS:
+ return "Address Error";
+ case EXCP_ILLEGAL:
+ return "Illegal Instruction";
+ case EXCP_DIV0:
+ return "Divide by Zero";
+ case EXCP_CHK:
+ return "CHK/CHK2";
+ case EXCP_TRAPCC:
+ return "FTRAPcc, TRAPcc, TRAPV";
+ case EXCP_PRIVILEGE:
+ return "Privilege Violation";
+ case EXCP_TRACE:
+ return "Trace";
+ case EXCP_LINEA:
+ return "A-Line";
+ case EXCP_LINEF:
+ return "F-Line";
+ case EXCP_DEBEGBP: /* 68020/030 only */
+ return "Copro Protocol Violation";
+ case EXCP_FORMAT:
+ return "Format Error";
+ case EXCP_UNINITIALIZED:
+ return "Unitialized Interruot";
+ case EXCP_SPURIOUS:
+ return "Spurious Interrupt";
+ case EXCP_INT_LEVEL_1:
+ return "Level 1 Interrupt";
+ case EXCP_INT_LEVEL_1 + 1:
+ return "Level 2 Interrupt";
+ case EXCP_INT_LEVEL_1 + 2:
+ return "Level 3 Interrupt";
+ case EXCP_INT_LEVEL_1 + 3:
+ return "Level 4 Interrupt";
+ case EXCP_INT_LEVEL_1 + 4:
+ return "Level 5 Interrupt";
+ case EXCP_INT_LEVEL_1 + 5:
+ return "Level 6 Interrupt";
+ case EXCP_INT_LEVEL_1 + 6:
+ return "Level 7 Interrupt";
+ case EXCP_TRAP0:
+ return "TRAP #0";
+ case EXCP_TRAP0 + 1:
+ return "TRAP #1";
+ case EXCP_TRAP0 + 2:
+ return "TRAP #2";
+ case EXCP_TRAP0 + 3:
+ return "TRAP #3";
+ case EXCP_TRAP0 + 4:
+ return "TRAP #4";
+ case EXCP_TRAP0 + 5:
+ return "TRAP #5";
+ case EXCP_TRAP0 + 6:
+ return "TRAP #6";
+ case EXCP_TRAP0 + 7:
+ return "TRAP #7";
+ case EXCP_TRAP0 + 8:
+ return "TRAP #8";
+ case EXCP_TRAP0 + 9:
+ return "TRAP #9";
+ case EXCP_TRAP0 + 10:
+ return "TRAP #10";
+ case EXCP_TRAP0 + 11:
+ return "TRAP #11";
+ case EXCP_TRAP0 + 12:
+ return "TRAP #12";
+ case EXCP_TRAP0 + 13:
+ return "TRAP #13";
+ case EXCP_TRAP0 + 14:
+ return "TRAP #14";
+ case EXCP_TRAP0 + 15:
+ return "TRAP #15";
+ case EXCP_FP_BSUN:
+ return "FP Branch/Set on unordered condition";
+ case EXCP_FP_INEX:
+ return "FP Inexact Result";
+ case EXCP_FP_DZ:
+ return "FP Divide by Zero";
+ case EXCP_FP_UNFL:
+ return "FP Underflow";
+ case EXCP_FP_OPERR:
+ return "FP Operand Error";
+ case EXCP_FP_OVFL:
+ return "FP Overflow";
+ case EXCP_FP_SNAN:
+ return "FP Signaling NAN";
+ case EXCP_FP_UNIMP:
+ return "FP Unimplemented Data Type";
+ case EXCP_MMU_CONF: /* 68030/68851 only */
+ return "MMU Configuration Error";
+ case EXCP_MMU_ILLEGAL: /* 68851 only */
+ return "MMU Illegal Operation";
+ case EXCP_MMU_ACCESS: /* 68851 only */
+ return "MMU Access Level Violation";
+ case 64 ... 255:
+ return "User Defined Vector";
+ }
+ return "Unassigned";
+}
+
+static void cf_interrupt_all(CPUM68KState *env, int is_hw)
{
CPUState *cs = CPU(m68k_env_get_cpu(env));
uint32_t sp;
+ uint32_t sr;
uint32_t fmt;
uint32_t retaddr;
uint32_t vector;
@@ -83,7 +225,7 @@ static void do_interrupt_all(CPUM68KState *env, int is_hw)
switch (cs->exception_index) {
case EXCP_RTE:
/* Return from an exception. */
- do_rte(env);
+ cf_rte(env);
return;
case EXCP_HALT_INSN:
if (semihosting_enabled()
@@ -109,10 +251,17 @@ static void do_interrupt_all(CPUM68KState *env, int is_hw)
vector = cs->exception_index << 2;
+ sr = env->sr | cpu_m68k_get_ccr(env);
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ qemu_log("INT %6d: %s(%#x) pc=%08x sp=%08x sr=%04x\n",
+ ++count, m68k_exception_name(cs->exception_index),
+ vector, env->pc, env->aregs[7], sr);
+ }
+
fmt |= 0x40000000;
fmt |= vector << 16;
- fmt |= env->sr;
- fmt |= cpu_m68k_get_ccr(env);
+ fmt |= sr;
env->sr |= SR_S;
if (is_hw) {
@@ -134,6 +283,119 @@ static void do_interrupt_all(CPUM68KState *env, int is_hw)
env->pc = cpu_ldl_kernel(env, env->vbr + vector);
}
+static inline void do_stack_frame(CPUM68KState *env, uint32_t *sp,
+ uint16_t format, uint16_t sr,
+ uint32_t addr, uint32_t retaddr)
+{
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
+ switch (format) {
+ case 4:
+ *sp -= 4;
+ cpu_stl_kernel(env, *sp, env->pc);
+ *sp -= 4;
+ cpu_stl_kernel(env, *sp, addr);
+ break;
+ case 3:
+ case 2:
+ *sp -= 4;
+ cpu_stl_kernel(env, *sp, addr);
+ break;
+ }
+ *sp -= 2;
+ cpu_stw_kernel(env, *sp, (format << 12) + (cs->exception_index << 2));
+ *sp -= 4;
+ cpu_stl_kernel(env, *sp, retaddr);
+ *sp -= 2;
+ cpu_stw_kernel(env, *sp, sr);
+}
+
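
do_stack_frame() above pushes SR, the return PC, a combined format/vector word, and (for formats 2-4) one or two extra long words. A standalone sketch of the resulting layout relative to the final SP for formats 0 and 2, with offsets derived from the push order in the hunk (illustration only):

#include <stdio.h>

struct frame_word { int offset; const char *what; };

/* Describe the frame laid down for a given format, lowest address (the
 * final SP) first; formats 3 and 4 are omitted to keep the sketch short. */
static int describe_frame(int format, struct frame_word *out)
{
    int n = 0;
    out[n++] = (struct frame_word){ 0, "SR (word)" };
    out[n++] = (struct frame_word){ 2, "return PC (long)" };
    out[n++] = (struct frame_word){ 6, "format/vector (word)" };
    if (format == 2) {
        out[n++] = (struct frame_word){ 8, "fault address (long)" };
    }
    return n;
}

int main(void)
{
    struct frame_word w[4];
    int fmts[] = { 0, 2 };
    for (int f = 0; f < 2; f++) {
        int n = describe_frame(fmts[f], w);
        printf("format %d frame:\n", fmts[f]);
        for (int i = 0; i < n; i++) {
            printf("  SP+%d: %s\n", w[i].offset, w[i].what);
        }
    }
    return 0;
}
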
+static void m68k_interrupt_all(CPUM68KState *env, int is_hw)
+{
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
+ uint32_t sp;
+ uint32_t retaddr;
+ uint32_t vector;
+ uint16_t sr, oldsr;
+
+ retaddr = env->pc;
+
+ if (!is_hw) {
+ switch (cs->exception_index) {
+ case EXCP_RTE:
+ /* Return from an exception. */
+ m68k_rte(env);
+ return;
+ case EXCP_TRAP0 ... EXCP_TRAP15:
+ /* Move the PC after the trap instruction. */
+ retaddr += 2;
+ break;
+ }
+ }
+
+ vector = cs->exception_index << 2;
+
+ sr = env->sr | cpu_m68k_get_ccr(env);
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ qemu_log("INT %6d: %s(%#x) pc=%08x sp=%08x sr=%04x\n",
+ ++count, m68k_exception_name(cs->exception_index),
+ vector, env->pc, env->aregs[7], sr);
+ }
+
+ /*
+ * MC68040UM/AD, chapter 9.3.10
+ */
+
+ /* "the processor first make an internal copy" */
+ oldsr = sr;
+ /* "set the mode to supervisor" */
+ sr |= SR_S;
+ /* "suppress tracing" */
+ sr &= ~SR_T;
+ /* "sets the processor interrupt mask" */
+ if (is_hw) {
+ sr |= (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
+ }
+ cpu_m68k_set_sr(env, sr);
+ sp = env->aregs[7];
+
+ sp &= ~1;
+ if (cs->exception_index == EXCP_ADDRESS) {
+ do_stack_frame(env, &sp, 2, oldsr, 0, retaddr);
+ } else if (cs->exception_index == EXCP_ILLEGAL ||
+ cs->exception_index == EXCP_DIV0 ||
+ cs->exception_index == EXCP_CHK ||
+ cs->exception_index == EXCP_TRAPCC ||
+ cs->exception_index == EXCP_TRACE) {
+ /* FIXME: addr is not only env->pc */
+ do_stack_frame(env, &sp, 2, oldsr, env->pc, retaddr);
+ } else if (is_hw && oldsr & SR_M &&
+ cs->exception_index >= EXCP_SPURIOUS &&
+ cs->exception_index <= EXCP_INT_LEVEL_7) {
+ do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
+ oldsr = sr;
+ env->aregs[7] = sp;
+ cpu_m68k_set_sr(env, sr &= ~SR_M);
+ sp = env->aregs[7] & ~1;
+ do_stack_frame(env, &sp, 1, oldsr, 0, retaddr);
+ } else {
+ do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
+ }
+
+ env->aregs[7] = sp;
+ /* Jump to vector. */
+ env->pc = cpu_ldl_kernel(env, env->vbr + vector);
+}
+
+static void do_interrupt_all(CPUM68KState *env, int is_hw)
+{
+ if (m68k_feature(env, M68K_FEATURE_M68000)) {
+ m68k_interrupt_all(env, is_hw);
+ return;
+ }
+ cf_interrupt_all(env, is_hw);
+}
+
void m68k_cpu_do_interrupt(CPUState *cs)
{
M68kCPU *cpu = M68K_CPU(cs);
@@ -682,3 +944,64 @@ uint64_t HELPER(bfffo_mem)(CPUM68KState *env, uint32_t addr,
is already zero. */
return n | ffo;
}
+
+void HELPER(chk)(CPUM68KState *env, int32_t val, int32_t ub)
+{
+ /* From the specs:
+ * X: Not affected, C,V,Z: Undefined,
+ * N: Set if val < 0; cleared if val > ub, undefined otherwise
+ * We implement here the values found on a real MC68040:
+ * X,V,Z: Not affected
+ * N: Set if val < 0; cleared if val >= 0
+ * C: if 0 <= ub: set if val < 0 or val > ub, cleared otherwise
+ * if 0 > ub: set if val > ub and val < 0, cleared otherwise
+ */
+ env->cc_n = val;
+ env->cc_c = 0 <= ub ? val < 0 || val > ub : val > ub && val < 0;
+
+ if (val < 0 || val > ub) {
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
+
+ /* Recover PC and CC_OP for the beginning of the insn. */
+ cpu_restore_state(cs, GETPC());
+
+ /* flags have been modified by gen_flush_flags() */
+ env->cc_op = CC_OP_FLAGS;
+ /* Adjust PC to end of the insn. */
+ env->pc += 2;
+
+ cs->exception_index = EXCP_CHK;
+ cpu_loop_exit(cs);
+ }
+}
+
+void HELPER(chk2)(CPUM68KState *env, int32_t val, int32_t lb, int32_t ub)
+{
+ /* From the specs:
+ * X: Not affected, N,V: Undefined,
+ * Z: Set if val is equal to lb or ub
+ * C: Set if val < lb or val > ub, cleared otherwise
+ * We implement here the values found on a real MC68040:
+ * X,N,V: Not affected
+ * Z: Set if val is equal to lb or ub
+ * C: if lb <= ub: set if val < lb or val > ub, cleared otherwise
+ * if lb > ub: set if val > ub and val < lb, cleared otherwise
+ */
+ env->cc_z = val != lb && val != ub;
+ env->cc_c = lb <= ub ? val < lb || val > ub : val > ub && val < lb;
+
+ if (env->cc_c) {
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
+
+ /* Recover PC and CC_OP for the beginning of the insn. */
+ cpu_restore_state(cs, GETPC());
+
+ /* flags have been modified by gen_flush_flags() */
+ env->cc_op = CC_OP_FLAGS;
+ /* Adjust PC to end of the insn. */
+ env->pc += 4;
+
+ cs->exception_index = EXCP_CHK;
+ cpu_loop_exit(cs);
+ }
+}
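
The CHK/CHK2 helpers above set N and C according to behaviour observed on a real MC68040 rather than the architecturally undefined values. A standalone sketch of the CHK rule only (CHK2 follows the same pattern with a lower bound added); illustration only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* CHK condition codes as implemented above: N tracks the sign of the tested
 * value, C is set exactly when the value is out of bounds, with the two
 * cases depending on the sign of the upper bound. */
static void chk_flags(int32_t val, int32_t ub, bool *n, bool *c)
{
    *n = val < 0;
    *c = (0 <= ub) ? (val < 0 || val > ub)
                   : (val > ub && val < 0);
}

int main(void)
{
    static const int32_t tests[][2] = { { -1, 10 }, { 5, 10 }, { 11, 10 } };
    for (int i = 0; i < 3; i++) {
        bool n, c;
        chk_flags(tests[i][0], tests[i][1], &n, &c);
        printf("val=%d ub=%d -> N=%d C=%d\n",
               tests[i][0], tests[i][1], n, c);
    }
    return 0; /* C is set for -1 and 11, clear for 5 */
}
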
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index b60909222c..f0e86a73d4 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -181,11 +181,6 @@ static void do_writebacks(DisasContext *s)
#define IS_USER(s) s->user
#endif
-/* XXX: move that elsewhere */
-/* ??? Fix exceptions. */
-static void *gen_throws_exception;
-#define gen_last_qop NULL
-
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
#ifdef DEBUG_DISPATCH
@@ -207,6 +202,7 @@ typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
#endif
static const uint8_t cc_op_live[CC_OP_NB] = {
+ [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
[CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
[CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
[CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
@@ -274,7 +270,6 @@ static void gen_raise_exception(int nr)
static void gen_exception(DisasContext *s, uint32_t where, int nr)
{
- update_cc_op(s);
gen_jmp_im(s, where);
gen_raise_exception(nr);
}
@@ -310,7 +305,6 @@ static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
default:
g_assert_not_reached();
}
- gen_throws_exception = gen_last_qop;
return tmp;
}
@@ -331,7 +325,6 @@ static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
default:
g_assert_not_reached();
}
- gen_throws_exception = gen_last_qop;
}
typedef enum {
@@ -1001,7 +994,6 @@ static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
}
tcg_temp_free(tmp);
tcg_temp_free_i64(t64);
- gen_throws_exception = gen_last_qop;
}
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
@@ -1056,7 +1048,6 @@ static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp)
}
tcg_temp_free(tmp);
tcg_temp_free_i64(t64);
- gen_throws_exception = gen_last_qop;
}
static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
@@ -1518,12 +1509,12 @@ DISAS_INSN(dbcc)
DISAS_INSN(undef_mac)
{
- gen_exception(s, s->pc - 2, EXCP_LINEA);
+ gen_exception(s, s->insn_pc, EXCP_LINEA);
}
DISAS_INSN(undef_fpu)
{
- gen_exception(s, s->pc - 2, EXCP_LINEF);
+ gen_exception(s, s->insn_pc, EXCP_LINEF);
}
DISAS_INSN(undef)
@@ -1532,8 +1523,8 @@ DISAS_INSN(undef)
for the 680x0 series, as well as those that are implemented
but actually illegal for CPU32 or pre-68020. */
qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %08x",
- insn, s->pc - 2);
- gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
+ insn, s->insn_pc);
+ gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED);
}
DISAS_INSN(mulw)
@@ -2140,6 +2131,68 @@ DISAS_INSN(bitop_im)
}
}
+static TCGv gen_get_ccr(DisasContext *s)
+{
+ TCGv dest;
+
+ update_cc_op(s);
+ dest = tcg_temp_new();
+ gen_helper_get_ccr(dest, cpu_env);
+ return dest;
+}
+
+static TCGv gen_get_sr(DisasContext *s)
+{
+ TCGv ccr;
+ TCGv sr;
+
+ ccr = gen_get_ccr(s);
+ sr = tcg_temp_new();
+ tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
+ tcg_gen_or_i32(sr, sr, ccr);
+ return sr;
+}
+
+static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
+{
+ if (ccr_only) {
+ tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
+ tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
+ tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
+ tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
+ tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
+ } else {
+ TCGv sr = tcg_const_i32(val);
+ gen_helper_set_sr(cpu_env, sr);
+ tcg_temp_free(sr);
+ }
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
+{
+ if (ccr_only) {
+ gen_helper_set_ccr(cpu_env, val);
+ } else {
+ gen_helper_set_sr(cpu_env, val);
+ }
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ bool ccr_only)
+{
+ if ((insn & 0x3f) == 0x3c) {
+ uint16_t val;
+ val = read_im16(env, s);
+ gen_set_sr_im(s, val, ccr_only);
+ } else {
+ TCGv src;
+ SRC_EA(env, src, OS_WORD, 0, NULL);
+ gen_set_sr(s, src, ccr_only);
+ }
+}
+
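
gen_set_sr_im() above exposes the translator's internal flag encoding: C and X are stored as 0 or 1, N and V carry the flag in their sign bit, and Z is stored inverted (zero means the flag is set). A standalone sketch of that unpacking, with the CCF_* constants at their architectural CCR positions (illustration only):

#include <stdint.h>
#include <stdio.h>

#define CCF_C 0x01
#define CCF_V 0x02
#define CCF_Z 0x04
#define CCF_N 0x08
#define CCF_X 0x10

struct flags { int32_t c, v, z, n, x; };

/* Unpack a packed CCR value into the per-flag representation used by the
 * translator above: C/X as 0 or 1, N/V in the sign bit, Z inverted. */
static struct flags unpack_ccr(unsigned val)
{
    return (struct flags){
        .c = (val & CCF_C) ? 1 : 0,
        .v = (val & CCF_V) ? -1 : 0,
        .z = (val & CCF_Z) ? 0 : 1, /* 0 here means "Z flag set" */
        .n = (val & CCF_N) ? -1 : 0,
        .x = (val & CCF_X) ? 1 : 0,
    };
}

int main(void)
{
    struct flags f = unpack_ccr(CCF_Z | CCF_C);
    /* prints: c=1 v=0 z=0 n=0 x=0 */
    printf("c=%d v=%d z=%d n=%d x=%d\n", f.c, f.v, f.z, f.n, f.x);
    return 0;
}
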
DISAS_INSN(arith_im)
{
int op;
@@ -2148,6 +2201,7 @@ DISAS_INSN(arith_im)
TCGv dest;
TCGv addr;
int opsize;
+ bool with_SR = ((insn & 0x3f) == 0x3c);
op = (insn >> 9) & 7;
opsize = insn_opsize(insn);
@@ -2164,32 +2218,73 @@ DISAS_INSN(arith_im)
default:
abort();
}
- SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
+
+ if (with_SR) {
+ /* SR/CCR can only be used with andi/eori/ori */
+ if (op == 2 || op == 3 || op == 6) {
+ disas_undef(env, s, insn);
+ return;
+ }
+ switch (opsize) {
+ case OS_BYTE:
+ src1 = gen_get_ccr(s);
+ break;
+ case OS_WORD:
+ if (IS_USER(s)) {
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
+ return;
+ }
+ src1 = gen_get_sr(s);
+ break;
+ case OS_LONG:
+ disas_undef(env, s, insn);
+ return;
+ }
+ } else {
+ SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
+ }
dest = tcg_temp_new();
switch (op) {
case 0: /* ori */
tcg_gen_or_i32(dest, src1, im);
- gen_logic_cc(s, dest, opsize);
+ if (with_SR) {
+ gen_set_sr(s, dest, opsize == OS_BYTE);
+ } else {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ gen_logic_cc(s, dest, opsize);
+ }
break;
case 1: /* andi */
tcg_gen_and_i32(dest, src1, im);
- gen_logic_cc(s, dest, opsize);
+ if (with_SR) {
+ gen_set_sr(s, dest, opsize == OS_BYTE);
+ } else {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ gen_logic_cc(s, dest, opsize);
+ }
break;
case 2: /* subi */
tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
tcg_gen_sub_i32(dest, src1, im);
gen_update_cc_add(dest, im, opsize);
set_cc_op(s, CC_OP_SUBB + opsize);
+ DEST_EA(env, insn, opsize, dest, &addr);
break;
case 3: /* addi */
tcg_gen_add_i32(dest, src1, im);
gen_update_cc_add(dest, im, opsize);
tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
set_cc_op(s, CC_OP_ADDB + opsize);
+ DEST_EA(env, insn, opsize, dest, &addr);
break;
case 5: /* eori */
tcg_gen_xor_i32(dest, src1, im);
- gen_logic_cc(s, dest, opsize);
+ if (with_SR) {
+ gen_set_sr(s, dest, opsize == OS_BYTE);
+ } else {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ gen_logic_cc(s, dest, opsize);
+ }
break;
case 6: /* cmpi */
gen_update_cc_cmp(s, src1, im, opsize);
@@ -2198,9 +2293,6 @@ DISAS_INSN(arith_im)
abort();
}
tcg_temp_free(im);
- if (op != 6) {
- DEST_EA(env, insn, opsize, dest, &addr);
- }
tcg_temp_free(dest);
}
@@ -2483,17 +2575,6 @@ DISAS_INSN(clr)
tcg_temp_free(zero);
}
-static TCGv gen_get_ccr(DisasContext *s)
-{
- TCGv dest;
-
- gen_flush_flags(s);
- update_cc_op(s);
- dest = tcg_temp_new();
- gen_helper_get_ccr(dest, cpu_env);
- return dest;
-}
-
DISAS_INSN(move_from_ccr)
{
TCGv ccr;
@@ -2520,43 +2601,9 @@ DISAS_INSN(neg)
tcg_temp_free(dest);
}
-static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
-{
- if (ccr_only) {
- tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
- tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
- tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
- tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
- tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
- } else {
- gen_helper_set_sr(cpu_env, tcg_const_i32(val));
- }
- set_cc_op(s, CC_OP_FLAGS);
-}
-
-static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
- int ccr_only)
-{
- if ((insn & 0x38) == 0) {
- if (ccr_only) {
- gen_helper_set_ccr(cpu_env, DREG(insn, 0));
- } else {
- gen_helper_set_sr(cpu_env, DREG(insn, 0));
- }
- set_cc_op(s, CC_OP_FLAGS);
- } else if ((insn & 0x3f) == 0x3c) {
- uint16_t val;
- val = read_im16(env, s);
- gen_set_sr_im(s, val, ccr_only);
- } else {
- disas_undef(env, s, insn);
- }
-}
-
-
DISAS_INSN(move_to_ccr)
{
- gen_set_sr(env, s, insn, 1);
+ gen_move_to_sr(env, s, insn, true);
}
DISAS_INSN(not)
@@ -2593,7 +2640,7 @@ DISAS_INSN(swap)
DISAS_INSN(bkpt)
{
- gen_exception(s, s->pc - 2, EXCP_DEBUG);
+ gen_exception(s, s->insn_pc, EXCP_DEBUG);
}
DISAS_INSN(pea)
@@ -2646,7 +2693,7 @@ DISAS_INSN(pulse)
DISAS_INSN(illegal)
{
- gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
+ gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
}
/* ??? This should be atomic. */
@@ -2676,7 +2723,7 @@ DISAS_INSN(mull)
if (ext & 0x400) {
if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
- gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
+ gen_exception(s, s->insn_pc, EXCP_UNSUPPORTED);
return;
}
@@ -2772,6 +2819,18 @@ DISAS_INSN(unlk)
tcg_temp_free(src);
}
+#if defined(CONFIG_SOFTMMU)
+DISAS_INSN(reset)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
+ return;
+ }
+
+ gen_helper_reset(cpu_env);
+}
+#endif
+
DISAS_INSN(nop)
{
}
@@ -2905,6 +2964,7 @@ DISAS_INSN(branch)
gen_jmp_tb(s, 0, s->pc);
} else {
/* Unconditional branch. */
+ update_cc_op(s);
gen_jmp_tb(s, 0, base + offset);
}
}
@@ -3956,8 +4016,8 @@ DISAS_INSN(bfop_reg)
int ofs = extract32(ext, 6, 5); /* big bit-endian */
TCGv mask, tofs, tlen;
- TCGV_UNUSED(tofs);
- TCGV_UNUSED(tlen);
+ tofs = NULL;
+ tlen = NULL;
if ((insn & 0x0f00) == 0x0d00) { /* bfffo */
tofs = tcg_temp_new();
tlen = tcg_temp_new();
@@ -3973,7 +4033,7 @@ DISAS_INSN(bfop_reg)
}
tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);
mask = tcg_const_i32(ror32(maski, ofs));
- if (!TCGV_IS_UNUSED(tofs)) {
+ if (tofs) {
tcg_gen_movi_i32(tofs, ofs);
tcg_gen_movi_i32(tlen, len);
}
@@ -3985,13 +4045,13 @@ DISAS_INSN(bfop_reg)
tcg_gen_andi_i32(tmp, tmp, 31);
mask = tcg_const_i32(0x7fffffffu);
tcg_gen_shr_i32(mask, mask, tmp);
- if (!TCGV_IS_UNUSED(tlen)) {
+ if (tlen) {
tcg_gen_addi_i32(tlen, tmp, 1);
}
} else {
/* Immediate width */
mask = tcg_const_i32(0x7fffffffu >> (len - 1));
- if (!TCGV_IS_UNUSED(tlen)) {
+ if (tlen) {
tcg_gen_movi_i32(tlen, len);
}
}
@@ -4001,7 +4061,7 @@ DISAS_INSN(bfop_reg)
tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
tcg_gen_rotr_i32(mask, mask, tmp);
- if (!TCGV_IS_UNUSED(tofs)) {
+ if (tofs) {
tcg_gen_mov_i32(tofs, tmp);
}
} else {
@@ -4009,7 +4069,7 @@ DISAS_INSN(bfop_reg)
tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
tcg_gen_rotri_i32(mask, mask, ofs);
- if (!TCGV_IS_UNUSED(tofs)) {
+ if (tofs) {
tcg_gen_movi_i32(tofs, ofs);
}
}
@@ -4212,16 +4272,148 @@ DISAS_INSN(ff1)
gen_helper_ff1(reg, reg);
}
-static TCGv gen_get_sr(DisasContext *s)
+DISAS_INSN(chk)
{
- TCGv ccr;
- TCGv sr;
+ TCGv src, reg;
+ int opsize;
- ccr = gen_get_ccr(s);
- sr = tcg_temp_new();
- tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
- tcg_gen_or_i32(sr, sr, ccr);
- return sr;
+ switch ((insn >> 7) & 3) {
+ case 3:
+ opsize = OS_WORD;
+ break;
+ case 2:
+ if (m68k_feature(env, M68K_FEATURE_CHK2)) {
+ opsize = OS_LONG;
+ break;
+ }
+ /* fallthru */
+ default:
+ gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
+ return;
+ }
+ SRC_EA(env, src, opsize, 1, NULL);
+ reg = gen_extend(DREG(insn, 9), opsize, 1);
+
+ gen_flush_flags(s);
+ gen_helper_chk(cpu_env, reg, src);
+}
+
+DISAS_INSN(chk2)
+{
+ uint16_t ext;
+ TCGv addr1, addr2, bound1, bound2, reg;
+ int opsize;
+
+ switch ((insn >> 9) & 3) {
+ case 0:
+ opsize = OS_BYTE;
+ break;
+ case 1:
+ opsize = OS_WORD;
+ break;
+ case 2:
+ opsize = OS_LONG;
+ break;
+ default:
+ gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
+ return;
+ }
+
+ ext = read_im16(env, s);
+ if ((ext & 0x0800) == 0) {
+ gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
+ return;
+ }
+
+ addr1 = gen_lea(env, s, insn, OS_UNSIZED);
+ addr2 = tcg_temp_new();
+ tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));
+
+ bound1 = gen_load(s, opsize, addr1, 1);
+ tcg_temp_free(addr1);
+ bound2 = gen_load(s, opsize, addr2, 1);
+ tcg_temp_free(addr2);
+
+ reg = tcg_temp_new();
+ if (ext & 0x8000) {
+ tcg_gen_mov_i32(reg, AREG(ext, 12));
+ } else {
+ gen_ext(reg, DREG(ext, 12), opsize, 1);
+ }
+
+ gen_flush_flags(s);
+ gen_helper_chk2(cpu_env, reg, bound1, bound2);
+ tcg_temp_free(reg);
+}
+
+static void m68k_copy_line(TCGv dst, TCGv src, int index)
+{
+ TCGv addr;
+ TCGv_i64 t0, t1;
+
+ addr = tcg_temp_new();
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ tcg_gen_andi_i32(addr, src, ~15);
+ tcg_gen_qemu_ld64(t0, addr, index);
+ tcg_gen_addi_i32(addr, addr, 8);
+ tcg_gen_qemu_ld64(t1, addr, index);
+
+ tcg_gen_andi_i32(addr, dst, ~15);
+ tcg_gen_qemu_st64(t0, addr, index);
+ tcg_gen_addi_i32(addr, addr, 8);
+ tcg_gen_qemu_st64(t1, addr, index);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free(addr);
+}
+
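
m68k_copy_line() above implements the MOVE16 data path: both operand addresses are truncated to a 16-byte boundary and one full line is copied with two 64-bit loads and stores. A standalone sketch using a plain byte array in place of guest memory and memcpy in place of the paired accesses (illustration only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy one 16-byte line between two addresses in a flat byte array,
 * ignoring the low four bits of each address as MOVE16 does. */
static void copy_line(uint8_t *ram, uint32_t dst, uint32_t src)
{
    dst &= ~15u;                      /* align destination to the line */
    src &= ~15u;                      /* align source to the line */
    memcpy(ram + dst, ram + src, 16); /* one 16-byte line */
}

int main(void)
{
    uint8_t ram[64] = { 0 };
    for (int i = 0; i < 16; i++) {
        ram[i] = (uint8_t)i;
    }
    copy_line(ram, 0x25, 0x07);       /* effectively copies 0x00..0x0f to 0x20 */
    /* prints: ram[0x20]=0 ram[0x2f]=15 */
    printf("ram[0x20]=%u ram[0x2f]=%u\n", ram[0x20], ram[0x2f]);
    return 0;
}
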
+DISAS_INSN(move16_reg)
+{
+ int index = IS_USER(s);
+ TCGv tmp;
+ uint16_t ext;
+
+ ext = read_im16(env, s);
+ if ((ext & (1 << 15)) == 0) {
+ gen_exception(s, s->insn_pc, EXCP_ILLEGAL);
+ }
+
+ m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
+
+ /* Ax can be Ay, so save Ay before incrementing Ax */
+ tmp = tcg_temp_new();
+ tcg_gen_mov_i32(tmp, AREG(ext, 12));
+ tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
+ tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
+ tcg_temp_free(tmp);
+}
+
+DISAS_INSN(move16_mem)
+{
+ int index = IS_USER(s);
+ TCGv reg, addr;
+
+ reg = AREG(insn, 0);
+ addr = tcg_const_i32(read_im32(env, s));
+
+ if ((insn >> 3) & 1) {
+ /* MOVE16 (xxx).L, (Ay) */
+ m68k_copy_line(reg, addr, index);
+ } else {
+ /* MOVE16 (Ay), (xxx).L */
+ m68k_copy_line(addr, reg, index);
+ }
+
+ tcg_temp_free(addr);
+
+ if (((insn >> 3) & 2) == 0) {
+ /* (Ay)+ */
+ tcg_gen_addi_i32(reg, reg, 16);
+ }
}
DISAS_INSN(strldsr)
@@ -4249,27 +4441,28 @@ DISAS_INSN(move_from_sr)
TCGv sr;
if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
sr = gen_get_sr(s);
DEST_EA(env, insn, OS_WORD, sr, NULL);
}
+#if defined(CONFIG_SOFTMMU)
DISAS_INSN(move_to_sr)
{
if (IS_USER(s)) {
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
- gen_set_sr(env, s, insn, 0);
+ gen_move_to_sr(env, s, insn, false);
gen_lookup_tb(s);
}
DISAS_INSN(move_from_usp)
{
if (IS_USER(s)) {
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
@@ -4279,7 +4472,7 @@ DISAS_INSN(move_from_usp)
DISAS_INSN(move_to_usp)
{
if (IS_USER(s)) {
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
tcg_gen_st_i32(AREG(insn, 0), cpu_env,
@@ -4288,6 +4481,11 @@ DISAS_INSN(move_to_usp)
DISAS_INSN(halt)
{
+ if (IS_USER(s)) {
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
+ return;
+ }
+
gen_exception(s, s->pc, EXCP_HALT_INSN);
}
@@ -4296,7 +4494,7 @@ DISAS_INSN(stop)
uint16_t ext;
if (IS_USER(s)) {
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
@@ -4310,19 +4508,19 @@ DISAS_INSN(stop)
DISAS_INSN(rte)
{
if (IS_USER(s)) {
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
- gen_exception(s, s->pc - 2, EXCP_RTE);
+ gen_exception(s, s->insn_pc, EXCP_RTE);
}
-DISAS_INSN(movec)
+DISAS_INSN(cf_movec)
{
uint16_t ext;
TCGv reg;
if (IS_USER(s)) {
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
@@ -4333,14 +4531,39 @@ DISAS_INSN(movec)
} else {
reg = DREG(ext, 12);
}
- gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
+ gen_helper_cf_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
+ gen_lookup_tb(s);
+}
+
+DISAS_INSN(m68k_movec)
+{
+ uint16_t ext;
+ TCGv reg;
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
+ return;
+ }
+
+ ext = read_im16(env, s);
+
+ if (ext & 0x8000) {
+ reg = AREG(ext, 12);
+ } else {
+ reg = DREG(ext, 12);
+ }
+ if (insn & 1) {
+ gen_helper_m68k_movec_to(cpu_env, tcg_const_i32(ext & 0xfff), reg);
+ } else {
+ gen_helper_m68k_movec_from(reg, cpu_env, tcg_const_i32(ext & 0xfff));
+ }
gen_lookup_tb(s);
}
DISAS_INSN(intouch)
{
if (IS_USER(s)) {
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
/* ICache fetch. Implement as no-op. */
@@ -4349,15 +4572,33 @@ DISAS_INSN(intouch)
DISAS_INSN(cpushl)
{
if (IS_USER(s)) {
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
+ return;
+ }
+ /* Cache push/invalidate. Implement as no-op. */
+}
+
+DISAS_INSN(cpush)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
/* Cache push/invalidate. Implement as no-op. */
}
+DISAS_INSN(cinv)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
+ return;
+ }
+ /* Invalidate cache line. Implement as no-op. */
+}
+
DISAS_INSN(wddata)
{
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
}
DISAS_INSN(wdebug)
@@ -4365,16 +4606,17 @@ DISAS_INSN(wdebug)
M68kCPU *cpu = m68k_env_get_cpu(env);
if (IS_USER(s)) {
- gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
return;
}
/* TODO: Implement wdebug. */
cpu_abort(CPU(cpu), "WDEBUG not implemented");
}
+#endif
DISAS_INSN(trap)
{
- gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
+ gen_exception(s, s->insn_pc, EXCP_TRAP0 + (insn & 0xf));
}
static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
@@ -4883,6 +5125,7 @@ static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
DisasCompare c;
gen_fcc_cond(&c, s, cond);
+ update_cc_op(s);
tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
free_cond(&c);
}
@@ -4927,21 +5170,40 @@ DISAS_INSN(fscc)
tcg_temp_free(tmp);
}
+#if defined(CONFIG_SOFTMMU)
DISAS_INSN(frestore)
{
- M68kCPU *cpu = m68k_env_get_cpu(env);
+ TCGv addr;
- /* TODO: Implement frestore. */
- cpu_abort(CPU(cpu), "FRESTORE not implemented");
+ if (IS_USER(s)) {
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
+ return;
+ }
+ if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
+ SRC_EA(env, addr, OS_LONG, 0, NULL);
+ /* FIXME: check the state frame */
+ } else {
+ disas_undef(env, s, insn);
+ }
}
DISAS_INSN(fsave)
{
- M68kCPU *cpu = m68k_env_get_cpu(env);
+ if (IS_USER(s)) {
+ gen_exception(s, s->insn_pc, EXCP_PRIVILEGE);
+ return;
+ }
- /* TODO: Implement fsave. */
- cpu_abort(CPU(cpu), "FSAVE not implemented");
+ if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
+ /* always write IDLE */
+ TCGv idle = tcg_const_i32(0x41000000);
+ DEST_EA(env, insn, OS_LONG, idle, NULL);
+ tcg_temp_free(idle);
+ } else {
+ disas_undef(env, s, insn);
+ }
}
+#endif
static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
{
@@ -5314,7 +5576,7 @@ void register_m68k_insns (CPUM68KState *env)
BASE(undef, 0000, 0000);
INSN(arith_im, 0080, fff8, CF_ISA_A);
INSN(arith_im, 0000, ff00, M68000);
- INSN(undef, 00c0, ffc0, M68000);
+ INSN(chk2, 00c0, f9c0, CHK2);
INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
BASE(bitop_reg, 0100, f1c0);
BASE(bitop_reg, 0140, f1c0);
@@ -5347,6 +5609,7 @@ void register_m68k_insns (CPUM68KState *env)
BASE(move, 1000, f000);
BASE(move, 2000, f000);
BASE(move, 3000, f000);
+ INSN(chk, 4000, f040, M68000);
INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
INSN(negx, 4080, fff8, CF_ISA_A);
INSN(negx, 4000, ff00, M68000);
@@ -5364,8 +5627,9 @@ void register_m68k_insns (CPUM68KState *env)
BASE(move_to_ccr, 44c0, ffc0);
INSN(not, 4680, fff8, CF_ISA_A);
INSN(not, 4600, ff00, M68000);
- INSN(undef, 46c0, ffc0, M68000);
- INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
+#if defined(CONFIG_SOFTMMU)
+ BASE(move_to_sr, 46c0, ffc0);
+#endif
INSN(nbcd, 4800, ffc0, M68000);
INSN(linkl, 4808, fff8, M68000);
BASE(pea, 4840, ffc0);
@@ -5380,7 +5644,9 @@ void register_m68k_insns (CPUM68KState *env)
BASE(tst, 4a00, ff00);
INSN(tas, 4ac0, ffc0, CF_ISA_B);
INSN(tas, 4ac0, ffc0, M68000);
+#if defined(CONFIG_SOFTMMU)
INSN(halt, 4ac8, ffff, CF_ISA_A);
+#endif
INSN(pulse, 4acc, ffff, CF_ISA_A);
BASE(illegal, 4afc, ffff);
INSN(mull, 4c00, ffc0, CF_ISA_A);
@@ -5391,14 +5657,18 @@ void register_m68k_insns (CPUM68KState *env)
BASE(trap, 4e40, fff0);
BASE(link, 4e50, fff8);
BASE(unlk, 4e58, fff8);
+#if defined(CONFIG_SOFTMMU)
INSN(move_to_usp, 4e60, fff8, USP);
INSN(move_from_usp, 4e68, fff8, USP);
- BASE(nop, 4e71, ffff);
+ INSN(reset, 4e70, ffff, M68000);
BASE(stop, 4e72, ffff);
BASE(rte, 4e73, ffff);
+ INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
+ INSN(m68k_movec, 4e7a, fffe, M68000);
+#endif
+ BASE(nop, 4e71, ffff);
INSN(rtd, 4e74, ffff, RTD);
BASE(rts, 4e75, ffff);
- INSN(movec, 4e7b, ffff, CF_ISA_A);
BASE(jump, 4e80, ffc0);
BASE(jump, 4ec0, ffc0);
INSN(addsubq, 5000, f080, M68000);
@@ -5502,17 +5772,23 @@ void register_m68k_insns (CPUM68KState *env)
BASE(undef_fpu, f000, f000);
INSN(fpu, f200, ffc0, CF_FPU);
INSN(fbcc, f280, ffc0, CF_FPU);
- INSN(frestore, f340, ffc0, CF_FPU);
- INSN(fsave, f300, ffc0, CF_FPU);
INSN(fpu, f200, ffc0, FPU);
INSN(fscc, f240, ffc0, FPU);
INSN(fbcc, f280, ff80, FPU);
+#if defined(CONFIG_SOFTMMU)
+ INSN(frestore, f340, ffc0, CF_FPU);
+ INSN(fsave, f300, ffc0, CF_FPU);
INSN(frestore, f340, ffc0, FPU);
INSN(fsave, f300, ffc0, FPU);
INSN(intouch, f340, ffc0, CF_ISA_A);
INSN(cpushl, f428, ff38, CF_ISA_A);
+ INSN(cpush, f420, ff20, M68040);
+ INSN(cinv, f400, ff20, M68040);
INSN(wddata, fb00, ff00, CF_ISA_A);
INSN(wdebug, fbc0, ffc0, CF_ISA_A);
+#endif
+ INSN(move16_mem, f600, ffe0, M68040);
+ INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
@@ -5561,7 +5837,6 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
gen_tb_start(tb);
do {
pc_offset = dc->pc - pc_start;
- gen_throws_exception = NULL;
tcg_gen_insn_start(dc->pc, dc->cc_op);
num_insns++;
@@ -5661,9 +5936,12 @@ void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
}
cpu_fprintf (f, "PC = %08x ", env->pc);
sr = env->sr | cpu_m68k_get_ccr(env);
- cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
- (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
- (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
+ cpu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
+ sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
+ (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
+ (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
+ (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
+ (sr & CCF_C) ? 'C' : '-');
cpu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
(env->fpsr & FPSR_CC_A) ? 'A' : '-',
(env->fpsr & FPSR_CC_I) ? 'I' : '-',
@@ -5696,6 +5974,14 @@ void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
cpu_fprintf(f, "RP ");
break;
}
+ cpu_fprintf(f, "\n");
+#ifdef CONFIG_SOFTMMU
+ cpu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
+ env->current_sp == M68K_SSP ? "->" : " ", env->sp[M68K_SSP],
+ env->current_sp == M68K_USP ? "->" : " ", env->sp[M68K_USP],
+ env->current_sp == M68K_ISP ? "->" : " ", env->sp[M68K_ISP]);
+ cpu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
+#endif
}
void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index 1e07e21c1c..4cf51568df 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -40,11 +40,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
ret = mb_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (unlikely(ret)) {
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- cpu_loop_exit(cs);
+ /* now we have a real cpu fault */
+ cpu_loop_exit_restore(cs, retaddr);
}
}
#endif
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index e7b5597c46..7628b0e25b 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -1808,11 +1808,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
&& qemu_log_in_addr_range(pc_start)) {
qemu_log_lock();
qemu_log("--------------\n");
-#if DISAS_GNU
log_target_disas(cs, pc_start, dc->pc - pc_start);
-#endif
- qemu_log("\nisize=%d osize=%d\n",
- dc->pc - pc_start, tcg_op_buf_count());
qemu_log_unlock();
}
#endif
diff --git a/target/mips/translate.c b/target/mips/translate.c
index b022f840c9..d05ee67e63 100644
--- a/target/mips/translate.c
+++ b/target/mips/translate.c
@@ -20453,7 +20453,7 @@ void mips_tcg_init(void)
{
int i;
- TCGV_UNUSED(cpu_gpr[0]);
+ cpu_gpr[0] = NULL;
for (i = 1; i < 32; i++)
cpu_gpr[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUMIPSState, active_tc.gpr[i]),
diff --git a/target/moxie/helper.c b/target/moxie/helper.c
index 330299f5a7..6890ffd71c 100644
--- a/target/moxie/helper.c
+++ b/target/moxie/helper.c
@@ -36,11 +36,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
ret = moxie_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (unlikely(ret)) {
- if (retaddr) {
- cpu_restore_state(cs, retaddr);
- }
+ cpu_loop_exit_restore(cs, retaddr);
}
- cpu_loop_exit(cs);
}
void helper_raise_exception(CPUMoxieState *env, int ex)
diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h
index 9119eee587..88823a6d4d 100644
--- a/target/nios2/cpu.h
+++ b/target/nios2/cpu.h
@@ -20,7 +20,6 @@
#ifndef CPU_NIOS2_H
#define CPU_NIOS2_H
-#include "qemu/osdep.h"
#include "qemu-common.h"
#define TARGET_LONG_BITS 32
diff --git a/target/nios2/helper.c b/target/nios2/helper.c
index ef9ee05798..9f741a8f19 100644
--- a/target/nios2/helper.c
+++ b/target/nios2/helper.c
@@ -18,12 +18,9 @@
* <http://www.gnu.org/licenses/lgpl-2.1.html>
*/
-#include <stdio.h>
-#include <string.h>
-#include <assert.h>
+#include "qemu/osdep.h"
#include "cpu.h"
-#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "qapi/error.h"
#include "exec/exec-all.h"
diff --git a/target/nios2/mmu.c b/target/nios2/mmu.c
index fe9298af50..0cd8647510 100644
--- a/target/nios2/mmu.c
+++ b/target/nios2/mmu.c
@@ -42,11 +42,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
ret = nios2_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (unlikely(ret)) {
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- cpu_loop_exit(cs);
+ /* now we have a real cpu fault */
+ cpu_loop_exit_restore(cs, retaddr);
}
}
diff --git a/target/nios2/op_helper.c b/target/nios2/op_helper.c
index efb1c489c9..c853aeae02 100644
--- a/target/nios2/op_helper.c
+++ b/target/nios2/op_helper.c
@@ -18,6 +18,7 @@
* <http://www.gnu.org/licenses/lgpl-2.1.html>
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index 72329002ac..cb8624e8d2 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -21,6 +21,7 @@
* <http://www.gnu.org/licenses/lgpl-2.1.html>
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg-op.h"
#include "exec/exec-all.h"
@@ -124,7 +125,7 @@ static uint8_t get_opxcode(uint32_t code)
static TCGv load_zero(DisasContext *dc)
{
- if (TCGV_IS_UNUSED_I32(dc->zero)) {
+ if (!dc->zero) {
dc->zero = tcg_const_i32(0);
}
return dc->zero;
@@ -754,12 +755,12 @@ static void handle_instruction(DisasContext *dc, CPUNios2State *env)
goto illegal_op;
}
- TCGV_UNUSED_I32(dc->zero);
+ dc->zero = NULL;
instr = &i_type_instructions[op];
instr->handler(dc, code, instr->flags);
- if (!TCGV_IS_UNUSED_I32(dc->zero)) {
+ if (dc->zero) {
tcg_temp_free(dc->zero);
}
diff --git a/target/openrisc/exception_helper.c b/target/openrisc/exception_helper.c
index a8a5f69b05..6073a5b21c 100644
--- a/target/openrisc/exception_helper.c
+++ b/target/openrisc/exception_helper.c
@@ -21,7 +21,6 @@
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
-#include "exec/exec-all.h"
#include "exception.h"
void HELPER(exception)(CPUOpenRISCState *env, uint32_t excp)
diff --git a/target/openrisc/mmu_helper.c b/target/openrisc/mmu_helper.c
index a44d0aa51a..a3e182c42d 100644
--- a/target/openrisc/mmu_helper.c
+++ b/target/openrisc/mmu_helper.c
@@ -33,12 +33,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
ret = openrisc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (ret) {
- if (retaddr) {
- /* now we have a real cpu fault. */
- cpu_restore_state(cs, retaddr);
- }
/* Raise Exception. */
- cpu_loop_exit(cs);
+ cpu_loop_exit_restore(cs, retaddr);
}
}
#endif
diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h
index 429b47f959..deaa46a14b 100644
--- a/target/ppc/cpu-qom.h
+++ b/target/ppc/cpu-qom.h
@@ -191,6 +191,7 @@ typedef struct PowerPCCPUClass {
uint64_t insns_flags;
uint64_t insns_flags2;
uint64_t msr_mask;
+ uint64_t lpcr_pm; /* Power-saving mode Exit Cause Enable bits */
powerpc_mmu_t mmu_model;
powerpc_excp_t excp_model;
powerpc_input_t bus_model;
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index 989761b795..370b05e76e 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -87,6 +87,13 @@
#define PPC_ELF_MACHINE EM_PPC
#endif
+#define PPC_BIT(bit) (0x8000000000000000UL >> (bit))
+#define PPC_BIT32(bit) (0x80000000UL >> (bit))
+#define PPC_BIT8(bit) (0x80UL >> (bit))
+#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+#define PPC_BITMASK32(bs, be) ((PPC_BIT32(bs) - PPC_BIT32(be)) | \
+ PPC_BIT32(bs))
+
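
The PPC_BIT()/PPC_BITMASK() macros introduced above encode IBM's big-endian bit numbering, where bit 0 is the most significant bit of the 64-bit register. A standalone check of a few values; the macros are re-declared locally with ULL suffixes so the sketch compiles on its own:

#include <stdio.h>

#define PPC_BIT(bit)        (0x8000000000000000ULL >> (bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

int main(void)
{
    /* bit 0 is the MSB, bit 63 the LSB; masks run from bs down to be */
    printf("PPC_BIT(0)        = 0x%016llx\n", (unsigned long long)PPC_BIT(0));
    printf("PPC_BIT(63)       = 0x%016llx\n", (unsigned long long)PPC_BIT(63));
    printf("PPC_BITMASK(0, 3) = 0x%016llx\n",
           (unsigned long long)PPC_BITMASK(0, 3));
    return 0;
    /* prints 0x8000000000000000, 0x0000000000000001, 0xf000000000000000 */
}
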
/*****************************************************************************/
/* Exception vectors definitions */
enum {
@@ -371,10 +378,10 @@ struct ppc_slb_t {
#define MSR_LE 0 /* Little-endian mode 1 hflags */
/* LPCR bits */
-#define LPCR_VPM0 (1ull << (63 - 0))
-#define LPCR_VPM1 (1ull << (63 - 1))
-#define LPCR_ISL (1ull << (63 - 2))
-#define LPCR_KBV (1ull << (63 - 3))
+#define LPCR_VPM0 PPC_BIT(0)
+#define LPCR_VPM1 PPC_BIT(1)
+#define LPCR_ISL PPC_BIT(2)
+#define LPCR_KBV PPC_BIT(3)
#define LPCR_DPFD_SHIFT (63 - 11)
#define LPCR_DPFD (0x7ull << LPCR_DPFD_SHIFT)
#define LPCR_VRMASD_SHIFT (63 - 16)
@@ -382,41 +389,41 @@ struct ppc_slb_t {
/* P9: Power-saving mode Exit Cause Enable (Upper Section) Mask */
#define LPCR_PECE_U_SHIFT (63 - 19)
#define LPCR_PECE_U_MASK (0x7ull << LPCR_PECE_U_SHIFT)
-#define LPCR_HVEE (1ull << (63 - 17)) /* Hypervisor Virt Exit Enable */
+#define LPCR_HVEE PPC_BIT(17) /* Hypervisor Virt Exit Enable */
#define LPCR_RMLS_SHIFT (63 - 37)
#define LPCR_RMLS (0xfull << LPCR_RMLS_SHIFT)
-#define LPCR_ILE (1ull << (63 - 38))
+#define LPCR_ILE PPC_BIT(38)
#define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */
#define LPCR_AIL (3ull << LPCR_AIL_SHIFT)
-#define LPCR_UPRT (1ull << (63 - 41)) /* Use Process Table */
-#define LPCR_EVIRT (1ull << (63 - 42)) /* Enhanced Virtualisation */
-#define LPCR_ONL (1ull << (63 - 45))
-#define LPCR_LD (1ull << (63 - 46)) /* Large Decrementer */
-#define LPCR_P7_PECE0 (1ull << (63 - 49))
-#define LPCR_P7_PECE1 (1ull << (63 - 50))
-#define LPCR_P7_PECE2 (1ull << (63 - 51))
-#define LPCR_P8_PECE0 (1ull << (63 - 47))
-#define LPCR_P8_PECE1 (1ull << (63 - 48))
-#define LPCR_P8_PECE2 (1ull << (63 - 49))
-#define LPCR_P8_PECE3 (1ull << (63 - 50))
-#define LPCR_P8_PECE4 (1ull << (63 - 51))
+#define LPCR_UPRT PPC_BIT(41) /* Use Process Table */
+#define LPCR_EVIRT PPC_BIT(42) /* Enhanced Virtualisation */
+#define LPCR_ONL PPC_BIT(45)
+#define LPCR_LD PPC_BIT(46) /* Large Decrementer */
+#define LPCR_P7_PECE0 PPC_BIT(49)
+#define LPCR_P7_PECE1 PPC_BIT(50)
+#define LPCR_P7_PECE2 PPC_BIT(51)
+#define LPCR_P8_PECE0 PPC_BIT(47)
+#define LPCR_P8_PECE1 PPC_BIT(48)
+#define LPCR_P8_PECE2 PPC_BIT(49)
+#define LPCR_P8_PECE3 PPC_BIT(50)
+#define LPCR_P8_PECE4 PPC_BIT(51)
/* P9: Power-saving mode Exit Cause Enable (Lower Section) Mask */
#define LPCR_PECE_L_SHIFT (63 - 51)
#define LPCR_PECE_L_MASK (0x1full << LPCR_PECE_L_SHIFT)
-#define LPCR_PDEE (1ull << (63 - 47)) /* Privileged Doorbell Exit EN */
-#define LPCR_HDEE (1ull << (63 - 48)) /* Hyperv Doorbell Exit Enable */
-#define LPCR_EEE (1ull << (63 - 49)) /* External Exit Enable */
-#define LPCR_DEE (1ull << (63 - 50)) /* Decrementer Exit Enable */
-#define LPCR_OEE (1ull << (63 - 51)) /* Other Exit Enable */
-#define LPCR_MER (1ull << (63 - 52))
-#define LPCR_GTSE (1ull << (63 - 53)) /* Guest Translation Shootdown */
-#define LPCR_TC (1ull << (63 - 54))
-#define LPCR_HEIC (1ull << (63 - 59)) /* HV Extern Interrupt Control */
-#define LPCR_LPES0 (1ull << (63 - 60))
-#define LPCR_LPES1 (1ull << (63 - 61))
-#define LPCR_RMI (1ull << (63 - 62))
-#define LPCR_HVICE (1ull << (63 - 62)) /* HV Virtualisation Int Enable */
-#define LPCR_HDICE (1ull << (63 - 63))
+#define LPCR_PDEE PPC_BIT(47) /* Privileged Doorbell Exit EN */
+#define LPCR_HDEE PPC_BIT(48) /* Hyperv Doorbell Exit Enable */
+#define LPCR_EEE PPC_BIT(49) /* External Exit Enable */
+#define LPCR_DEE PPC_BIT(50) /* Decrementer Exit Enable */
+#define LPCR_OEE PPC_BIT(51) /* Other Exit Enable */
+#define LPCR_MER PPC_BIT(52)
+#define LPCR_GTSE PPC_BIT(53) /* Guest Translation Shootdown */
+#define LPCR_TC PPC_BIT(54)
+#define LPCR_HEIC PPC_BIT(59) /* HV Extern Interrupt Control */
+#define LPCR_LPES0 PPC_BIT(60)
+#define LPCR_LPES1 PPC_BIT(61)
+#define LPCR_RMI PPC_BIT(62)
+#define LPCR_HVICE PPC_BIT(62) /* HV Virtualisation Int Enable */
+#define LPCR_HDICE PPC_BIT(63)
#define msr_sf ((env->msr >> MSR_SF) & 1)
#define msr_isf ((env->msr >> MSR_ISF) & 1)
@@ -507,22 +514,22 @@ struct ppc_slb_t {
#define FSCR_IC_TAR 8
/* Exception state register bits definition */
-#define ESR_PIL (1 << (63 - 36)) /* Illegal Instruction */
-#define ESR_PPR (1 << (63 - 37)) /* Privileged Instruction */
-#define ESR_PTR (1 << (63 - 38)) /* Trap */
-#define ESR_FP (1 << (63 - 39)) /* Floating-Point Operation */
-#define ESR_ST (1 << (63 - 40)) /* Store Operation */
-#define ESR_AP (1 << (63 - 44)) /* Auxiliary Processor Operation */
-#define ESR_PUO (1 << (63 - 45)) /* Unimplemented Operation */
-#define ESR_BO (1 << (63 - 46)) /* Byte Ordering */
-#define ESR_PIE (1 << (63 - 47)) /* Imprecise exception */
-#define ESR_DATA (1 << (63 - 53)) /* Data Access (Embedded page table) */
-#define ESR_TLBI (1 << (63 - 54)) /* TLB Ineligible (Embedded page table) */
-#define ESR_PT (1 << (63 - 55)) /* Page Table (Embedded page table) */
-#define ESR_SPV (1 << (63 - 56)) /* SPE/VMX operation */
-#define ESR_EPID (1 << (63 - 57)) /* External Process ID operation */
-#define ESR_VLEMI (1 << (63 - 58)) /* VLE operation */
-#define ESR_MIF (1 << (63 - 62)) /* Misaligned instruction (VLE) */
+#define ESR_PIL PPC_BIT(36) /* Illegal Instruction */
+#define ESR_PPR PPC_BIT(37) /* Privileged Instruction */
+#define ESR_PTR PPC_BIT(38) /* Trap */
+#define ESR_FP PPC_BIT(39) /* Floating-Point Operation */
+#define ESR_ST PPC_BIT(40) /* Store Operation */
+#define ESR_AP PPC_BIT(44) /* Auxiliary Processor Operation */
+#define ESR_PUO PPC_BIT(45) /* Unimplemented Operation */
+#define ESR_BO PPC_BIT(46) /* Byte Ordering */
+#define ESR_PIE PPC_BIT(47) /* Imprecise exception */
+#define ESR_DATA PPC_BIT(53) /* Data Access (Embedded page table) */
+#define ESR_TLBI PPC_BIT(54) /* TLB Ineligible (Embedded page table) */
+#define ESR_PT PPC_BIT(55) /* Page Table (Embedded page table) */
+#define ESR_SPV PPC_BIT(56) /* SPE/VMX operation */
+#define ESR_EPID PPC_BIT(57) /* External Process ID operation */
+#define ESR_VLEMI PPC_BIT(58) /* VLE operation */
+#define ESR_MIF PPC_BIT(62) /* Misaligned instruction (VLE) */
/* Transaction EXception And Summary Register bits */
#define TEXASR_FAILURE_PERSISTENT (63 - 7)
@@ -1991,7 +1998,7 @@ void ppc_compat_add_property(Object *obj, const char *name,
#define HID0_DEEPNAP (1 << 24) /* pre-2.06 */
#define HID0_DOZE (1 << 23) /* pre-2.06 */
#define HID0_NAP (1 << 22) /* pre-2.06 */
-#define HID0_HILE (1ull << (63 - 19)) /* POWER8 */
+#define HID0_HILE PPC_BIT(19) /* POWER8 */
/*****************************************************************************/
/* PowerPC Instructions types definitions */
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 9d57debf0e..4664a3ce9d 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -47,9 +47,6 @@
#include "sysemu/hostmem.h"
#include "qemu/cutils.h"
#include "qemu/mmap-alloc.h"
-#if defined(TARGET_PPC64)
-#include "hw/ppc/spapr_cpu_core.h"
-#endif
#include "elf.h"
#include "sysemu/kvm_int.h"
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 998fbed848..0ef21cce33 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -3419,7 +3419,7 @@ static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
}
/*** Branch ***/
-static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
if (NARROW_MODE(ctx)) {
dest = (uint32_t) dest;
@@ -3441,7 +3441,7 @@ static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
gen_debug_exception(ctx);
}
}
- tcg_gen_exit_tb(0);
+ tcg_gen_lookup_and_goto_ptr();
}
}
@@ -3479,7 +3479,7 @@ static void gen_b(DisasContext *ctx)
#define BCOND_CTR 2
#define BCOND_TAR 3
-static inline void gen_bcond(DisasContext *ctx, int type)
+static void gen_bcond(DisasContext *ctx, int type)
{
uint32_t bo = BO(ctx->opcode);
TCGLabel *l1;
@@ -3495,7 +3495,7 @@ static inline void gen_bcond(DisasContext *ctx, int type)
else
tcg_gen_mov_tl(target, cpu_lr);
} else {
- TCGV_UNUSED(target);
+ target = NULL;
}
if (LK(ctx->opcode))
gen_setlr(ctx, ctx->nip);
@@ -3543,26 +3543,19 @@ static inline void gen_bcond(DisasContext *ctx, int type)
} else {
gen_goto_tb(ctx, 0, li);
}
- if ((bo & 0x14) != 0x14) {
- gen_set_label(l1);
- gen_goto_tb(ctx, 1, ctx->nip);
- }
} else {
if (NARROW_MODE(ctx)) {
tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
} else {
tcg_gen_andi_tl(cpu_nip, target, ~3);
}
- tcg_gen_exit_tb(0);
- if ((bo & 0x14) != 0x14) {
- gen_set_label(l1);
- gen_update_nip(ctx, ctx->nip);
- tcg_gen_exit_tb(0);
- }
- }
- if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
+ tcg_gen_lookup_and_goto_ptr();
tcg_temp_free(target);
}
+ if ((bo & 0x14) != 0x14) {
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 1, ctx->nip);
+ }
}
static void gen_bc(DisasContext *ctx)
diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c
index 4e11e6f489..70ff15a51a 100644
--- a/target/ppc/translate_init.c
+++ b/target/ppc/translate_init.c
@@ -8535,6 +8535,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
+ pcc->lpcr_pm = LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2;
}
static void init_proc_POWER8(CPUPPCState *env)
@@ -8704,6 +8705,8 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
+ pcc->lpcr_pm = LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
+ LPCR_P8_PECE3 | LPCR_P8_PECE4;
}
#ifdef CONFIG_SOFTMMU
@@ -8898,14 +8901,17 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
+ pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
}
#if !defined(CONFIG_USER_ONLY)
void cpu_ppc_set_papr(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp)
{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
ppc_spr_t *lpcr = &env->spr_cb[SPR_LPCR];
ppc_spr_t *amor = &env->spr_cb[SPR_AMOR];
+ CPUState *cs = CPU(cpu);
cpu->vhyp = vhyp;
@@ -8932,8 +8938,7 @@ void cpu_ppc_set_papr(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp)
lpcr->default_value &= ~LPCR_RMLS;
lpcr->default_value |= 1ull << LPCR_RMLS_SHIFT;
- switch (env->mmu_model) {
- case POWERPC_MMU_3_00:
+ if (env->mmu_model == POWERPC_MMU_3_00) {
/* By default we choose legacy mode and switch to new hash or radix
* when a register process table hcall is made. So disable process
* tables and guest translation shootdown by default
@@ -8947,16 +8952,13 @@ void cpu_ppc_set_papr(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp)
} else {
lpcr->default_value &= ~(LPCR_UPRT | LPCR_GTSE);
}
- lpcr->default_value |= LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE |
- LPCR_OEE;
- break;
- default:
- /* P7 and P8 has slightly different PECE bits, mostly because P8 adds
- * bit 47 and 48 which are reserved on P7. Here we set them all, which
- * will work as expected for both implementations
- */
- lpcr->default_value |= LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
- LPCR_P8_PECE3 | LPCR_P8_PECE4;
+ }
+
+ /* Only enable Power-saving mode Exit Cause exceptions on the boot
+ * CPU. The RTAS command start-cpu will enable them on secondaries.
+ */
+ if (cs == first_cpu) {
+ lpcr->default_value |= pcc->lpcr_pm;
}
/* We should be followed by a CPU reset but update the active value
diff --git a/target/s390x/cc_helper.c b/target/s390x/cc_helper.c
index f008897e84..5d91e458a8 100644
--- a/target/s390x/cc_helper.c
+++ b/target/s390x/cc_helper.c
@@ -564,7 +564,7 @@ void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
break;
default:
HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
- program_interrupt(env, PGM_SPECIFICATION, 2);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 2, GETPC());
break;
}
}
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
index 4db8b5409e..1a8b6b9ae9 100644
--- a/target/s390x/cpu.h
+++ b/target/s390x/cpu.h
@@ -351,6 +351,9 @@ extern const struct VMStateDescription vmstate_s390_cpu;
#define CR0_CPU_TIMER_SC 0x0000000000000400ULL
#define CR0_SERVICE_SC 0x0000000000000200ULL
+/* Control register 14 bits */
+#define CR14_CHANNEL_REPORT_SC 0x0000000010000000ULL
+
/* MMU */
#define MMU_PRIMARY_IDX 0
#define MMU_SECONDARY_IDX 1
@@ -674,6 +677,26 @@ struct sysib_322 {
#define MCIC_VB_CT 0x0000000000020000ULL
#define MCIC_VB_CC 0x0000000000010000ULL
+static inline uint64_t s390_build_validity_mcic(void)
+{
+ uint64_t mcic;
+
+ /*
+ * Indicate all validity bits (no damage) only. Other bits have to be
+ * added by the caller. (storage errors, subclasses and subclass modifiers)
+ */
+ mcic = MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP |
+ MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR |
+ MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC;
+ if (s390_has_feat(S390_FEAT_VECTOR)) {
+ mcic |= MCIC_VB_VR;
+ }
+ if (s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
+ mcic |= MCIC_VB_GS;
+ }
+ return mcic;
+}
+
/* cpu.c */
int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low);
@@ -699,6 +722,9 @@ static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
/* cpu_models.c */
void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#define cpu_list s390_cpu_list
+void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga,
+ const S390FeatInit feat_init);
+
/* helper.c */
#define cpu_init(cpu_model) cpu_generic_init(TYPE_S390_CPU, cpu_model)
@@ -719,7 +745,9 @@ void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
uint32_t io_int_parm, uint32_t io_int_word);
/* automatically detect the instruction length */
#define ILEN_AUTO 0xff
-void program_interrupt(CPUS390XState *env, uint32_t code, int ilen);
+#define RA_IGNORED 0
+void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen,
+ uintptr_t ra);
/* service interrupts are floating therefore we must not pass an cpustate */
void s390_sclp_extint(uint32_t parm);
@@ -733,6 +761,7 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \
s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
+void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra);
/* sigp.c */
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index c4c37b3b15..212a5f0697 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -15,7 +15,6 @@
#include "internal.h"
#include "kvm_s390x.h"
#include "sysemu/kvm.h"
-#include "gen-features.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/error-report.h"
@@ -81,6 +80,12 @@ static S390CPUDef s390_cpu_defs[] = {
CPUDEF_INIT(0x3906, 14, 1, 47, 0x08000000U, "z14", "IBM z14 GA1"),
};
+#define QEMU_MAX_CPU_TYPE 0x2827
+#define QEMU_MAX_CPU_GEN 12
+#define QEMU_MAX_CPU_EC_GA 2
+static const S390FeatInit qemu_max_cpu_feat_init = { S390_FEAT_LIST_QEMU_MAX };
+static S390FeatBitmap qemu_max_cpu_feat;
+
/* features part of a base model but not relevant for finding a base model */
S390FeatBitmap ignored_base_feat;
@@ -812,48 +817,6 @@ static void check_compatibility(const S390CPUModel *max_model,
"available in the configuration: ");
}
-/**
- * The base TCG CPU model "qemu" is based on the z900. However, we already
- * can also emulate some additional features of later CPU generations, so
- * we add these additional feature bits here.
- */
-static void add_qemu_cpu_model_features(S390FeatBitmap fbm)
-{
- static const int feats[] = {
- S390_FEAT_DAT_ENH,
- S390_FEAT_IDTE_SEGMENT,
- S390_FEAT_STFLE,
- S390_FEAT_SENSE_RUNNING_STATUS,
- S390_FEAT_EXTENDED_IMMEDIATE,
- S390_FEAT_EXTENDED_TRANSLATION_2,
- S390_FEAT_MSA,
- S390_FEAT_EXTENDED_TRANSLATION_3,
- S390_FEAT_LONG_DISPLACEMENT,
- S390_FEAT_LONG_DISPLACEMENT_FAST,
- S390_FEAT_ETF2_ENH,
- S390_FEAT_STORE_CLOCK_FAST,
- S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
- S390_FEAT_ETF3_ENH,
- S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
- S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2,
- S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
- S390_FEAT_EXECUTE_EXT,
- S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
- S390_FEAT_STFLE_45,
- S390_FEAT_STFLE_49,
- S390_FEAT_LOCAL_TLB_CLEARING,
- S390_FEAT_STFLE_53,
- S390_FEAT_MSA_EXT_5,
- S390_FEAT_MSA_EXT_3,
- S390_FEAT_MSA_EXT_4,
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(feats); i++) {
- set_bit(feats[i], fbm);
- }
-}
-
static S390CPUModel *get_max_cpu_model(Error **errp)
{
static S390CPUModel max_model;
@@ -866,12 +829,10 @@ static S390CPUModel *get_max_cpu_model(Error **errp)
if (kvm_enabled()) {
kvm_s390_get_host_cpu_model(&max_model, errp);
} else {
- /* TCG emulates a z900 (with some optional additional features) */
- max_model.def = &s390_cpu_defs[0];
- bitmap_copy(max_model.features, max_model.def->default_feat,
- S390_FEAT_MAX);
- add_qemu_cpu_model_features(max_model.features);
- }
+ max_model.def = s390_find_cpu_def(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN,
+ QEMU_MAX_CPU_EC_GA, NULL);
+ bitmap_copy(max_model.features, qemu_max_cpu_feat, S390_FEAT_MAX);
+ }
if (!*errp) {
cached = true;
return &max_model;
@@ -1127,18 +1088,42 @@ static void s390_host_cpu_model_initfn(Object *obj)
}
#endif
+static S390CPUDef s390_qemu_cpu_def;
+static S390CPUModel s390_qemu_cpu_model;
+
+/* Set the qemu CPU model (on machine initialization). Must not be called
+ * once CPUs have been created.
+ */
+void s390_set_qemu_cpu_model(uint16_t type, uint8_t gen, uint8_t ec_ga,
+ const S390FeatInit feat_init)
+{
+ const S390CPUDef *def = s390_find_cpu_def(type, gen, ec_ga, NULL);
+
+ g_assert(def);
+ g_assert(QTAILQ_EMPTY(&cpus));
+
+ /* TCG emulates some features that usually cannot be enabled with
+ * the emulated machine generation. Make sure they can be enabled
+ * when using the QEMU model by adding them to full_feat. We have
+ * to copy the definition to do that.
+ */
+ memcpy(&s390_qemu_cpu_def, def, sizeof(s390_qemu_cpu_def));
+ bitmap_or(s390_qemu_cpu_def.full_feat, s390_qemu_cpu_def.full_feat,
+ qemu_max_cpu_feat, S390_FEAT_MAX);
+
+ /* build the CPU model */
+ s390_qemu_cpu_model.def = &s390_qemu_cpu_def;
+ bitmap_zero(s390_qemu_cpu_model.features, S390_FEAT_MAX);
+ s390_init_feat_bitmap(feat_init, s390_qemu_cpu_model.features);
+}
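As a hedged illustration (not part of this patch), a machine-type compat hook could pin an older QEMU model through the new setter; the feature-list macro name below matches the generator output added later in this patch, while the surrounding hook itself is hypothetical:

    /* hypothetical compat hook: pin the 2.11 variant of the QEMU model */
    static const S390FeatInit qemu_v2_11_init = { S390_FEAT_LIST_QEMU_V2_11 };

    s390_set_qemu_cpu_model(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN,
                            QEMU_MAX_CPU_EC_GA, qemu_v2_11_init);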
+
static void s390_qemu_cpu_model_initfn(Object *obj)
{
- static S390CPUDef s390_qemu_cpu_defs;
S390CPU *cpu = S390_CPU(obj);
cpu->model = g_malloc0(sizeof(*cpu->model));
- /* TCG emulates a z900 (with some optional additional features) */
- memcpy(&s390_qemu_cpu_defs, &s390_cpu_defs[0], sizeof(s390_qemu_cpu_defs));
- add_qemu_cpu_model_features(s390_qemu_cpu_defs.full_feat);
- cpu->model->def = &s390_qemu_cpu_defs;
- bitmap_copy(cpu->model->features, cpu->model->def->default_feat,
- S390_FEAT_MAX);
+ /* copy the CPU model so we can modify it */
+ memcpy(cpu->model, &s390_qemu_cpu_model, sizeof(*cpu->model));
}
static void s390_cpu_model_finalize(Object *obj)
@@ -1279,11 +1264,13 @@ static void init_ignored_base_feat(void)
static void register_types(void)
{
+ static const S390FeatInit qemu_latest_init = { S390_FEAT_LIST_QEMU_LATEST };
int i;
init_ignored_base_feat();
/* init all bitmaps from generated data initially */
+ s390_init_feat_bitmap(qemu_max_cpu_feat_init, qemu_max_cpu_feat);
for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
s390_init_feat_bitmap(s390_cpu_defs[i].base_init,
s390_cpu_defs[i].base_feat);
@@ -1293,6 +1280,10 @@ static void register_types(void)
s390_cpu_defs[i].full_feat);
}
+ /* initialize the qemu model with the latest definition */
+ s390_set_qemu_cpu_model(QEMU_MAX_CPU_TYPE, QEMU_MAX_CPU_GEN,
+ QEMU_MAX_CPU_EC_GA, qemu_latest_init);
+
for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
char *base_name = s390_base_cpu_type_name(s390_cpu_defs[i].name);
TypeInfo ti_base = {
diff --git a/target/s390x/cpu_models.h b/target/s390x/cpu_models.h
index 4c6dee1871..11cf5386fb 100644
--- a/target/s390x/cpu_models.h
+++ b/target/s390x/cpu_models.h
@@ -14,6 +14,7 @@
#define TARGET_S390X_CPU_MODELS_H
#include "cpu_features.h"
+#include "gen-features.h"
#include "qom/cpu.h"
/* static CPU definition */
diff --git a/target/s390x/crypto_helper.c b/target/s390x/crypto_helper.c
index fa360a2d6e..5c79790187 100644
--- a/target/s390x/crypto_helper.c
+++ b/target/s390x/crypto_helper.c
@@ -23,7 +23,6 @@ uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
const uintptr_t ra = GETPC();
const uint8_t mod = env->regs[0] & 0x80ULL;
const uint8_t fc = env->regs[0] & 0x7fULL;
- CPUState *cs = CPU(s390_env_get_cpu(env));
uint8_t subfunc[16] = { 0 };
uint64_t param_addr;
int i;
@@ -35,8 +34,7 @@ uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
case S390_FEAT_TYPE_PCKMO:
case S390_FEAT_TYPE_PCC:
if (mod) {
- cpu_restore_state(cs, ra);
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return 0;
}
break;
@@ -44,8 +42,7 @@ uint32_t HELPER(msa)(CPUS390XState *env, uint32_t r1, uint32_t r2, uint32_t r3,
s390_get_feat_block(type, subfunc);
if (!test_be_bit(fc, subfunc)) {
- cpu_restore_state(cs, ra);
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return 0;
}
diff --git a/target/s390x/diag.c b/target/s390x/diag.c
index dbbb9e886f..a755837ad5 100644
--- a/target/s390x/diag.c
+++ b/target/s390x/diag.c
@@ -99,19 +99,19 @@ int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3)
#define DIAG_308_RC_NO_CONF 0x0102
#define DIAG_308_RC_INVALID 0x0402
-void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
+void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra)
{
uint64_t addr = env->regs[r1];
uint64_t subcode = env->regs[r3];
IplParameterBlock *iplb;
if (env->psw.mask & PSW_MASK_PSTATE) {
- program_interrupt(env, PGM_PRIVILEGED, ILEN_AUTO);
+ s390_program_interrupt(env, PGM_PRIVILEGED, ILEN_AUTO, ra);
return;
}
if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
- program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
+ s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
return;
}
@@ -136,12 +136,12 @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
break;
case 5:
if ((r1 & 1) || (addr & 0x0fffULL)) {
- program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
+ s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
return;
}
if (!address_space_access_valid(&address_space_memory, addr,
sizeof(IplParameterBlock), false)) {
- program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO);
+ s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
return;
}
iplb = g_new0(IplParameterBlock, 1);
@@ -165,12 +165,12 @@ out:
return;
case 6:
if ((r1 & 1) || (addr & 0x0fffULL)) {
- program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
+ s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, ra);
return;
}
if (!address_space_access_valid(&address_space_memory, addr,
sizeof(IplParameterBlock), true)) {
- program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO);
+ s390_program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO, ra);
return;
}
iplb = s390_ipl_get_iplb();
diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c
index e04b670663..f4697a884d 100644
--- a/target/s390x/excp_helper.c
+++ b/target/s390x/excp_helper.c
@@ -395,6 +395,9 @@ static void do_mchk_interrupt(CPUS390XState *env)
lowcore = cpu_map_lowcore(env);
+ /* we are always in z/Architecture mode */
+ lowcore->ar_access_id = 1;
+
for (i = 0; i < 16; i++) {
lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
@@ -404,13 +407,10 @@ static void do_mchk_interrupt(CPUS390XState *env)
lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
- lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
- lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
- lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
- lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);
+ lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
+ lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);
- lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
- lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
+ lowcore->mcic = cpu_to_be64(s390_build_validity_mcic() | MCIC_SC_CP);
lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
@@ -554,10 +554,7 @@ void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
S390CPU *cpu = S390_CPU(cs);
CPUS390XState *env = &cpu->env;
- if (retaddr) {
- cpu_restore_state(cs, retaddr);
- }
- program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
+ s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, retaddr);
}
#endif /* CONFIG_USER_ONLY */
diff --git a/target/s390x/fpu_helper.c b/target/s390x/fpu_helper.c
index ffbeb3b2df..334159119f 100644
--- a/target/s390x/fpu_helper.c
+++ b/target/s390x/fpu_helper.c
@@ -44,7 +44,7 @@ static void ieee_exception(CPUS390XState *env, uint32_t dxc, uintptr_t retaddr)
/* Install the DXC code. */
env->fpc = (env->fpc & ~0xff00) | (dxc << 8);
/* Trap. */
- runtime_exception(env, PGM_DATA, retaddr);
+ s390_program_interrupt(env, PGM_DATA, ILEN_AUTO, retaddr);
}
/* Should be called after any operation that may raise IEEE exceptions. */
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
index 68e6c31b4b..b24f6ada5b 100644
--- a/target/s390x/gen-features.c
+++ b/target/s390x/gen-features.c
@@ -536,6 +536,52 @@ static uint16_t default_GEN14_GA1[] = {
S390_FEAT_GROUP_MSA_EXT_8,
};
+/* QEMU (CPU model) features */
+
+static uint16_t qemu_V2_11[] = {
+ S390_FEAT_GROUP_PLO,
+ S390_FEAT_ESAN3,
+ S390_FEAT_ZARCH,
+};
+
+static uint16_t qemu_LATEST[] = {
+ S390_FEAT_DAT_ENH,
+ S390_FEAT_IDTE_SEGMENT,
+ S390_FEAT_STFLE,
+ S390_FEAT_SENSE_RUNNING_STATUS,
+ S390_FEAT_EXTENDED_TRANSLATION_2,
+ S390_FEAT_MSA,
+ S390_FEAT_LONG_DISPLACEMENT,
+ S390_FEAT_LONG_DISPLACEMENT_FAST,
+ S390_FEAT_EXTENDED_IMMEDIATE,
+ S390_FEAT_EXTENDED_TRANSLATION_3,
+ S390_FEAT_ETF2_ENH,
+ S390_FEAT_STORE_CLOCK_FAST,
+ S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
+ S390_FEAT_ETF3_ENH,
+ S390_FEAT_EXTRACT_CPU_TIME,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2,
+ S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
+ S390_FEAT_EXECUTE_EXT,
+ S390_FEAT_SET_PROGRAM_PARAMETERS,
+ S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
+ S390_FEAT_STFLE_45,
+ S390_FEAT_STFLE_49,
+ S390_FEAT_LOCAL_TLB_CLEARING,
+ S390_FEAT_INTERLOCKED_ACCESS_2,
+ S390_FEAT_MSA_EXT_4,
+ S390_FEAT_MSA_EXT_3,
+};
+
+/* add all new definitions before this point */
+static uint16_t qemu_MAX[] = {
+ /* z13+ features */
+ S390_FEAT_STFLE_53,
+ /* generates a dependency warning, leave it out for now */
+ S390_FEAT_MSA_EXT_5,
+};
+
/****** END FEATURE DEFS ******/
#define _YEARS "2016"
@@ -627,6 +673,24 @@ static FeatGroupDefSpec FeatGroupDef[] = {
FEAT_GROUP_INITIALIZER(MSA_EXT_8),
};
+#define QEMU_FEAT_INITIALIZER(_name) \
+ { \
+ .name = "S390_FEAT_LIST_QEMU_" #_name, \
+ .bits = \
+ { .data = qemu_##_name, \
+ .len = ARRAY_SIZE(qemu_##_name) }, \
+ }
+
+/*******************************
+ * QEMU (CPU model) features
+ *******************************/
+static FeatGroupDefSpec QemuFeatDef[] = {
+ QEMU_FEAT_INITIALIZER(V2_11),
+ QEMU_FEAT_INITIALIZER(LATEST),
+ QEMU_FEAT_INITIALIZER(MAX),
+};
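For clarity, QEMU_FEAT_INITIALIZER(LATEST) expands to the following designated initializer (shown only to illustrate the macro above):

    {
        .name = "S390_FEAT_LIST_QEMU_LATEST",
        .bits = { .data = qemu_LATEST, .len = ARRAY_SIZE(qemu_LATEST) },
    }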
+
+
static void set_bits(uint64_t list[], BitSpec bits)
{
uint32_t i;
@@ -684,6 +748,29 @@ static void print_feature_defs(void)
}
}
+static void print_qemu_feature_defs(void)
+{
+ uint64_t feat[S390_FEAT_MAX / 64 + 1] = {};
+ int i, j;
+
+ printf("\n/* QEMU (CPU model) feature list data */\n");
+
+ /* for now we assume that we only add new features */
+ for (i = 0; i < ARRAY_SIZE(QemuFeatDef); i++) {
+ set_bits(feat, QemuFeatDef[i].bits);
+
+ printf("#define %s\t", QemuFeatDef[i].name);
+ for (j = 0; j < ARRAY_SIZE(feat); j++) {
+ printf("0x%016"PRIx64"ULL", feat[j]);
+ if (j < ARRAY_SIZE(feat) - 1) {
+ printf(",");
+ } else {
+ printf("\n");
+ }
+ }
+ }
+}
+
static void print_feature_group_defs(void)
{
int i, j;
@@ -721,6 +808,7 @@ int main(int argc, char *argv[])
"#ifndef %s\n#define %s\n", __FILE__, _YEARS, _NAME_H, _NAME_H);
print_feature_defs();
print_feature_group_defs();
+ print_qemu_feature_defs();
printf("\n#endif\n");
return 0;
}
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
index 246ba20f0d..35d9741918 100644
--- a/target/s390x/helper.c
+++ b/target/s390x/helper.c
@@ -31,24 +31,6 @@
#include "sysemu/sysemu.h"
#endif
-//#define DEBUG_S390
-//#define DEBUG_S390_STDOUT
-
-#ifdef DEBUG_S390
-#ifdef DEBUG_S390_STDOUT
-#define DPRINTF(fmt, ...) \
- do { fprintf(stderr, fmt, ## __VA_ARGS__); \
- if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
-#else
-#define DPRINTF(fmt, ...) \
- do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
-#endif
-#else
-#define DPRINTF(fmt, ...) \
- do { } while (0)
-#endif
-
-
#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
index 9459b73c73..2f17b62d3d 100644
--- a/target/s390x/helper.h
+++ b/target/s390x/helper.h
@@ -119,6 +119,7 @@ DEF_HELPER_4(cu24, i32, env, i32, i32, i32)
DEF_HELPER_4(cu41, i32, env, i32, i32, i32)
DEF_HELPER_4(cu42, i32, env, i32, i32, i32)
DEF_HELPER_5(msa, i32, env, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_1(stpt, TCG_CALL_NO_RWG, i64, env)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_3(servc, i32, env, i64, i64)
@@ -127,9 +128,9 @@ DEF_HELPER_3(load_psw, noreturn, env, i64, i64)
DEF_HELPER_FLAGS_2(spx, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_1(stck, TCG_CALL_NO_RWG_SE, i64, env)
DEF_HELPER_FLAGS_2(sckc, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(sckpf, TCG_CALL_NO_RWG, void, env, i64)
DEF_HELPER_FLAGS_1(stckc, TCG_CALL_NO_RWG, i64, env)
DEF_HELPER_FLAGS_2(spt, TCG_CALL_NO_RWG, void, env, i64)
-DEF_HELPER_FLAGS_1(stpt, TCG_CALL_NO_RWG, i64, env)
DEF_HELPER_4(stsi, i32, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(lctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
DEF_HELPER_FLAGS_4(lctlg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
@@ -164,7 +165,10 @@ DEF_HELPER_2(hsch, void, env, i64)
DEF_HELPER_3(msch, void, env, i64, i64)
DEF_HELPER_2(rchp, void, env, i64)
DEF_HELPER_2(rsch, void, env, i64)
+DEF_HELPER_2(sal, void, env, i64)
+DEF_HELPER_4(schm, void, env, i64, i64, i64)
DEF_HELPER_3(ssch, void, env, i64, i64)
+DEF_HELPER_2(stcrw, void, env, i64)
DEF_HELPER_3(stsch, void, env, i64, i64)
DEF_HELPER_3(tsch, void, env, i64, i64)
DEF_HELPER_2(chsc, void, env, i64)
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
index 16e27c8a35..11ee43dcbc 100644
--- a/target/s390x/insn-data.def
+++ b/target/s390x/insn-data.def
@@ -39,10 +39,10 @@
C(0xb9d8, AHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, adds32)
/* ADD IMMEDIATE */
C(0xc209, AFI, RIL_a, EI, r1, i2, new, r1_32, add, adds32)
- C(0xeb6a, ASI, SIY, GIE, m1_32s, i2, new, m1_32, add, adds32)
+ D(0xeb6a, ASI, SIY, GIE, la1, i2, new, 0, asi, adds32, MO_TESL)
C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32)
C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64)
- C(0xeb7a, AGSI, SIY, GIE, m1_64, i2, new, m1_64, add, adds64)
+ D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEQ)
C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64)
/* ADD IMMEDIATE HIGH */
C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32)
@@ -70,9 +70,9 @@
C(0xc20b, ALFI, RIL_a, EI, r1, i2_32u, new, r1_32, add, addu32)
C(0xc20a, ALGFI, RIL_a, EI, r1, i2_32u, r1, 0, add, addu64)
/* ADD LOGICAL WITH SIGNED IMMEDIATE */
- C(0xeb6e, ALSI, SIY, GIE, m1_32u, i2, new, m1_32, add, addu32)
+ D(0xeb6e, ALSI, SIY, GIE, la1, i2, new, 0, asi, addu32, MO_TEUL)
C(0xecda, ALHSIK, RIE_d, DO, r3, i2, new, r1_32, add, addu32)
- C(0xeb7e, ALGSI, SIY, GIE, m1_64, i2, new, m1_64, add, addu64)
+ D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asi, addu64, MO_TEQ)
C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, add, addu64)
/* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */
C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, addu32)
@@ -99,8 +99,8 @@
D(0xa505, NIHL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1020)
D(0xa506, NILH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1010)
D(0xa507, NILL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1000)
- C(0x9400, NI, SI, Z, m1_8u, i2_8u, new, m1_8, and, nz64)
- C(0xeb54, NIY, SIY, LD, m1_8u, i2_8u, new, m1_8, and, nz64)
+ D(0x9400, NI, SI, Z, la1, i2_8u, new, 0, ni, nz64, MO_UB)
+ D(0xeb54, NIY, SIY, LD, la1, i2_8u, new, 0, ni, nz64, MO_UB)
/* BRANCH AND SAVE */
C(0x0d00, BASR, RR_a, Z, 0, r2_nz, r1, 0, bas, 0)
@@ -357,8 +357,8 @@
/* EXCLUSIVE OR IMMEDIATE */
D(0xc006, XIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2020)
D(0xc007, XILF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2000)
- C(0x9700, XI, SI, Z, m1_8u, i2_8u, new, m1_8, xor, nz64)
- C(0xeb57, XIY, SIY, LD, m1_8u, i2_8u, new, m1_8, xor, nz64)
+ D(0x9700, XI, SI, Z, la1, i2_8u, new, 0, xi, nz64, MO_UB)
+ D(0xeb57, XIY, SIY, LD, la1, i2_8u, new, 0, xi, nz64, MO_UB)
/* EXECUTE */
C(0x4400, EX, RX_a, Z, 0, a2, 0, 0, ex, 0)
@@ -369,6 +369,8 @@
C(0xb24f, EAR, RRE, Z, 0, 0, new, r1_32, ear, 0)
/* EXTRACT CPU ATTRIBUTE */
C(0xeb4c, ECAG, RSY_a, GIE, 0, a2, r1, 0, ecag, 0)
+/* EXTRACT CPU TIME */
+ C(0xc801, ECTG, SSF, ECT, 0, 0, 0, 0, ectg, 0)
/* EXTRACT FPC */
C(0xb38c, EFPC, RRE, Z, 0, 0, new, r1_32, efpc, 0)
/* EXTRACT PSW */
@@ -698,8 +700,8 @@
D(0xa509, OIHL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1020)
D(0xa50a, OILH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1010)
D(0xa50b, OILL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1000)
- C(0x9600, OI, SI, Z, m1_8u, i2_8u, new, m1_8, or, nz64)
- C(0xeb56, OIY, SIY, LD, m1_8u, i2_8u, new, m1_8, or, nz64)
+ D(0x9600, OI, SI, Z, la1, i2_8u, new, 0, oi, nz64, MO_UB)
+ D(0xeb56, OIY, SIY, LD, la1, i2_8u, new, 0, oi, nz64, MO_UB)
/* PACK */
/* Really format SS_b, but we pack both lengths into one argument
@@ -999,6 +1001,8 @@
C(0xb204, SCK, S, Z, 0, 0, 0, 0, 0, 0)
/* SET CLOCK COMPARATOR */
C(0xb206, SCKC, S, Z, 0, m2_64, 0, 0, sckc, 0)
+/* SET CLOCK PROGRAMMABLE FIELD */
+ C(0x0107, SCKPF, E, Z, 0, 0, 0, 0, sckpf, 0)
/* SET CPU TIMER */
C(0xb208, SPT, S, Z, 0, m2_64, 0, 0, spt, 0)
/* SET PREFIX */
@@ -1052,7 +1056,12 @@
C(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0)
C(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0)
C(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0)
+ C(0xb237, SAL, S, Z, 0, 0, 0, 0, sal, 0)
+ C(0xb23c, SCHM, S, Z, 0, insn, 0, 0, schm, 0)
+ C(0xb274, SIGA, S, Z, 0, 0, 0, 0, siga, 0)
+ C(0xb23a, STCPS, S, Z, 0, 0, 0, 0, stcps, 0)
C(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0)
+ C(0xb239, STCRW, S, Z, 0, insn, 0, 0, stcrw, 0)
C(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0)
C(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0)
/* ??? Not listed in PoO ninth edition, but there's a linux driver that
diff --git a/target/s390x/int_helper.c b/target/s390x/int_helper.c
index 0076bea047..abf77a94e6 100644
--- a/target/s390x/int_helper.c
+++ b/target/s390x/int_helper.c
@@ -39,7 +39,7 @@ int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64)
int64_t q;
if (b == 0) {
- runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
ret = q = a / b;
@@ -47,7 +47,7 @@ int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64)
/* Catch non-representable quotient. */
if (ret != q) {
- runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
return ret;
@@ -60,7 +60,7 @@ uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64)
uint64_t q;
if (b == 0) {
- runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
ret = q = a / b;
@@ -68,7 +68,7 @@ uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64)
/* Catch non-representable quotient. */
if (ret != q) {
- runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
return ret;
@@ -79,7 +79,7 @@ int64_t HELPER(divs64)(CPUS390XState *env, int64_t a, int64_t b)
{
/* Catch divide by zero, and non-representable quotient (MIN / -1). */
if (b == 0 || (b == -1 && a == (1ll << 63))) {
- runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
env->retxl = a % b;
return a / b;
@@ -92,7 +92,7 @@ uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
uint64_t ret;
/* Signal divide by zero. */
if (b == 0) {
- runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
if (ah == 0) {
/* 64 -> 64/64 case */
@@ -106,7 +106,7 @@ uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
env->retxl = a % b;
ret = q;
if (ret != q) {
- runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC());
}
#else
S390CPU *cpu = s390_env_get_cpu(env);
diff --git a/target/s390x/internal.h b/target/s390x/internal.h
index 3aff54ada4..1a88e4beb4 100644
--- a/target/s390x/internal.h
+++ b/target/s390x/internal.h
@@ -43,7 +43,7 @@ typedef struct LowCore {
uint8_t pad3[0xc8 - 0xc4]; /* 0x0c4 */
uint32_t stfl_fac_list; /* 0x0c8 */
uint8_t pad4[0xe8 - 0xcc]; /* 0x0cc */
- uint32_t mcck_interruption_code[2]; /* 0x0e8 */
+ uint64_t mcic; /* 0x0e8 */
uint8_t pad5[0xf4 - 0xf0]; /* 0x0f0 */
uint32_t external_damage_code; /* 0x0f4 */
uint64_t failing_storage_address; /* 0x0f8 */
@@ -118,8 +118,8 @@ typedef struct LowCore {
uint32_t fpt_creg_save_area; /* 0x131c */
uint8_t pad16[0x1324 - 0x1320]; /* 0x1320 */
uint32_t tod_progreg_save_area; /* 0x1324 */
- uint32_t cpu_timer_save_area[2]; /* 0x1328 */
- uint32_t clock_comp_save_area[2]; /* 0x1330 */
+ uint64_t cpu_timer_save_area; /* 0x1328 */
+ uint64_t clock_comp_save_area; /* 0x1330 */
uint8_t pad17[0x1340 - 0x1338]; /* 0x1338 */
uint32_t access_regs_save_area[16]; /* 0x1340 */
uint64_t cregs_save_area[16]; /* 0x1380 */
@@ -379,21 +379,23 @@ void cpu_inject_stop(S390CPU *cpu);
/* ioinst.c */
-void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1);
-void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1);
-void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1);
-void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
-void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
-void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb);
-void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
-int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
-void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb);
-int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb);
+void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
+ uintptr_t ra);
+void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
+ uintptr_t ra);
+void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra);
+void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
+ uintptr_t ra);
+int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra);
+void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra);
void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
- uint32_t ipb);
-void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1);
-void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1);
-void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1);
+ uint32_t ipb, uintptr_t ra);
+void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
+void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
/* mem_helper.c */
@@ -408,10 +410,9 @@ int mmu_translate_real(CPUS390XState *env, target_ulong raddr, int rw,
/* misc_helper.c */
-void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
- uintptr_t retaddr);
int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
-void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3);
+void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
+ uintptr_t ra);
/* translate.c */
diff --git a/target/s390x/interrupt.c b/target/s390x/interrupt.c
index ce6177c141..39c026b8b5 100644
--- a/target/s390x/interrupt.c
+++ b/target/s390x/interrupt.c
@@ -27,17 +27,18 @@ void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
}
static void tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
- int ilen)
+ int ilen, uintptr_t ra)
{
#ifdef CONFIG_TCG
trigger_pgm_exception(env, code, ilen);
- cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+ cpu_loop_exit_restore(CPU(s390_env_get_cpu(env)), ra);
#else
g_assert_not_reached();
#endif
}
-void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
+void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen,
+ uintptr_t ra)
{
S390CPU *cpu = s390_env_get_cpu(env);
@@ -47,7 +48,7 @@ void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
if (kvm_enabled()) {
kvm_s390_program_interrupt(cpu, code);
} else if (tcg_enabled()) {
- tcg_s390_program_interrupt(env, code, ilen);
+ tcg_s390_program_interrupt(env, code, ilen, ra);
} else {
g_assert_not_reached();
}
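The convention for the new ra argument is visible throughout the rest of the patch; in sketch form:

    /* TCG helpers pass the host return address so the TB can be unwound: */
    s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());

    /* KVM intercept handlers have no TB to unwind, so the value is ignored: */
    ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);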
diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c
index 23962fbebc..83c164a168 100644
--- a/target/s390x/ioinst.c
+++ b/target/s390x/ioinst.c
@@ -38,13 +38,13 @@ int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid,
return 0;
}
-void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1)
+void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
- program_interrupt(&cpu->env, PGM_OPERAND, 4);
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("xsch", cssid, ssid, schid);
@@ -56,13 +56,13 @@ void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1)
setcc(cpu, css_do_xsch(sch));
}
-void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1)
+void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
- program_interrupt(&cpu->env, PGM_OPERAND, 4);
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("csch", cssid, ssid, schid);
@@ -74,13 +74,13 @@ void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1)
setcc(cpu, css_do_csch(sch));
}
-void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1)
+void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
- program_interrupt(&cpu->env, PGM_OPERAND, 4);
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("hsch", cssid, ssid, schid);
@@ -105,7 +105,7 @@ static int ioinst_schib_valid(SCHIB *schib)
return 1;
}
-void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
@@ -116,15 +116,16 @@ void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return;
}
if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
!ioinst_schib_valid(&schib)) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("msch", cssid, ssid, schid);
@@ -161,7 +162,7 @@ static int ioinst_orb_valid(ORB *orb)
return 1;
}
-void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
@@ -172,16 +173,17 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return;
}
if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
copy_orb_from_guest(&orb, &orig_orb);
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
!ioinst_orb_valid(&orb)) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("ssch", cssid, ssid, schid);
@@ -193,7 +195,7 @@ void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
setcc(cpu, css_do_ssch(sch, &orb));
}
-void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
+void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
{
CRW crw;
uint64_t addr;
@@ -203,7 +205,7 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return;
}
@@ -212,13 +214,17 @@ void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
setcc(cpu, cc);
- } else if (cc == 0) {
- /* Write failed: requeue CRW since STCRW is a suppressing instruction */
- css_undo_stcrw(&crw);
+ } else {
+ if (cc == 0) {
+ /* Write failed: requeue CRW since STCRW is suppressing */
+ css_undo_stcrw(&crw);
+ }
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
}
}
-void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb,
+ uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
@@ -230,7 +236,7 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return;
}
@@ -241,7 +247,9 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
* access exception if it is not) first.
*/
if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
+ } else {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
}
return;
}
@@ -267,18 +275,20 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
if (cc != 3) {
if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
sizeof(schib)) != 0) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
} else {
/* Access exceptions have a higher priority than cc3 */
if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
}
setcc(cpu, cc);
}
-int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, uintptr_t ra)
{
CPUS390XState *env = &cpu->env;
int cssid, ssid, schid, m;
@@ -289,13 +299,13 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
uint8_t ar;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return -EIO;
}
trace_ioinst_sch_id("tsch", cssid, ssid, schid);
addr = decode_basedisp_s(env, ipb, &ar);
if (addr & 3) {
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return -EIO;
}
@@ -308,6 +318,7 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
/* 0 - status pending, 1 - not status pending, 3 - not operational */
if (cc != 3) {
if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return -EFAULT;
}
css_do_tsch_update_subch(sch);
@@ -315,6 +326,7 @@ int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
irb_len = sizeof(irb) - sizeof(irb.emw);
/* Access exceptions have a higher priority than cc3 */
if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return -EFAULT;
}
}
@@ -585,7 +597,7 @@ static void ioinst_handle_chsc_unimplemented(ChscResp *res)
res->param = 0;
}
-void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
+void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb, uintptr_t ra)
{
ChscReq *req;
ChscResp *res;
@@ -601,7 +613,7 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
addr = env->regs[reg];
/* Page boundary? */
if (addr & 0xfff) {
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
return;
}
/*
@@ -610,13 +622,14 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
* care of req->len here first.
*/
if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
return;
}
req = (ChscReq *)buf;
len = be16_to_cpu(req->len);
/* Length field valid? */
if ((len < 16) || (len > 4088) || (len & 7)) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
memset((char *)req + len, 0, TARGET_PAGE_SIZE - len);
@@ -644,42 +657,18 @@ void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res,
be16_to_cpu(res->len))) {
setcc(cpu, 0); /* Command execution complete */
+ } else {
+ s390_cpu_virt_mem_handle_exc(cpu, ra);
}
}
-int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb)
-{
- CPUS390XState *env = &cpu->env;
- uint64_t addr;
- int lowcore;
- IOIntCode int_code;
- hwaddr len;
- int ret;
- uint8_t ar;
-
- trace_ioinst("tpi");
- addr = decode_basedisp_s(env, ipb, &ar);
- if (addr & 3) {
- program_interrupt(env, PGM_SPECIFICATION, 4);
- return -EIO;
- }
-
- lowcore = addr ? 0 : 1;
- len = lowcore ? 8 /* two words */ : 12 /* three words */;
- ret = css_do_tpi(&int_code, lowcore);
- if (ret == 1) {
- s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, ar, &int_code, len);
- }
- return ret;
-}
-
#define SCHM_REG1_RES(_reg) (_reg & 0x000000000ffffffc)
#define SCHM_REG1_MBK(_reg) ((_reg & 0x00000000f0000000) >> 28)
#define SCHM_REG1_UPD(_reg) ((_reg & 0x0000000000000002) >> 1)
#define SCHM_REG1_DCT(_reg) (_reg & 0x0000000000000001)
void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
- uint32_t ipb)
+ uint32_t ipb, uintptr_t ra)
{
uint8_t mbk;
int update;
@@ -689,7 +678,7 @@ void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
trace_ioinst("schm");
if (SCHM_REG1_RES(reg1)) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
@@ -698,20 +687,20 @@ void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
dct = SCHM_REG1_DCT(reg1);
if (update && (reg2 & 0x000000000000001f)) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
css_do_schm(mbk, update, dct, update ? reg2 : 0);
}
-void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1)
+void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
int cssid, ssid, schid, m;
SubchDev *sch;
if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
- program_interrupt(&cpu->env, PGM_OPERAND, 4);
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
return;
}
trace_ioinst_sch_id("rsch", cssid, ssid, schid);
@@ -726,7 +715,7 @@ void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1)
#define RCHP_REG1_RES(_reg) (_reg & 0x00000000ff00ff00)
#define RCHP_REG1_CSSID(_reg) ((_reg & 0x0000000000ff0000) >> 16)
#define RCHP_REG1_CHPID(_reg) (_reg & 0x00000000000000ff)
-void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1)
+void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
int cc;
uint8_t cssid;
@@ -735,7 +724,7 @@ void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1)
CPUS390XState *env = &cpu->env;
if (RCHP_REG1_RES(reg1)) {
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
@@ -758,17 +747,17 @@ void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1)
break;
default:
/* Invalid channel subsystem. */
- program_interrupt(env, PGM_OPERAND, 4);
+ s390_program_interrupt(env, PGM_OPERAND, 4, ra);
return;
}
setcc(cpu, cc);
}
#define SAL_REG1_INVALID(_reg) (_reg & 0x0000000080000000)
-void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1)
+void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra)
{
/* We do not provide address limit checking, so let's suppress it. */
if (SAL_REG1_INVALID(reg1) || reg1 & 0x000000000000ffff) {
- program_interrupt(&cpu->env, PGM_OPERAND, 4);
+ s390_program_interrupt(&cpu->env, PGM_OPERAND, 4, ra);
}
}
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index b03f583032..9b8b59f2a2 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -1124,32 +1124,32 @@ static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
switch (ipa1) {
case PRIV_B2_XSCH:
- ioinst_handle_xsch(cpu, env->regs[1]);
+ ioinst_handle_xsch(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_CSCH:
- ioinst_handle_csch(cpu, env->regs[1]);
+ ioinst_handle_csch(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_HSCH:
- ioinst_handle_hsch(cpu, env->regs[1]);
+ ioinst_handle_hsch(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_MSCH:
- ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
+ ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_SSCH:
- ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
+ ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_STCRW:
- ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
+ ioinst_handle_stcrw(cpu, run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_STSCH:
- ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
+ ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_TSCH:
/* We should only get tsch via KVM_EXIT_S390_TSCH. */
fprintf(stderr, "Spurious tsch intercept\n");
break;
case PRIV_B2_CHSC:
- ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
+ ioinst_handle_chsc(cpu, run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_TPI:
/* This should have been handled by kvm already. */
@@ -1157,19 +1157,19 @@ static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
break;
case PRIV_B2_SCHM:
ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
- run->s390_sieic.ipb);
+ run->s390_sieic.ipb, RA_IGNORED);
break;
case PRIV_B2_RSCH:
- ioinst_handle_rsch(cpu, env->regs[1]);
+ ioinst_handle_rsch(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_RCHP:
- ioinst_handle_rchp(cpu, env->regs[1]);
+ ioinst_handle_rchp(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_STCPS:
/* We do not provide this instruction, it is suppressed. */
break;
case PRIV_B2_SAL:
- ioinst_handle_sal(cpu, env->regs[1]);
+ ioinst_handle_sal(cpu, env->regs[1], RA_IGNORED);
break;
case PRIV_B2_SIGA:
/* Not provided, set CC = 3 for subchannel not operational */
@@ -1230,7 +1230,7 @@ static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
if (s390_has_feat(S390_FEAT_ZPCI)) {
- return clp_service_call(cpu, r2);
+ return clp_service_call(cpu, r2, RA_IGNORED);
} else {
return -1;
}
@@ -1242,7 +1242,7 @@ static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
if (s390_has_feat(S390_FEAT_ZPCI)) {
- return pcilg_service_call(cpu, r1, r2);
+ return pcilg_service_call(cpu, r1, r2, RA_IGNORED);
} else {
return -1;
}
@@ -1254,7 +1254,7 @@ static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
if (s390_has_feat(S390_FEAT_ZPCI)) {
- return pcistg_service_call(cpu, r1, r2);
+ return pcistg_service_call(cpu, r1, r2, RA_IGNORED);
} else {
return -1;
}
@@ -1270,7 +1270,7 @@ static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
cpu_synchronize_state(CPU(cpu));
fiba = get_base_disp_rxy(cpu, run, &ar);
- return stpcifc_service_call(cpu, r1, fiba, ar);
+ return stpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
} else {
return -1;
}
@@ -1302,7 +1302,7 @@ static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
if (s390_has_feat(S390_FEAT_ZPCI)) {
- return rpcit_service_call(cpu, r1, r2);
+ return rpcit_service_call(cpu, r1, r2, RA_IGNORED);
} else {
return -1;
}
@@ -1319,7 +1319,7 @@ static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
cpu_synchronize_state(CPU(cpu));
gaddr = get_base_disp_rsy(cpu, run, &ar);
- return pcistb_service_call(cpu, r1, r3, gaddr, ar);
+ return pcistb_service_call(cpu, r1, r3, gaddr, ar, RA_IGNORED);
} else {
return -1;
}
@@ -1335,7 +1335,7 @@ static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
cpu_synchronize_state(CPU(cpu));
fiba = get_base_disp_rxy(cpu, run, &ar);
- return mpcifc_service_call(cpu, r1, fiba, ar);
+ return mpcifc_service_call(cpu, r1, fiba, ar, RA_IGNORED);
} else {
return -1;
}
@@ -1451,7 +1451,7 @@ static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
cpu_synchronize_state(CPU(cpu));
r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
r3 = run->s390_sieic.ipa & 0x000f;
- handle_diag_308(&cpu->env, r1, r3);
+ handle_diag_308(&cpu->env, r1, r3, RA_IGNORED);
}
static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
@@ -1673,7 +1673,8 @@ static int handle_tsch(S390CPU *cpu)
cpu_synchronize_state(cs);
- ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
+ ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb,
+ RA_IGNORED);
if (ret < 0) {
/*
* Failure.
@@ -1851,33 +1852,12 @@ void kvm_s390_io_interrupt(uint16_t subchannel_id,
kvm_s390_floating_interrupt(&irq);
}
-static uint64_t build_channel_report_mcic(void)
-{
- uint64_t mcic;
-
- /* subclass: indicate channel report pending */
- mcic = MCIC_SC_CP |
- /* subclass modifiers: none */
- /* storage errors: none */
- /* validity bits: no damage */
- MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP |
- MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR |
- MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC;
- if (s390_has_feat(S390_FEAT_VECTOR)) {
- mcic |= MCIC_VB_VR;
- }
- if (s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
- mcic |= MCIC_VB_GS;
- }
- return mcic;
-}
-
void kvm_s390_crw_mchk(void)
{
struct kvm_s390_irq irq = {
.type = KVM_S390_MCHK,
- .u.mchk.cr14 = 1 << 28,
- .u.mchk.mcic = build_channel_report_mcic(),
+ .u.mchk.cr14 = CR14_CHANNEL_REPORT_SC,
+ .u.mchk.mcic = s390_build_validity_mcic() | MCIC_SC_CP,
};
kvm_s390_floating_interrupt(&irq);
}
@@ -1979,7 +1959,10 @@ int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
- struct kvm_s390_irq_state irq_state;
+ struct kvm_s390_irq_state irq_state = {
+ .buf = (uint64_t) cpu->irqstate,
+ .len = VCPU_IRQ_BUF_SIZE,
+ };
CPUState *cs = CPU(cpu);
int32_t bytes;
@@ -1987,9 +1970,6 @@ void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
return;
}
- irq_state.buf = (uint64_t) cpu->irqstate;
- irq_state.len = VCPU_IRQ_BUF_SIZE;
-
bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
if (bytes < 0) {
cpu->irqstate_saved_size = 0;
@@ -2003,7 +1983,10 @@ void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
CPUState *cs = CPU(cpu);
- struct kvm_s390_irq_state irq_state;
+ struct kvm_s390_irq_state irq_state = {
+ .buf = (uint64_t) cpu->irqstate,
+ .len = cpu->irqstate_saved_size,
+ };
int r;
if (cpu->irqstate_saved_size == 0) {
@@ -2014,9 +1997,6 @@ int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
return -ENOSYS;
}
- irq_state.buf = (uint64_t) cpu->irqstate;
- irq_state.len = cpu->irqstate_saved_size;
-
r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
if (r) {
error_report("Setting interrupt state failed %d", r);
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
index a1652d4849..2625d843b3 100644
--- a/target/s390x/mem_helper.c
+++ b/target/s390x/mem_helper.c
@@ -85,9 +85,7 @@ static inline void check_alignment(CPUS390XState *env, uint64_t v,
int wordsize, uintptr_t ra)
{
if (v % wordsize) {
- CPUState *cs = CPU(s390_env_get_cpu(env));
- cpu_restore_state(cs, ra);
- program_interrupt(env, PGM_SPECIFICATION, 6);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
}
}
@@ -545,8 +543,7 @@ void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
/* Bits 32-55 must contain all 0. */
if (env->regs[0] & 0xffffff00u) {
- cpu_restore_state(ENV_GET_CPU(env), ra);
- program_interrupt(env, PGM_SPECIFICATION, 6);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
}
str = get_address(env, r2);
@@ -583,8 +580,7 @@ void HELPER(srstu)(CPUS390XState *env, uint32_t r1, uint32_t r2)
/* Bits 32-47 of R0 must be zero. */
if (env->regs[0] & 0xffff0000u) {
- cpu_restore_state(ENV_GET_CPU(env), ra);
- program_interrupt(env, PGM_SPECIFICATION, 6);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
}
str = get_address(env, r2);
@@ -1600,8 +1596,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1,
return cc;
spec_exception:
- cpu_restore_state(ENV_GET_CPU(env), ra);
- program_interrupt(env, PGM_SPECIFICATION, 6);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
g_assert_not_reached();
}
@@ -1865,8 +1860,7 @@ void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4)
uint16_t entries, i, index = 0;
if (r2 & 0xff000) {
- cpu_restore_state(cs, ra);
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
}
if (!(r2 & 0x800)) {
@@ -2014,8 +2008,7 @@ uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
/* XXX incomplete - has more corner cases */
if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
- cpu_restore_state(cs, GETPC());
- program_interrupt(env, PGM_SPECIAL_OP, 2);
+ s390_program_interrupt(env, PGM_SPECIAL_OP, 2, GETPC());
}
old_exc = cs->exception_index;
@@ -2185,7 +2178,6 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
const uint8_t psw_as = (env->psw.mask & PSW_MASK_ASC) >> PSW_SHIFT_ASC;
const uint64_t r0 = env->regs[0];
const uintptr_t ra = GETPC();
- CPUState *cs = CPU(s390_env_get_cpu(env));
uint8_t dest_key, dest_as, dest_k, dest_a;
uint8_t src_key, src_as, src_k, src_a;
uint64_t val;
@@ -2195,8 +2187,7 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
__func__, dest, src, len);
if (!(env->psw.mask & PSW_MASK_DAT)) {
- cpu_restore_state(cs, ra);
- program_interrupt(env, PGM_SPECIAL_OP, 6);
+ s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
}
/* OAC (operand access control) for the first operand -> dest */
@@ -2227,17 +2218,14 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
}
if (dest_a && dest_as == AS_HOME && (env->psw.mask & PSW_MASK_PSTATE)) {
- cpu_restore_state(cs, ra);
- program_interrupt(env, PGM_SPECIAL_OP, 6);
+ s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
}
if (!(env->cregs[0] & CR0_SECONDARY) &&
(dest_as == AS_SECONDARY || src_as == AS_SECONDARY)) {
- cpu_restore_state(cs, ra);
- program_interrupt(env, PGM_SPECIAL_OP, 6);
+ s390_program_interrupt(env, PGM_SPECIAL_OP, 6, ra);
}
if (!psw_key_valid(env, dest_key) || !psw_key_valid(env, src_key)) {
- cpu_restore_state(cs, ra);
- program_interrupt(env, PGM_PRIVILEGED, 6);
+ s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
}
len = wrap_length(env, len);
@@ -2251,8 +2239,7 @@ uint32_t HELPER(mvcos)(CPUS390XState *env, uint64_t dest, uint64_t src,
(env->psw.mask & PSW_MASK_PSTATE)) {
qemu_log_mask(LOG_UNIMP, "%s: AR-mode and PSTATE support missing\n",
__func__);
- cpu_restore_state(cs, ra);
- program_interrupt(env, PGM_ADDRESSING, 6);
+ s390_program_interrupt(env, PGM_ADDRESSING, 6, ra);
}
/* FIXME: a) LAP
diff --git a/target/s390x/misc_helper.c b/target/s390x/misc_helper.c
index d272851e1c..86da6aab7e 100644
--- a/target/s390x/misc_helper.c
+++ b/target/s390x/misc_helper.c
@@ -45,22 +45,6 @@
#define HELPER_LOG(x...)
#endif
-/* Raise an exception dynamically from a helper function. */
-void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
- uintptr_t retaddr)
-{
- CPUState *cs = CPU(s390_env_get_cpu(env));
-
- cs->exception_index = EXCP_PGM;
- env->int_pgm_code = excp;
- env->int_pgm_ilen = ILEN_AUTO;
-
- /* Use the (ultimate) callers address to find the insn that trapped. */
- cpu_restore_state(cs, retaddr);
-
- cpu_loop_exit(cs);
-}
-
/* Raise an exception statically from a TB. */
void HELPER(exception)(CPUS390XState *env, uint32_t excp)
{
@@ -71,6 +55,21 @@ void HELPER(exception)(CPUS390XState *env, uint32_t excp)
cpu_loop_exit(cs);
}
+/* Store CPU Timer (also used for EXTRACT CPU TIME) */
+uint64_t HELPER(stpt)(CPUS390XState *env)
+{
+#if defined(CONFIG_USER_ONLY)
+ /*
+ * Fake a descending CPU timer. We could get negative values here,
+ * but we don't care, as it is up to the OS to decide when to process that
+ * interrupt and reset the timer to a value > 0.
+ */
+ return UINT64_MAX - (uint64_t)cpu_get_host_ticks();
+#else
+ return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+#endif
+}
+
#ifndef CONFIG_USER_ONLY
/* SCLP service call */
@@ -78,11 +77,10 @@ uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
qemu_mutex_lock_iothread();
int r = sclp_service_call(env, r1, r2);
+ qemu_mutex_unlock_iothread();
if (r < 0) {
- program_interrupt(env, -r, 4);
- r = 0;
+ s390_program_interrupt(env, -r, 4, GETPC());
}
- qemu_mutex_unlock_iothread();
return r;
}
@@ -104,7 +102,7 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
case 0x308:
/* ipl */
qemu_mutex_lock_iothread();
- handle_diag_308(env, r1, r3);
+ handle_diag_308(env, r1, r3, GETPC());
qemu_mutex_unlock_iothread();
r = 0;
break;
@@ -118,7 +116,7 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
}
if (r) {
- program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO);
+ s390_program_interrupt(env, PGM_SPECIFICATION, ILEN_AUTO, GETPC());
}
}
@@ -163,6 +161,17 @@ void HELPER(sckc)(CPUS390XState *env, uint64_t time)
timer_mod(env->tod_timer, env->tod_basetime + time);
}
+/* Set Tod Programmable Field */
+void HELPER(sckpf)(CPUS390XState *env, uint64_t r0)
+{
+ uint32_t val = r0;
+
+ if (val & 0xffff0000) {
+ s390_program_interrupt(env, PGM_SPECIFICATION, 2, GETPC());
+ }
+ env->todpr = val;
+}
+
/* Store Clock Comparator */
uint64_t HELPER(stckc)(CPUS390XState *env)
{
@@ -184,12 +193,6 @@ void HELPER(spt)(CPUS390XState *env, uint64_t time)
timer_mod(env->cpu_timer, env->cputm);
}
-/* Store CPU Timer */
-uint64_t HELPER(stpt)(CPUS390XState *env)
-{
- return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
-}
-
/* Store System Information */
uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
uint64_t r0, uint64_t r1)
@@ -201,7 +204,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
/* valid function code, invalid reserved bits */
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, GETPC());
}
sel1 = r0 & STSI_R0_SEL1_MASK;
@@ -339,7 +342,7 @@ void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
- ioinst_handle_xsch(cpu, r1);
+ ioinst_handle_xsch(cpu, r1, GETPC());
qemu_mutex_unlock_iothread();
}
@@ -347,7 +350,7 @@ void HELPER(csch)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
- ioinst_handle_csch(cpu, r1);
+ ioinst_handle_csch(cpu, r1, GETPC());
qemu_mutex_unlock_iothread();
}
@@ -355,7 +358,7 @@ void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
- ioinst_handle_hsch(cpu, r1);
+ ioinst_handle_hsch(cpu, r1, GETPC());
qemu_mutex_unlock_iothread();
}
@@ -363,7 +366,7 @@ void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
- ioinst_handle_msch(cpu, r1, inst >> 16);
+ ioinst_handle_msch(cpu, r1, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
@@ -371,7 +374,7 @@ void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
- ioinst_handle_rchp(cpu, r1);
+ ioinst_handle_rchp(cpu, r1, GETPC());
qemu_mutex_unlock_iothread();
}
@@ -379,7 +382,25 @@ void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
- ioinst_handle_rsch(cpu, r1);
+ ioinst_handle_rsch(cpu, r1, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(sal)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ qemu_mutex_lock_iothread();
+ ioinst_handle_sal(cpu, r1, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ qemu_mutex_lock_iothread();
+ ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
@@ -387,7 +408,16 @@ void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
- ioinst_handle_ssch(cpu, r1, inst >> 16);
+ ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC());
+ qemu_mutex_unlock_iothread();
+}
+
+void HELPER(stcrw)(CPUS390XState *env, uint64_t inst)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ qemu_mutex_lock_iothread();
+ ioinst_handle_stcrw(cpu, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
@@ -395,7 +425,7 @@ void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
- ioinst_handle_stsch(cpu, r1, inst >> 16);
+ ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
@@ -403,7 +433,7 @@ void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
- ioinst_handle_tsch(cpu, r1, inst >> 16);
+ ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
@@ -411,7 +441,7 @@ void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
{
S390CPU *cpu = s390_env_get_cpu(env);
qemu_mutex_lock_iothread();
- ioinst_handle_chsc(cpu, inst >> 16);
+ ioinst_handle_chsc(cpu, inst >> 16, GETPC());
qemu_mutex_unlock_iothread();
}
#endif
@@ -429,7 +459,7 @@ void HELPER(per_check_exception)(CPUS390XState *env)
* of EXECUTE, while per_address contains the target of EXECUTE.
*/
ilen = get_ilen(cpu_ldub_code(env, env->per_address));
- program_interrupt(env, PGM_PER, ilen);
+ s390_program_interrupt(env, PGM_PER, ilen, GETPC());
}
}
@@ -519,8 +549,7 @@ uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr)
int i;
if (addr & 0x7) {
- cpu_restore_state(ENV_GET_CPU(env), ra);
- program_interrupt(env, PGM_SPECIFICATION, 4);
+ s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
}
prepare_stfl();
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
index 31e3f3f415..f477cc006a 100644
--- a/target/s390x/mmu_helper.c
+++ b/target/s390x/mmu_helper.c
@@ -22,6 +22,7 @@
#include "internal.h"
#include "kvm_s390x.h"
#include "sysemu/kvm.h"
+#include "exec/exec-all.h"
#include "trace.h"
#include "hw/s390x/storage-keys.h"
@@ -63,7 +64,9 @@ static void trigger_access_exception(CPUS390XState *env, uint32_t type,
kvm_s390_access_exception(cpu, type, tec);
} else {
CPUState *cs = CPU(cpu);
- stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
+ if (type != PGM_ADDRESSING) {
+ stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
+ }
trigger_pgm_exception(env, type, ilen);
}
}
@@ -442,7 +445,8 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
/**
* translate_pages: Translate a set of consecutive logical page addresses
- * to absolute addresses
+ * to absolute addresses. This function is used for TCG and old KVM without
+ * the MEMOP interface.
*/
static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
target_ulong *pages, bool is_write)
@@ -458,7 +462,7 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
}
if (!address_space_access_valid(&address_space_memory, pages[i],
TARGET_PAGE_SIZE, is_write)) {
- program_interrupt(env, PGM_ADDRESSING, ILEN_AUTO);
+ trigger_access_exception(env, PGM_ADDRESSING, ILEN_AUTO, 0);
return -EFAULT;
}
addr += TARGET_PAGE_SIZE;
@@ -478,6 +482,9 @@ static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
*
* Copy from/to guest memory using logical addresses. Note that we inject a
* program interrupt in case there is an error while accessing the memory.
+ *
+ * This function will always return (also for TCG); make sure to call
+ * s390_cpu_virt_mem_handle_exc() to properly exit the CPU loop.
*/
int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
int len, bool is_write)
@@ -514,6 +521,16 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
return ret;
}
+void s390_cpu_virt_mem_handle_exc(S390CPU *cpu, uintptr_t ra)
+{
+ /* KVM will handle the interrupt automatically; TCG has to exit the TB */
+#ifdef CONFIG_TCG
+ if (tcg_enabled()) {
+ cpu_loop_exit_restore(CPU(cpu), ra);
+ }
+#endif
+}
+
/**
* Translate a real address into a physical (absolute) address.
* @param raddr the real address
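For illustration only, the new contract introduced by s390_cpu_virt_mem_handle_exc() can be exercised by a caller shaped like the hypothetical helper below; read_guest_buf() and its ra parameter are illustrative and not part of this patch:

static int read_guest_buf(S390CPU *cpu, vaddr laddr, void *buf,
                          int len, uintptr_t ra)
{
    /* Injects a program interrupt on error, but always returns. */
    int ret = s390_cpu_virt_mem_rw(cpu, laddr, 0, buf, len, false);

    if (ret) {
        /* Under TCG this exits the CPU loop and does not return;
         * under KVM it is a no-op and the error is simply propagated. */
        s390_cpu_virt_mem_handle_exc(cpu, ra);
    }
    return ret;
}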
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
index 85d0a6c3af..ac55886792 100644
--- a/target/s390x/translate.c
+++ b/target/s390x/translate.c
@@ -240,12 +240,6 @@ static void update_cc_op(DisasContext *s)
}
}
-static void potential_page_fault(DisasContext *s)
-{
- update_psw_addr(s);
- update_cc_op(s);
-}
-
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
return (uint64_t)cpu_lduw_code(env, pc);
@@ -440,11 +434,9 @@ static void set_cc_static(DisasContext *s)
/* calculates cc into cc_op */
static void gen_op_calc_cc(DisasContext *s)
{
- TCGv_i32 local_cc_op;
- TCGv_i64 dummy;
+ TCGv_i32 local_cc_op = NULL;
+ TCGv_i64 dummy = NULL;
- TCGV_UNUSED_I32(local_cc_op);
- TCGV_UNUSED_I64(dummy);
switch (s->cc_op) {
default:
dummy = tcg_const_i64(0);
@@ -534,10 +526,10 @@ static void gen_op_calc_cc(DisasContext *s)
tcg_abort();
}
- if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
+ if (local_cc_op) {
tcg_temp_free_i32(local_cc_op);
}
- if (!TCGV_IS_UNUSED_I64(dummy)) {
+ if (dummy) {
tcg_temp_free_i64(dummy);
}
@@ -1195,7 +1187,7 @@ static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
goto egress;
}
} else {
- if (TCGV_IS_UNUSED_I64(cdest)) {
+ if (!cdest) {
/* E.g. bcr %r0 -> no branch. */
ret = NO_EXIT;
goto egress;
@@ -1370,6 +1362,27 @@ static ExitStatus op_addc(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_asi(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+
+ if (!s390_has_feat(S390_FEAT_STFLE_45)) {
+ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+ } else {
+ /* Perform the atomic addition in memory. */
+ tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+ s->insn->data);
+ }
+
+ /* Recompute also for the atomic case: needed for setting the CC. */
+ tcg_gen_add_i64(o->out, o->in1, o->in2);
+
+ if (!s390_has_feat(S390_FEAT_STFLE_45)) {
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+ }
+ return NO_EXIT;
+}
+
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
@@ -1412,10 +1425,31 @@ static ExitStatus op_andi(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_ni(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+ } else {
+ /* Perform the atomic operation in memory. */
+ tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+ s->insn->data);
+ }
+
+ /* Recompute also for the atomic case: needed for setting the CC. */
+ tcg_gen_and_i64(o->out, o->in1, o->in2);
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+ }
+ return NO_EXIT;
+}
+
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
- if (!TCGV_IS_UNUSED_I64(o->in2)) {
+ if (o->in2) {
tcg_gen_mov_i64(psw_addr, o->in2);
per_branch(s, false);
return EXIT_PC_UPDATED;
@@ -2124,9 +2158,6 @@ static ExitStatus op_diag(DisasContext *s, DisasOps *o)
TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
check_privileged(s);
- update_psw_addr(s);
- gen_op_calc_cc(s);
-
gen_helper_diag(cpu_env, r1, r3, func_code);
tcg_temp_free_i32(func_code);
@@ -2942,7 +2973,8 @@ static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
/* In a parallel context, stop the world and single step. */
if (tb_cflags(s->tb) & CF_PARALLEL) {
- potential_page_fault(s);
+ update_psw_addr(s);
+ update_cc_op(s);
gen_exception(EXCP_ATOMIC);
return EXIT_NORETURN;
}
@@ -2997,7 +3029,7 @@ static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
o->out = o->in2;
o->g_out = o->g_in2;
- TCGV_UNUSED_I64(o->in2);
+ o->in2 = NULL;
o->g_in2 = false;
return NO_EXIT;
}
@@ -3009,7 +3041,7 @@ static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
o->out = o->in2;
o->g_out = o->g_in2;
- TCGV_UNUSED_I64(o->in2);
+ o->in2 = NULL;
o->g_in2 = false;
switch (s->tb->flags & FLAG_MASK_ASC) {
@@ -3043,8 +3075,8 @@ static ExitStatus op_movx(DisasContext *s, DisasOps *o)
o->out2 = o->in2;
o->g_out = o->g_in1;
o->g_out2 = o->g_in2;
- TCGV_UNUSED_I64(o->in1);
- TCGV_UNUSED_I64(o->in2);
+ o->in1 = NULL;
+ o->in2 = NULL;
o->g_in1 = o->g_in2 = false;
return NO_EXIT;
}
@@ -3365,6 +3397,27 @@ static ExitStatus op_ori(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_oi(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+ } else {
+ /* Perform the atomic operation in memory. */
+ tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+ s->insn->data);
+ }
+
+ /* Recompute also for the atomic case: needed for setting the CC. */
+ tcg_gen_or_i64(o->out, o->in1, o->in2);
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+ }
+ return NO_EXIT;
+}
+
static ExitStatus op_pack(DisasContext *s, DisasOps *o)
{
TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
@@ -3704,7 +3757,6 @@ static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
set_cc_static(s);
return NO_EXIT;
@@ -3863,6 +3915,36 @@ static ExitStatus op_spm(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_ectg(DisasContext *s, DisasOps *o)
+{
+ int b1 = get_field(s->fields, b1);
+ int d1 = get_field(s->fields, d1);
+ int b2 = get_field(s->fields, b2);
+ int d2 = get_field(s->fields, d2);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ /* fetch all operands first */
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_addi_i64(o->in1, regs[b1], d1);
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_addi_i64(o->in2, regs[b2], d2);
+ o->addr1 = get_address(s, 0, r3, 0);
+
+ /* load the third operand into r3 before modifying anything */
+ tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));
+
+ /* subtract CPU timer from first operand and store in GR0 */
+ gen_helper_stpt(tmp, cpu_env);
+ tcg_gen_sub_i64(regs[0], o->in1, tmp);
+
+ /* store second operand in GR1 */
+ tcg_gen_mov_i64(regs[1], o->in2);
+
+ tcg_temp_free_i64(tmp);
+ return NO_EXIT;
+}
+
#ifndef CONFIG_USER_ONLY
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
@@ -3906,7 +3988,10 @@ static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
TCGv_i64 c1 = tcg_temp_new_i64();
TCGv_i64 c2 = tcg_temp_new_i64();
+ TCGv_i64 todpr = tcg_temp_new_i64();
gen_helper_stck(c1, cpu_env);
+ /* 16-bit value stored in a uint32_t (only valid bits set) */
+ tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
/* Shift the 64-bit value into its place as a zero-extended
104-bit value. Note that "bit positions 64-103 are always
non-zero so that they compare differently to STCK"; we set
@@ -3914,11 +3999,13 @@ static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
tcg_gen_shli_i64(c2, c1, 56);
tcg_gen_shri_i64(c1, c1, 8);
tcg_gen_ori_i64(c2, c2, 0x10000);
+ tcg_gen_or_i64(c2, c2, todpr);
tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
tcg_temp_free_i64(c1);
tcg_temp_free_i64(c2);
+ tcg_temp_free_i64(todpr);
/* ??? We don't implement clock states. */
gen_op_movi_cc(s, 0);
return NO_EXIT;
@@ -3931,6 +4018,13 @@ static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_sckpf(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_sckpf(cpu_env, regs[0]);
+ return NO_EXIT;
+}
+
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
check_privileged(s);
@@ -3992,7 +4086,6 @@ static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
set_cc_static(s);
return NO_EXIT;
@@ -4008,7 +4101,6 @@ static ExitStatus op_spx(DisasContext *s, DisasOps *o)
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_xsch(cpu_env, regs[1]);
set_cc_static(s);
return NO_EXIT;
@@ -4017,7 +4109,6 @@ static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_csch(cpu_env, regs[1]);
set_cc_static(s);
return NO_EXIT;
@@ -4026,7 +4117,6 @@ static ExitStatus op_csch(DisasContext *s, DisasOps *o)
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_hsch(cpu_env, regs[1]);
set_cc_static(s);
return NO_EXIT;
@@ -4035,7 +4125,6 @@ static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_msch(cpu_env, regs[1], o->in2);
set_cc_static(s);
return NO_EXIT;
@@ -4044,7 +4133,6 @@ static ExitStatus op_msch(DisasContext *s, DisasOps *o)
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_rchp(cpu_env, regs[1]);
set_cc_static(s);
return NO_EXIT;
@@ -4053,16 +4141,43 @@ static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_rsch(cpu_env, regs[1]);
set_cc_static(s);
return NO_EXIT;
}
+static ExitStatus op_sal(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_sal(cpu_env, regs[1]);
+ return NO_EXIT;
+}
+
+static ExitStatus op_schm(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_siga(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ /* As in the KVM code: not provided, so set CC = 3 (subchannel not operational) */
+ gen_op_movi_cc(s, 3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stcps(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ /* The instruction is suppressed if not provided. */
+ return NO_EXIT;
+}
+
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_ssch(cpu_env, regs[1], o->in2);
set_cc_static(s);
return NO_EXIT;
@@ -4071,16 +4186,22 @@ static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_stsch(cpu_env, regs[1], o->in2);
set_cc_static(s);
return NO_EXIT;
}
+static ExitStatus op_stcrw(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_stcrw(cpu_env, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_tsch(cpu_env, regs[1], o->in2);
set_cc_static(s);
return NO_EXIT;
@@ -4089,7 +4210,6 @@ static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
check_privileged(s);
- potential_page_fault(s);
gen_helper_chsc(cpu_env, o->in2);
set_cc_static(s);
return NO_EXIT;
@@ -4622,6 +4742,27 @@ static ExitStatus op_xori(DisasContext *s, DisasOps *o)
return NO_EXIT;
}
+static ExitStatus op_xi(DisasContext *s, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
+ } else {
+ /* Perform the atomic operation in memory. */
+ tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
+ s->insn->data);
+ }
+
+ /* Recompute also for the atomic case: needed for setting the CC. */
+ tcg_gen_xor_i64(o->out, o->in1, o->in2);
+
+ if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
+ tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
+ }
+ return NO_EXIT;
+}
+
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
o->out = tcg_const_i64(0);
@@ -5566,6 +5707,7 @@ enum DisasInsnEnum {
#define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
+#define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
static const DisasInsn insn_info[] = {
#include "insn-data.def"
@@ -5801,11 +5943,11 @@ static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
s->insn = insn;
s->fields = &f;
o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
- TCGV_UNUSED_I64(o.out);
- TCGV_UNUSED_I64(o.out2);
- TCGV_UNUSED_I64(o.in1);
- TCGV_UNUSED_I64(o.in2);
- TCGV_UNUSED_I64(o.addr1);
+ o.out = NULL;
+ o.out2 = NULL;
+ o.in1 = NULL;
+ o.in2 = NULL;
+ o.addr1 = NULL;
/* Implement the instruction. */
if (insn->help_in1) {
@@ -5828,19 +5970,19 @@ static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
}
/* Free any temporaries created by the helpers. */
- if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
+ if (o.out && !o.g_out) {
tcg_temp_free_i64(o.out);
}
- if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
+ if (o.out2 && !o.g_out2) {
tcg_temp_free_i64(o.out2);
}
- if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
+ if (o.in1 && !o.g_in1) {
tcg_temp_free_i64(o.in1);
}
- if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
+ if (o.in2 && !o.g_in2) {
tcg_temp_free_i64(o.in2);
}
- if (!TCGV_IS_UNUSED_I64(o.addr1)) {
+ if (o.addr1) {
tcg_temp_free_i64(o.addr1);
}
@@ -5851,9 +5993,6 @@ static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
tcg_gen_movi_i64(psw_addr, s->next_pc);
}
- /* Save off cc. */
- update_cc_op(s);
-
/* Call the helper to check for a possible PER exception. */
gen_helper_per_check_exception(cpu_env);
}
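The NI/OI/XI/ASI handlers above pick between a load/modify/store done as separate TCG ops and a single host-atomic fetch-and-op when the relevant facility is installed (S390_FEAT_STFLE_45 for ASI, S390_FEAT_INTERLOCKED_ACCESS_2 for NI/OI/XI), and in both cases the result is recomputed once more only to derive the condition code. A rough host-level model of the OI case in C11 atomics, purely illustrative and not QEMU API:

#include <stdatomic.h>
#include <stdint.h>

/* Non-interlocked variant: separate load, OR and store. */
static uint64_t oi_plain(uint64_t *mem, uint64_t mask)
{
    uint64_t old = *mem;

    *mem = old | mask;
    return old | mask;                /* value the CC is derived from */
}

/* Interlocked variant: one atomic fetch-or, then recompute the
 * result locally just for the CC, mirroring the TCG code above. */
static uint64_t oi_interlocked(_Atomic uint64_t *mem, uint64_t mask)
{
    uint64_t old = atomic_fetch_or(mem, mask);

    return old | mask;                /* value the CC is derived from */
}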
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index 960b46870d..a2c26e0597 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -188,7 +188,9 @@ typedef struct CPUSH4State {
tlb_t itlb[ITLB_SIZE]; /* instruction translation table */
tlb_t utlb[UTLB_SIZE]; /* unified translation table */
- uint32_t ldst;
+ /* LDST = LOCK_ADDR != -1. */
+ uint32_t lock_addr;
+ uint32_t lock_value;
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
index 28d93c2543..680b583e53 100644
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -171,6 +171,7 @@ void superh_cpu_do_interrupt(CPUState *cs)
env->spc = env->pc;
env->sgr = env->gregs[15];
env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
+ env->lock_addr = -1;
if (env->flags & DELAY_SLOT_MASK) {
/* Branch instruction should be executed again before delay slot. */
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index 703020fe87..012156b97b 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -25,28 +25,27 @@
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
-
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
-
+#include "exec/translator.h"
#include "trace-tcg.h"
#include "exec/log.h"
typedef struct DisasContext {
- struct TranslationBlock *tb;
- target_ulong pc;
- uint16_t opcode;
- uint32_t tbflags; /* should stay unmodified during the TB translation */
- uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
- int bstate;
+ DisasContextBase base;
+
+ uint32_t tbflags; /* should stay unmodified during the TB translation */
+ uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
int memidx;
int gbank;
int fbank;
uint32_t delayed_pc;
- int singlestep_enabled;
uint32_t features;
- int has_movcal;
+
+ uint16_t opcode;
+
+ bool has_movcal;
} DisasContext;
#if defined(CONFIG_USER_ONLY)
@@ -55,21 +54,18 @@ typedef struct DisasContext {
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#endif
-enum {
- BS_NONE = 0, /* We go out of the TB without reaching a branch or an
- * exception condition
- */
- BS_STOP = 1, /* We want to stop translation for any reason */
- BS_BRANCH = 2, /* We reached a branch condition */
- BS_EXCP = 3, /* We reached an exception condition */
-};
+/* Target-specific values for ctx->base.is_jmp. */
+/* We want to exit back to the cpu loop for some reason.
+ Usually this is to recognize interrupts immediately. */
+#define DISAS_STOP DISAS_TARGET_0
/* global register indexes */
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
-static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
+static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
+static TCGv cpu_lock_addr, cpu_lock_value;
static TCGv cpu_fregs[32];
/* internal register indexes */
@@ -147,8 +143,12 @@ void sh4_translate_init(void)
offsetof(CPUSH4State,
delayed_cond),
"_delayed_cond_");
- cpu_ldst = tcg_global_mem_new_i32(cpu_env,
- offsetof(CPUSH4State, ldst), "_ldst_");
+ cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, lock_addr),
+ "_lock_addr_");
+ cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, lock_value),
+ "_lock_value_");
for (i = 0; i < 32; i++)
cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
@@ -209,7 +209,7 @@ static void gen_write_sr(TCGv src)
static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
if (save_pc) {
- tcg_gen_movi_i32(cpu_pc, ctx->pc);
+ tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
}
if (ctx->delayed_pc != (uint32_t) -1) {
tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
@@ -227,11 +227,11 @@ static inline bool use_exit_tb(DisasContext *ctx)
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
/* Use a direct jump if in same page and singlestep not enabled */
- if (unlikely(ctx->singlestep_enabled || use_exit_tb(ctx))) {
+ if (unlikely(ctx->base.singlestep_enabled || use_exit_tb(ctx))) {
return false;
}
#ifndef CONFIG_USER_ONLY
- return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+ return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
return true;
#endif
@@ -242,10 +242,10 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(cpu_pc, dest);
- tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
+ tcg_gen_exit_tb((uintptr_t)ctx->base.tb + n);
} else {
tcg_gen_movi_i32(cpu_pc, dest);
- if (ctx->singlestep_enabled) {
+ if (ctx->base.singlestep_enabled) {
gen_helper_debug(cpu_env);
} else if (use_exit_tb(ctx)) {
tcg_gen_exit_tb(0);
@@ -253,6 +253,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
tcg_gen_lookup_and_goto_ptr();
}
}
+ ctx->base.is_jmp = DISAS_NORETURN;
}
static void gen_jump(DisasContext * ctx)
@@ -262,13 +263,14 @@ static void gen_jump(DisasContext * ctx)
delayed jump, as immediate jumps are conditional jumps */
tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
tcg_gen_discard_i32(cpu_delayed_pc);
- if (ctx->singlestep_enabled) {
+ if (ctx->base.singlestep_enabled) {
gen_helper_debug(cpu_env);
} else if (use_exit_tb(ctx)) {
tcg_gen_exit_tb(0);
} else {
tcg_gen_lookup_and_goto_ptr();
}
+ ctx->base.is_jmp = DISAS_NORETURN;
} else {
gen_goto_tb(ctx, 0, ctx->delayed_pc);
}
@@ -298,8 +300,8 @@ static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
gen_goto_tb(ctx, 0, dest);
gen_set_label(l1);
- gen_goto_tb(ctx, 1, ctx->pc + 2);
- ctx->bstate = BS_BRANCH;
+ gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
+ ctx->base.is_jmp = DISAS_NORETURN;
}
/* Delayed conditional jump (bt or bf) */
@@ -322,11 +324,12 @@ static void gen_delayed_conditional_jump(DisasContext * ctx)
gen_jump(ctx);
gen_set_label(l1);
+ ctx->base.is_jmp = DISAS_NEXT;
return;
}
tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
- gen_goto_tb(ctx, 1, ctx->pc + 2);
+ gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
gen_set_label(l1);
gen_jump(ctx);
}
@@ -463,7 +466,7 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
ctx->envflags |= DELAY_SLOT_RTE;
ctx->delayed_pc = (uint32_t) - 1;
- ctx->bstate = BS_STOP;
+ ctx->base.is_jmp = DISAS_STOP;
return;
case 0x0058: /* sets */
tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
@@ -474,23 +477,23 @@ static void _decode_opc(DisasContext * ctx)
case 0xfbfd: /* frchg */
CHECK_FPSCR_PR_0
tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
- ctx->bstate = BS_STOP;
+ ctx->base.is_jmp = DISAS_STOP;
return;
case 0xf3fd: /* fschg */
CHECK_FPSCR_PR_0
tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
- ctx->bstate = BS_STOP;
+ ctx->base.is_jmp = DISAS_STOP;
return;
case 0xf7fd: /* fpchg */
CHECK_SH4A
tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
- ctx->bstate = BS_STOP;
+ ctx->base.is_jmp = DISAS_STOP;
return;
case 0x0009: /* nop */
return;
case 0x001b: /* sleep */
CHECK_PRIVILEGED
- tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
+ tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
gen_helper_sleep(cpu_env);
return;
}
@@ -517,23 +520,24 @@ static void _decode_opc(DisasContext * ctx)
/* Detect the start of a gUSA region. If so, update envflags
and end the TB. This will allow us to see the end of the
region (stored in R0) in the next TB. */
- if (B11_8 == 15 && B7_0s < 0 && (tb_cflags(ctx->tb) & CF_PARALLEL)) {
+ if (B11_8 == 15 && B7_0s < 0 &&
+ (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
- ctx->bstate = BS_STOP;
+ ctx->base.is_jmp = DISAS_STOP;
}
#endif
tcg_gen_movi_i32(REG(B11_8), B7_0s);
return;
case 0x9000: /* mov.w @(disp,PC),Rn */
{
- TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
+ TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
tcg_temp_free(addr);
}
return;
case 0xd000: /* mov.l @(disp,PC),Rn */
{
- TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
+ TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
tcg_temp_free(addr);
}
@@ -543,13 +547,13 @@ static void _decode_opc(DisasContext * ctx)
return;
case 0xa000: /* bra disp */
CHECK_NOT_DELAY_SLOT
- ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
+ ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
ctx->envflags |= DELAY_SLOT;
return;
case 0xb000: /* bsr disp */
CHECK_NOT_DELAY_SLOT
- tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
- ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
+ tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
+ ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
ctx->envflags |= DELAY_SLOT;
return;
}
@@ -601,6 +605,7 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_subi_i32(addr, REG(B11_8), 4);
tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
}
return;
case 0x6004: /* mov.b @Rm+,Rn */
@@ -668,7 +673,7 @@ static void _decode_opc(DisasContext * ctx)
return;
case 0x6008: /* swap.b Rm,Rn */
{
- TCGv low = tcg_temp_new();;
+ TCGv low = tcg_temp_new();
tcg_gen_ext16u_i32(low, REG(B7_4));
tcg_gen_bswap16_i32(low, low);
tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
@@ -1176,22 +1181,22 @@ static void _decode_opc(DisasContext * ctx)
return;
case 0x8b00: /* bf label */
CHECK_NOT_DELAY_SLOT
- gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, false);
+ gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
return;
case 0x8f00: /* bf/s label */
CHECK_NOT_DELAY_SLOT
tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
- ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
+ ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
ctx->envflags |= DELAY_SLOT_CONDITIONAL;
return;
case 0x8900: /* bt label */
CHECK_NOT_DELAY_SLOT
- gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, true);
+ gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
return;
case 0x8d00: /* bt/s label */
CHECK_NOT_DELAY_SLOT
tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
- ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
+ ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
ctx->envflags |= DELAY_SLOT_CONDITIONAL;
return;
case 0x8800: /* cmp/eq #imm,R0 */
@@ -1278,7 +1283,8 @@ static void _decode_opc(DisasContext * ctx)
}
return;
case 0xc700: /* mova @(disp,PC),R0 */
- tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
+ tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
+ 4 + B7_0 * 4) & ~3);
return;
case 0xcb00: /* or #imm,R0 */
tcg_gen_ori_i32(REG(0), REG(0), B7_0);
@@ -1304,7 +1310,7 @@ static void _decode_opc(DisasContext * ctx)
imm = tcg_const_i32(B7_0);
gen_helper_trapa(cpu_env, imm);
tcg_temp_free(imm);
- ctx->bstate = BS_EXCP;
+ ctx->base.is_jmp = DISAS_NORETURN;
}
return;
case 0xc800: /* tst #imm,R0 */
@@ -1372,13 +1378,13 @@ static void _decode_opc(DisasContext * ctx)
switch (ctx->opcode & 0xf0ff) {
case 0x0023: /* braf Rn */
CHECK_NOT_DELAY_SLOT
- tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
+ tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
ctx->envflags |= DELAY_SLOT;
ctx->delayed_pc = (uint32_t) - 1;
return;
case 0x0003: /* bsrf Rn */
CHECK_NOT_DELAY_SLOT
- tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
+ tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
ctx->envflags |= DELAY_SLOT;
ctx->delayed_pc = (uint32_t) - 1;
@@ -1401,7 +1407,7 @@ static void _decode_opc(DisasContext * ctx)
return;
case 0x400b: /* jsr @Rn */
CHECK_NOT_DELAY_SLOT
- tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
+ tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
ctx->envflags |= DELAY_SLOT;
ctx->delayed_pc = (uint32_t) - 1;
@@ -1413,7 +1419,7 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
gen_write_sr(val);
tcg_temp_free(val);
- ctx->bstate = BS_STOP;
+ ctx->base.is_jmp = DISAS_STOP;
}
return;
case 0x4007: /* ldc.l @Rm+,SR */
@@ -1425,7 +1431,7 @@ static void _decode_opc(DisasContext * ctx)
gen_write_sr(val);
tcg_temp_free(val);
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
- ctx->bstate = BS_STOP;
+ ctx->base.is_jmp = DISAS_STOP;
}
return;
case 0x0002: /* stc SR,Rn */
@@ -1487,7 +1493,7 @@ static void _decode_opc(DisasContext * ctx)
case 0x406a: /* lds Rm,FPSCR */
CHECK_FPU_ENABLED
gen_helper_ld_fpscr(cpu_env, REG(B11_8));
- ctx->bstate = BS_STOP;
+ ctx->base.is_jmp = DISAS_STOP;
return;
case 0x4066: /* lds.l @Rm+,FPSCR */
CHECK_FPU_ENABLED
@@ -1497,7 +1503,7 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
gen_helper_ld_fpscr(cpu_env, addr);
tcg_temp_free(addr);
- ctx->bstate = BS_STOP;
+ ctx->base.is_jmp = DISAS_STOP;
}
return;
case 0x006a: /* sts FPSCR,Rn */
@@ -1524,6 +1530,7 @@ static void _decode_opc(DisasContext * ctx)
tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
gen_helper_movcal(cpu_env, REG(B11_8), val);
tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_temp_free(val);
}
ctx->has_movcal = 1;
return;
@@ -1547,31 +1554,64 @@ static void _decode_opc(DisasContext * ctx)
return;
case 0x0073:
/* MOVCO.L
- LDST -> T
- If (T == 1) R0 -> (Rn)
- 0 -> LDST
- */
+ * LDST -> T
+ * If (T == 1) R0 -> (Rn)
+ * 0 -> LDST
+ *
+ * The above description doesn't work in a parallel context.
+ * Since we currently support no smp boards, this implies user-mode.
+ * But we can still support the official mechanism while user-mode
+ * is single-threaded. */
CHECK_SH4A
{
- TCGLabel *label = gen_new_label();
- tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
- tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
- tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
- gen_set_label(label);
- tcg_gen_movi_i32(cpu_ldst, 0);
- return;
+ TCGLabel *fail = gen_new_label();
+ TCGLabel *done = gen_new_label();
+
+ if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
+ TCGv tmp;
+
+ tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
+ cpu_lock_addr, fail);
+ tmp = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
+ REG(0), ctx->memidx, MO_TEUL);
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
+ tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_gen_movi_i32(cpu_sr_t, 1);
+ }
+ tcg_gen_br(done);
+
+ gen_set_label(fail);
+ tcg_gen_movi_i32(cpu_sr_t, 0);
+
+ gen_set_label(done);
+ tcg_gen_movi_i32(cpu_lock_addr, -1);
}
+ return;
case 0x0063:
/* MOVLI.L @Rm,R0
- 1 -> LDST
- (Rm) -> R0
- When interrupt/exception
- occurred 0 -> LDST
- */
+ * 1 -> LDST
+ * (Rm) -> R0
+ * When interrupt/exception
+ * occurred 0 -> LDST
+ *
+ * In a parallel context, we must also save the loaded value
+ * for use with the cmpxchg that we'll use with movco.l. */
CHECK_SH4A
- tcg_gen_movi_i32(cpu_ldst, 0);
- tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
- tcg_gen_movi_i32(cpu_ldst, 1);
+ if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_mov_i32(tmp, REG(B11_8));
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_mov_i32(cpu_lock_value, REG(0));
+ tcg_gen_mov_i32(cpu_lock_addr, tmp);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_movi_i32(cpu_lock_addr, 0);
+ }
return;
case 0x0093: /* ocbi @Rn */
{
@@ -1789,7 +1829,7 @@ static void _decode_opc(DisasContext * ctx)
}
#if 0
fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
- ctx->opcode, ctx->pc);
+ ctx->opcode, ctx->base.pc_next);
fflush(stderr);
#endif
do_illegal:
@@ -1801,7 +1841,7 @@ static void _decode_opc(DisasContext * ctx)
gen_save_cpu_state(ctx, true);
gen_helper_raise_illegal_instruction(cpu_env);
}
- ctx->bstate = BS_EXCP;
+ ctx->base.is_jmp = DISAS_NORETURN;
return;
do_fpu_disabled:
@@ -1811,7 +1851,7 @@ static void _decode_opc(DisasContext * ctx)
} else {
gen_helper_raise_fpu_disable(cpu_env);
}
- ctx->bstate = BS_EXCP;
+ ctx->base.is_jmp = DISAS_NORETURN;
return;
}
@@ -1837,7 +1877,6 @@ static void decode_opc(DisasContext * ctx)
ctx->envflags &= ~GUSA_MASK;
tcg_gen_movi_i32(cpu_flags, ctx->envflags);
- ctx->bstate = BS_BRANCH;
if (old_flags & DELAY_SLOT_CONDITIONAL) {
gen_delayed_conditional_jump(ctx);
} else {
@@ -1864,8 +1903,8 @@ static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
int mv_src, mt_dst, st_src, st_mop;
TCGv op_arg;
- uint32_t pc = ctx->pc;
- uint32_t pc_end = ctx->tb->cs_base;
+ uint32_t pc = ctx->base.pc_next;
+ uint32_t pc_end = ctx->base.tb->cs_base;
int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
int max_insns = (pc_end - pc) / 2;
int i;
@@ -1901,7 +1940,7 @@ static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
op_dst = op_src = op_opc = -1;
mt_dst = -1;
st_src = st_mop = -1;
- TCGV_UNUSED(op_arg);
+ op_arg = NULL;
i = 0;
#define NEXT_INSN \
@@ -2189,13 +2228,13 @@ static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
}
/* If op_src is not a valid register, then op_arg was a constant. */
- if (op_src < 0) {
+ if (op_src < 0 && op_arg) {
tcg_temp_free_i32(op_arg);
}
/* The entire region has been translated. */
ctx->envflags &= ~GUSA_MASK;
- ctx->pc = pc_end;
+ ctx->base.pc_next = pc_end;
return max_insns;
fail:
@@ -2208,13 +2247,13 @@ static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
ctx->envflags |= GUSA_EXCLUSIVE;
gen_save_cpu_state(ctx, false);
gen_helper_exclusive(cpu_env);
- ctx->bstate = BS_EXCP;
+ ctx->base.is_jmp = DISAS_NORETURN;
/* We're not executing an instruction, but we must report one for the
purposes of accounting within the TB. We might as well report the
- entire region consumed via ctx->pc so that it's immediately available
- in the disassembly dump. */
- ctx->pc = pc_end;
+ entire region consumed via ctx->base.pc_next so that it's immediately
+ available in the disassembly dump. */
+ ctx->base.pc_next = pc_end;
return 1;
}
#endif
@@ -2228,16 +2267,16 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
int max_insns;
pc_start = tb->pc;
- ctx.pc = pc_start;
+ ctx.base.pc_next = pc_start;
ctx.tbflags = (uint32_t)tb->flags;
ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
- ctx.bstate = BS_NONE;
+ ctx.base.is_jmp = DISAS_NEXT;
ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
/* We don't know if the delayed pc came from a dynamic or static branch,
so assume it is a dynamic branch. */
ctx.delayed_pc = -1; /* use delayed pc from env pointer */
- ctx.tb = tb;
- ctx.singlestep_enabled = cs->singlestep_enabled;
+ ctx.base.tb = tb;
+ ctx.base.singlestep_enabled = cs->singlestep_enabled;
ctx.features = env->features;
ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
@@ -2252,11 +2291,11 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
/* Since the ISA is fixed-width, we can bound by the number
of instructions remaining on the page. */
- num_insns = -(ctx.pc | TARGET_PAGE_MASK) / 2;
+ num_insns = -(ctx.base.pc_next | TARGET_PAGE_MASK) / 2;
max_insns = MIN(max_insns, num_insns);
/* Single stepping means just that. */
- if (ctx.singlestep_enabled || singlestep) {
+ if (ctx.base.singlestep_enabled || singlestep) {
max_insns = 1;
}
@@ -2269,22 +2308,22 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
}
#endif
- while (ctx.bstate == BS_NONE
+ while (ctx.base.is_jmp == DISAS_NEXT
&& num_insns < max_insns
&& !tcg_op_buf_full()) {
- tcg_gen_insn_start(ctx.pc, ctx.envflags);
+ tcg_gen_insn_start(ctx.base.pc_next, ctx.envflags);
num_insns++;
- if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
+ if (unlikely(cpu_breakpoint_test(cs, ctx.base.pc_next, BP_ANY))) {
/* We have hit a breakpoint - make sure PC is up-to-date */
gen_save_cpu_state(&ctx, true);
gen_helper_debug(cpu_env);
- ctx.bstate = BS_EXCP;
+ ctx.base.is_jmp = DISAS_NORETURN;
/* The address covered by the breakpoint must be included in
[tb->pc, tb->pc + tb->size) in order for it to be
properly cleared -- thus we increment the PC here so that
the logic setting tb->size below does the right thing. */
- ctx.pc += 2;
+ ctx.base.pc_next += 2;
break;
}
@@ -2292,9 +2331,9 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
gen_io_start();
}
- ctx.opcode = cpu_lduw_code(env, ctx.pc);
+ ctx.opcode = cpu_lduw_code(env, ctx.base.pc_next);
decode_opc(&ctx);
- ctx.pc += 2;
+ ctx.base.pc_next += 2;
}
if (tb_cflags(tb) & CF_LAST_IO) {
gen_io_end();
@@ -2305,30 +2344,28 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
ctx.envflags &= ~GUSA_MASK;
}
- if (cs->singlestep_enabled) {
+ switch (ctx.base.is_jmp) {
+ case DISAS_STOP:
gen_save_cpu_state(&ctx, true);
- gen_helper_debug(cpu_env);
- } else {
- switch (ctx.bstate) {
- case BS_STOP:
- gen_save_cpu_state(&ctx, true);
+ if (ctx.base.singlestep_enabled) {
+ gen_helper_debug(cpu_env);
+ } else {
tcg_gen_exit_tb(0);
- break;
- case BS_NONE:
- gen_save_cpu_state(&ctx, false);
- gen_goto_tb(&ctx, 0, ctx.pc);
- break;
- case BS_EXCP:
- /* fall through */
- case BS_BRANCH:
- default:
- break;
- }
+ }
+ break;
+ case DISAS_NEXT:
+ gen_save_cpu_state(&ctx, false);
+ gen_goto_tb(&ctx, 0, ctx.base.pc_next);
+ break;
+ case DISAS_NORETURN:
+ break;
+ default:
+ g_assert_not_reached();
}
gen_tb_end(tb, num_insns);
- tb->size = ctx.pc - pc_start;
+ tb->size = ctx.base.pc_next - pc_start;
tb->icount = num_insns;
#ifdef DEBUG_DISAS
@@ -2336,7 +2373,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
&& qemu_log_in_addr_range(pc_start)) {
qemu_log_lock();
qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
- log_target_disas(cs, pc_start, ctx.pc - pc_start);
+ log_target_disas(cs, pc_start, ctx.base.pc_next - pc_start);
qemu_log("\n");
qemu_log_unlock();
}
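At the guest-semantics level, the parallel-context handling of movli.l/movco.l above is a standard LL/SC-on-top-of-CAS scheme: remember the reserved address and the loaded value, and let the conditional store succeed only if a compare-and-swap against that remembered value still succeeds. A rough single-reservation model in C11 atomics; movli()/movco() are illustrative names, not QEMU API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint32_t *lock_addr;   /* NULL: no reservation held */
static uint32_t lock_value;

static uint32_t movli(_Atomic uint32_t *addr)
{
    uint32_t v = atomic_load(addr);

    lock_addr = addr;                 /* take the reservation */
    lock_value = v;
    return v;                         /* loaded value goes to R0 */
}

static bool movco(_Atomic uint32_t *addr, uint32_t new_val)
{
    bool ok = false;

    if (addr == lock_addr) {
        uint32_t expected = lock_value;

        /* Succeeds only if the word is unchanged since movli(). */
        ok = atomic_compare_exchange_strong(addr, &expected, new_val);
    }
    lock_addr = NULL;                 /* the reservation is always cleared */
    return ok;                        /* becomes the T bit */
}

The non-parallel path in the patch keeps the simpler address-only check, which is enough while user-mode emulation stays single-threaded.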
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 849a02aebd..71e0853e43 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -5922,7 +5922,7 @@ void sparc_tcg_init(void)
*rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
}
- TCGV_UNUSED(cpu_regs[0]);
+ cpu_regs[0] = NULL;
for (i = 1; i < 8; ++i) {
cpu_regs[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUSPARCState, gregs[i]),
diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c
index d55549dabc..d63bf5bba3 100644
--- a/target/tilegx/translate.c
+++ b/target/tilegx/translate.c
@@ -143,7 +143,7 @@ static bool check_gr(DisasContext *dc, uint8_t reg)
static TCGv load_zero(DisasContext *dc)
{
- if (TCGV_IS_UNUSED_I64(dc->zero)) {
+ if (!dc->zero) {
dc->zero = tcg_const_i64(0);
}
return dc->zero;
@@ -2324,7 +2324,7 @@ static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
for (i = 0; i < ARRAY_SIZE(dc->wb); i++) {
DisasContextTemp *wb = &dc->wb[i];
wb->reg = TILEGX_R_NOREG;
- TCGV_UNUSED_I64(wb->val);
+ wb->val = NULL;
}
dc->num_wb = 0;
@@ -2384,9 +2384,9 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
dc->exit_tb = false;
dc->atomic_excp = TILEGX_EXCP_NONE;
dc->jmp.cond = TCG_COND_NEVER;
- TCGV_UNUSED_I64(dc->jmp.dest);
- TCGV_UNUSED_I64(dc->jmp.val1);
- TCGV_UNUSED_I64(dc->zero);
+ dc->jmp.dest = NULL;
+ dc->jmp.val1 = NULL;
+ dc->zero = NULL;
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log_lock();
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
index 7af202c8c0..40ed229486 100644
--- a/target/tricore/op_helper.c
+++ b/target/tricore/op_helper.c
@@ -31,9 +31,7 @@ raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin,
{
CPUState *cs = CPU(tricore_env_get_cpu(env));
/* in case we come from a helper-call we need to restore the PC */
- if (pc) {
- cpu_restore_state(cs, pc);
- }
+ cpu_restore_state(cs, pc);
/* Tin is loaded into d[15] */
env->gpr_d[15] = tin;
@@ -2804,13 +2802,8 @@ static inline void QEMU_NORETURN do_raise_exception_err(CPUTriCoreState *env,
CPUState *cs = CPU(tricore_env_get_cpu(env));
cs->exception_index = exception;
env->error_code = error_code;
-
- if (pc) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, pc);
- }
-
- cpu_loop_exit(cs);
+ /* now we have a real cpu fault */
+ cpu_loop_exit_restore(cs, pc);
}
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
diff --git a/target/unicore32/op_helper.c b/target/unicore32/op_helper.c
index 0872c29faa..8788642a7f 100644
--- a/target/unicore32/op_helper.c
+++ b/target/unicore32/op_helper.c
@@ -251,11 +251,8 @@ void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
ret = uc32_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
if (unlikely(ret)) {
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
- cpu_loop_exit(cs);
+ /* now we have a real cpu fault */
+ cpu_loop_exit_restore(cs, retaddr);
}
}
#endif
diff --git a/target/unicore32/translate.c b/target/unicore32/translate.c
index 384aa86027..5b51f2166d 100644
--- a/target/unicore32/translate.c
+++ b/target/unicore32/translate.c
@@ -1230,7 +1230,7 @@ static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
tmp = load_reg(s, UCOP_REG_N);
} else {
- TCGV_UNUSED(tmp);
+ tmp = NULL;
}
switch (UCOP_OPCODES) {
@@ -1652,7 +1652,7 @@ static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
/* compute total size */
loaded_base = 0;
- TCGV_UNUSED(loaded_var);
+ loaded_var = NULL;
n = 0;
for (i = 0; i < 6; i++) {
if (UCOP_SET(i)) {
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 438321c6cc..2cbbeefd53 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -602,8 +602,8 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
- int oi, oi_next, nb_temps, nb_globals;
- TCGOp *prev_mb = NULL;
+ int nb_temps, nb_globals;
+ TCGOp *op, *op_next, *prev_mb = NULL;
struct tcg_temp_info *infos;
TCGTempSet temps_used;
@@ -617,22 +617,18 @@ void tcg_optimize(TCGContext *s)
bitmap_zero(temps_used.l, nb_temps);
infos = tcg_malloc(sizeof(struct tcg_temp_info) * nb_temps);
- for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
+ QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
tcg_target_ulong mask, partmask, affected;
int nb_oargs, nb_iargs, i;
TCGArg tmp;
-
- TCGOp * const op = &s->gen_op_buf[oi];
TCGOpcode opc = op->opc;
const TCGOpDef *def = &tcg_op_defs[opc];
- oi_next = op->next;
-
/* Count the arguments, and initialize the temps that are
going to be used */
if (opc == INDEX_op_call) {
- nb_oargs = op->callo;
- nb_iargs = op->calli;
+ nb_oargs = TCGOP_CALLO(op);
+ nb_iargs = TCGOP_CALLI(op);
for (i = 0; i < nb_oargs + nb_iargs; i++) {
TCGTemp *ts = arg_temp(op->args[i]);
if (ts) {
@@ -1261,9 +1257,6 @@ void tcg_optimize(TCGContext *s)
rh = op->args[1];
tcg_opt_gen_movi(s, op, rl, (int32_t)a);
tcg_opt_gen_movi(s, op2, rh, (int32_t)(a >> 32));
-
- /* We've done all we need to do with the movi. Skip it. */
- oi_next = op2->next;
break;
}
goto do_default;
@@ -1280,9 +1273,6 @@ void tcg_optimize(TCGContext *s)
rh = op->args[1];
tcg_opt_gen_movi(s, op, rl, (int32_t)r);
tcg_opt_gen_movi(s, op2, rh, (int32_t)(r >> 32));
-
- /* We've done all we need to do with the movi. Skip it. */
- oi_next = op2->next;
break;
}
goto do_default;
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 3cad30b1f2..0c509bfe46 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -42,30 +42,6 @@ extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_HIGH TCGV_HIGH_link_error
#endif
-/* Note that this is optimized for sequential allocation during translate.
- Up to and including filling in the forward link immediately. We'll do
- proper termination of the end of the list after we finish translation. */
-
-static inline TCGOp *tcg_emit_op(TCGOpcode opc)
-{
- TCGContext *ctx = tcg_ctx;
- int oi = ctx->gen_next_op_idx;
- int ni = oi + 1;
- int pi = oi - 1;
- TCGOp *op = &ctx->gen_op_buf[oi];
-
- tcg_debug_assert(oi < OPC_BUF_SIZE);
- ctx->gen_op_buf[0].prev = oi;
- ctx->gen_next_op_idx = ni;
-
- memset(op, 0, offsetof(TCGOp, args));
- op->opc = opc;
- op->prev = pi;
- op->next = ni;
-
- return op;
-}
-
void tcg_gen_op1(TCGOpcode opc, TCGArg a1)
{
TCGOp *op = tcg_emit_op(opc);
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 3129159907..ca07b32b65 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -807,8 +807,6 @@ void tcg_gen_lookup_and_goto_ptr(void);
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_local_new() tcg_temp_local_new_i32()
#define tcg_temp_free tcg_temp_free_i32
-#define TCGV_UNUSED(x) TCGV_UNUSED_I32(x)
-#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I32(x)
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
#else
@@ -817,8 +815,6 @@ void tcg_gen_lookup_and_goto_ptr(void);
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_local_new() tcg_temp_local_new_i64()
#define tcg_temp_free tcg_temp_free_i64
-#define TCGV_UNUSED(x) TCGV_UNUSED_I64(x)
-#define TCGV_IS_UNUSED(x) TCGV_IS_UNUSED_I64(x)
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
#endif
diff --git a/tcg/tcg.c b/tcg/tcg.c
index c22f1c4441..93caa0be93 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -862,9 +862,8 @@ void tcg_func_start(TCGContext *s)
s->goto_tb_issue_mask = 0;
#endif
- s->gen_op_buf[0].next = 1;
- s->gen_op_buf[0].prev = 0;
- s->gen_next_op_idx = 1;
+ QTAILQ_INIT(&s->ops);
+ QTAILQ_INIT(&s->free_ops);
}
static inline TCGTemp *tcg_temp_alloc(TCGContext *s)
@@ -1339,7 +1338,6 @@ bool tcg_op_supported(TCGOpcode op)
and endian swap in tcg_reg_alloc_call(). */
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
{
- TCGContext *s = tcg_ctx;
int i, real_args, nb_rets, pi;
unsigned sizemask, flags;
TCGHelperInfo *info;
@@ -1358,8 +1356,8 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
TCGv_i64 retl, reth;
TCGTemp *split_args[MAX_OPC_PARAM];
- TCGV_UNUSED_I64(retl);
- TCGV_UNUSED_I64(reth);
+ retl = NULL;
+ reth = NULL;
if (sizemask != 0) {
for (i = real_args = 0; i < nargs; ++i) {
int is_64bit = sizemask & (1 << (i+1)*2);
@@ -1395,17 +1393,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
}
#endif /* TCG_TARGET_EXTEND_ARGS */
- i = s->gen_next_op_idx;
- tcg_debug_assert(i < OPC_BUF_SIZE);
- s->gen_op_buf[0].prev = i;
- s->gen_next_op_idx = i + 1;
- op = &s->gen_op_buf[i];
-
- /* Set links for sequential allocation during translation. */
- memset(op, 0, offsetof(TCGOp, args));
- op->opc = INDEX_op_call;
- op->prev = i - 1;
- op->next = i + 1;
+ op = tcg_emit_op(INDEX_op_call);
pi = 0;
if (ret != NULL) {
@@ -1442,7 +1430,7 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
} else {
nb_rets = 0;
}
- op->callo = nb_rets;
+ TCGOP_CALLO(op) = nb_rets;
real_args = 0;
for (i = 0; i < nargs; i++) {
@@ -1481,10 +1469,10 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
}
op->args[pi++] = (uintptr_t)func;
op->args[pi++] = flags;
- op->calli = real_args;
+ TCGOP_CALLI(op) = real_args;
/* Make sure the fields didn't overflow. */
- tcg_debug_assert(op->calli == real_args);
+ tcg_debug_assert(TCGOP_CALLI(op) == real_args);
tcg_debug_assert(pi <= ARRAY_SIZE(op->args));
#if defined(__sparc__) && !defined(__arch64__) \
@@ -1622,20 +1610,18 @@ void tcg_dump_ops(TCGContext *s)
{
char buf[128];
TCGOp *op;
- int oi;
- for (oi = s->gen_op_buf[0].next; oi != 0; oi = op->next) {
+ QTAILQ_FOREACH(op, &s->ops, link) {
int i, k, nb_oargs, nb_iargs, nb_cargs;
const TCGOpDef *def;
TCGOpcode c;
int col = 0;
- op = &s->gen_op_buf[oi];
c = op->opc;
def = &tcg_op_defs[c];
if (c == INDEX_op_insn_start) {
- col += qemu_log("%s ----", oi != s->gen_op_buf[0].next ? "\n" : "");
+ col += qemu_log("\n ----");
for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
target_ulong a;
@@ -1648,8 +1634,8 @@ void tcg_dump_ops(TCGContext *s)
}
} else if (c == INDEX_op_call) {
/* variable number of arguments */
- nb_oargs = op->callo;
- nb_iargs = op->calli;
+ nb_oargs = TCGOP_CALLO(op);
+ nb_iargs = TCGOP_CALLI(op);
nb_cargs = def->nb_cargs;
/* function name, flags, out args */
@@ -1898,65 +1884,51 @@ static void process_op_defs(TCGContext *s)
void tcg_op_remove(TCGContext *s, TCGOp *op)
{
- int next = op->next;
- int prev = op->prev;
-
- /* We should never attempt to remove the list terminator. */
- tcg_debug_assert(op != &s->gen_op_buf[0]);
-
- s->gen_op_buf[next].prev = prev;
- s->gen_op_buf[prev].next = next;
-
- memset(op, 0, sizeof(*op));
+ QTAILQ_REMOVE(&s->ops, op, link);
+ QTAILQ_INSERT_TAIL(&s->free_ops, op, link);
#ifdef CONFIG_PROFILER
atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
#endif
}
-TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
- TCGOpcode opc, int nargs)
+static TCGOp *tcg_op_alloc(TCGOpcode opc)
{
- int oi = s->gen_next_op_idx;
- int prev = old_op->prev;
- int next = old_op - s->gen_op_buf;
- TCGOp *new_op;
+ TCGContext *s = tcg_ctx;
+ TCGOp *op;
- tcg_debug_assert(oi < OPC_BUF_SIZE);
- s->gen_next_op_idx = oi + 1;
+ if (likely(QTAILQ_EMPTY(&s->free_ops))) {
+ op = tcg_malloc(sizeof(TCGOp));
+ } else {
+ op = QTAILQ_FIRST(&s->free_ops);
+ QTAILQ_REMOVE(&s->free_ops, op, link);
+ }
+ memset(op, 0, offsetof(TCGOp, link));
+ op->opc = opc;
- new_op = &s->gen_op_buf[oi];
- *new_op = (TCGOp){
- .opc = opc,
- .prev = prev,
- .next = next
- };
- s->gen_op_buf[prev].next = oi;
- old_op->prev = oi;
+ return op;
+}
+
+TCGOp *tcg_emit_op(TCGOpcode opc)
+{
+ TCGOp *op = tcg_op_alloc(opc);
+ QTAILQ_INSERT_TAIL(&tcg_ctx->ops, op, link);
+ return op;
+}
+TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
+ TCGOpcode opc, int nargs)
+{
+ TCGOp *new_op = tcg_op_alloc(opc);
+ QTAILQ_INSERT_BEFORE(old_op, new_op, link);
return new_op;
}
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
TCGOpcode opc, int nargs)
{
- int oi = s->gen_next_op_idx;
- int prev = old_op - s->gen_op_buf;
- int next = old_op->next;
- TCGOp *new_op;
-
- tcg_debug_assert(oi < OPC_BUF_SIZE);
- s->gen_next_op_idx = oi + 1;
-
- new_op = &s->gen_op_buf[oi];
- *new_op = (TCGOp){
- .opc = opc,
- .prev = prev,
- .next = next
- };
- s->gen_op_buf[next].prev = oi;
- old_op->next = oi;
-
+ TCGOp *new_op = tcg_op_alloc(opc);
+ QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
return new_op;
}
@@ -2006,30 +1978,26 @@ static void tcg_la_bb_end(TCGContext *s)
static void liveness_pass_1(TCGContext *s)
{
int nb_globals = s->nb_globals;
- int oi, oi_prev;
+ TCGOp *op, *op_prev;
tcg_la_func_end(s);
- for (oi = s->gen_op_buf[0].prev; oi != 0; oi = oi_prev) {
+ QTAILQ_FOREACH_REVERSE_SAFE(op, &s->ops, TCGOpHead, link, op_prev) {
int i, nb_iargs, nb_oargs;
TCGOpcode opc_new, opc_new2;
bool have_opc_new2;
TCGLifeData arg_life = 0;
TCGTemp *arg_ts;
-
- TCGOp * const op = &s->gen_op_buf[oi];
TCGOpcode opc = op->opc;
const TCGOpDef *def = &tcg_op_defs[opc];
- oi_prev = op->prev;
-
switch (opc) {
case INDEX_op_call:
{
int call_flags;
- nb_oargs = op->callo;
- nb_iargs = op->calli;
+ nb_oargs = TCGOP_CALLO(op);
+ nb_iargs = TCGOP_CALLI(op);
call_flags = op->args[nb_oargs + nb_iargs + 1];
/* pure functions can be removed if their result is unused */
@@ -2233,8 +2201,9 @@ static void liveness_pass_1(TCGContext *s)
static bool liveness_pass_2(TCGContext *s)
{
int nb_globals = s->nb_globals;
- int nb_temps, i, oi, oi_next;
+ int nb_temps, i;
bool changes = false;
+ TCGOp *op, *op_next;
/* Create a temporary for each indirect global. */
for (i = 0; i < nb_globals; ++i) {
@@ -2256,19 +2225,16 @@ static bool liveness_pass_2(TCGContext *s)
its->state = TS_DEAD;
}
- for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
- TCGOp *op = &s->gen_op_buf[oi];
+ QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
TCGOpcode opc = op->opc;
const TCGOpDef *def = &tcg_op_defs[opc];
TCGLifeData arg_life = op->life;
int nb_iargs, nb_oargs, call_flags;
TCGTemp *arg_ts, *dir_ts;
- oi_next = op->next;
-
if (opc == INDEX_op_call) {
- nb_oargs = op->callo;
- nb_iargs = op->calli;
+ nb_oargs = TCGOP_CALLO(op);
+ nb_iargs = TCGOP_CALLI(op);
call_flags = op->args[nb_oargs + nb_iargs + 1];
} else {
nb_iargs = def->nb_iargs;
@@ -2949,8 +2915,8 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
{
- const int nb_oargs = op->callo;
- const int nb_iargs = op->calli;
+ const int nb_oargs = TCGOP_CALLO(op);
+ const int nb_iargs = TCGOP_CALLI(op);
const TCGLifeData arg_life = op->life;
int flags, nb_regs, i;
TCGReg reg;
@@ -3168,13 +3134,16 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
#ifdef CONFIG_PROFILER
TCGProfile *prof = &s->prof;
#endif
- int i, oi, oi_next, num_insns;
+ int i, num_insns;
+ TCGOp *op;
#ifdef CONFIG_PROFILER
{
int n;
- n = s->gen_op_buf[0].prev + 1;
+ QTAILQ_FOREACH(op, &s->ops, link) {
+ n++;
+ }
atomic_set(&prof->op_count, prof->op_count + n);
if (n > prof->op_count_max) {
atomic_set(&prof->op_count_max, n);
@@ -3260,11 +3229,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
#endif
num_insns = -1;
- for (oi = s->gen_op_buf[0].next; oi != 0; oi = oi_next) {
- TCGOp * const op = &s->gen_op_buf[oi];
+ QTAILQ_FOREACH(op, &s->ops, link) {
TCGOpcode opc = op->opc;
- oi_next = op->next;
#ifdef CONFIG_PROFILER
atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif
diff --git a/tcg/tcg.h b/tcg/tcg.h
index cb7b329876..2ce497cebf 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -29,6 +29,7 @@
#include "cpu.h"
#include "exec/tb-context.h"
#include "qemu/bitops.h"
+#include "qemu/queue.h"
#include "tcg-mo.h"
#include "tcg-target.h"
@@ -40,7 +41,7 @@
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
-#define MAX_OPC_PARAM_IARGS 5
+#define MAX_OPC_PARAM_IARGS 6
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
@@ -48,8 +49,6 @@
* and up to 4 + N parameters on 64-bit archs
* (N = number of input arguments + output arguments). */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
-#define OPC_BUF_SIZE 640
-#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
#define CPU_TEMP_BUF_NLONGS 128
@@ -428,15 +427,6 @@ typedef TCGv_ptr TCGv_env;
#error Unhandled TARGET_LONG_BITS value
#endif
-/* See the comment before tcgv_i32_temp. */
-#define TCGV_UNUSED_I32(x) (x = (TCGv_i32)NULL)
-#define TCGV_UNUSED_I64(x) (x = (TCGv_i64)NULL)
-#define TCGV_UNUSED_PTR(x) (x = (TCGv_ptr)NULL)
-
-#define TCGV_IS_UNUSED_I32(x) ((x) == (TCGv_i32)NULL)
-#define TCGV_IS_UNUSED_I64(x) ((x) == (TCGv_i64)NULL)
-#define TCGV_IS_UNUSED_PTR(x) ((x) == (TCGv_ptr)NULL)
-
/* call flags */
/* Helper does not read globals (either directly or through an exception). It
implies TCG_CALL_NO_WRITE_GLOBALS. */
@@ -498,6 +488,12 @@ static inline TCGCond tcg_unsigned_cond(TCGCond c)
return c & 2 ? (TCGCond)(c ^ 6) : c;
}
+/* Create a "signed" version of an "unsigned" comparison. */
+static inline TCGCond tcg_signed_cond(TCGCond c)
+{
+ return c & 4 ? (TCGCond)(c ^ 6) : c;
+}
+
/* Must a comparison be considered unsigned? */
static inline bool is_unsigned_cond(TCGCond c)
{
@@ -576,28 +572,25 @@ typedef uint16_t TCGLifeData;
typedef struct TCGOp {
TCGOpcode opc : 8; /* 8 */
- /* The number of out and in parameter for a call. */
- unsigned calli : 4; /* 12 */
- unsigned callo : 2; /* 14 */
- unsigned : 2; /* 16 */
-
- /* Index of the prev/next op, or 0 for the end of the list. */
- unsigned prev : 16; /* 32 */
- unsigned next : 16; /* 48 */
+ /* Parameters for this opcode. See below. */
+ unsigned param1 : 4; /* 12 */
+ unsigned param2 : 4; /* 16 */
/* Lifetime data of the operands. */
- unsigned life : 16; /* 64 */
+ unsigned life : 16; /* 32 */
+
+ /* Next and previous opcodes. */
+ QTAILQ_ENTRY(TCGOp) link;
/* Arguments for the opcode. */
TCGArg args[MAX_OPC_PARAM];
} TCGOp;
-/* Make sure that we don't expand the structure without noticing. */
-QEMU_BUILD_BUG_ON(sizeof(TCGOp) != 8 + sizeof(TCGArg) * MAX_OPC_PARAM);
+#define TCGOP_CALLI(X) (X)->param1
+#define TCGOP_CALLO(X) (X)->param2
/* Make sure operands fit in the bitfields above. */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
-QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 16));
typedef struct TCGProfile {
int64_t tb_count1;
@@ -651,8 +644,6 @@ struct TCGContext {
int goto_tb_issue_mask;
#endif
- int gen_next_op_idx;
-
/* Code generation. Note that we specifically do not use tcg_insn_unit
here, because there's too much arithmetic throughout that relies
on addition and subtraction working on bytes. Rely on the GCC
@@ -683,12 +674,12 @@ struct TCGContext {
TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
+ QTAILQ_HEAD(TCGOpHead, TCGOp) ops, free_ops;
+
/* Tells which temporary holds a given register.
It does not take into account fixed registers */
TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
- TCGOp gen_op_buf[OPC_BUF_SIZE];
-
uint16_t gen_insn_end_off[TCG_MAX_INSNS];
target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
};
@@ -778,21 +769,21 @@ static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
}
#endif
-static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
+static inline void tcg_set_insn_param(TCGOp *op, int arg, TCGArg v)
{
- tcg_ctx->gen_op_buf[op_idx].args[arg] = v;
+ op->args[arg] = v;
}
-/* The number of opcodes emitted so far. */
-static inline int tcg_op_buf_count(void)
+/* The last op that was emitted. */
+static inline TCGOp *tcg_last_op(void)
{
- return tcg_ctx->gen_next_op_idx;
+ return QTAILQ_LAST(&tcg_ctx->ops, TCGOpHead);
}
/* Test for whether to terminate the TB for using too many opcodes. */
static inline bool tcg_op_buf_full(void)
{
- return tcg_op_buf_count() >= OPC_MAX_SIZE;
+ return false;
}
/* pool based memory allocation */
@@ -976,6 +967,7 @@ bool tcg_op_supported(TCGOpcode op);
void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
+TCGOp *tcg_emit_op(TCGOpcode opc);
void tcg_op_remove(TCGContext *s, TCGOp *op);
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
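The op-buffer rework above replaces the fixed gen_op_buf[] array and its 16-bit prev/next indices with an intrusive tail queue plus a recycling free list. The allocation idiom the patch relies on can be sketched with the same qemu/queue.h macros; Node, node_alloc() and node_free() are illustrative names, not QEMU API:

#include "qemu/osdep.h"
#include "qemu/queue.h"

typedef struct Node {
    int payload;
    QTAILQ_ENTRY(Node) link;               /* intrusive prev/next links */
} Node;

static QTAILQ_HEAD(NodeHead, Node) live = QTAILQ_HEAD_INITIALIZER(live);
static QTAILQ_HEAD(, Node) free_nodes = QTAILQ_HEAD_INITIALIZER(free_nodes);

static Node *node_alloc(void)
{
    Node *n;

    if (QTAILQ_EMPTY(&free_nodes)) {
        n = g_new0(Node, 1);               /* grow only when needed */
    } else {
        n = QTAILQ_FIRST(&free_nodes);
        QTAILQ_REMOVE(&free_nodes, n, link);
    }
    QTAILQ_INSERT_TAIL(&live, n, link);
    return n;
}

static void node_free(Node *n)
{
    /* Mirrors tcg_op_remove(): unlink and recycle rather than free. */
    QTAILQ_REMOVE(&live, n, link);
    QTAILQ_INSERT_TAIL(&free_nodes, n, link);
}

Because the list no longer lives in a bounded array, tcg_op_buf_full() can simply return false and the OPC_BUF_SIZE limit disappears.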
diff --git a/tcg/tci.c b/tcg/tci.c
index 63f2cd54ab..33edca1903 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -40,7 +40,7 @@
tcg_abort(); \
} while (0)
-#if MAX_OPC_PARAM_IARGS != 5
+#if MAX_OPC_PARAM_IARGS != 6
# error Fix needed, number of supported input arguments changed!
#endif
#if TCG_TARGET_REG_BITS == 32
@@ -48,11 +48,12 @@ typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
tcg_target_ulong, tcg_target_ulong,
tcg_target_ulong, tcg_target_ulong,
tcg_target_ulong, tcg_target_ulong,
+ tcg_target_ulong, tcg_target_ulong,
tcg_target_ulong, tcg_target_ulong);
#else
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
tcg_target_ulong, tcg_target_ulong,
- tcg_target_ulong);
+ tcg_target_ulong, tcg_target_ulong);
#endif
static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
@@ -520,7 +521,9 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
tci_read_reg(regs, TCG_REG_R7),
tci_read_reg(regs, TCG_REG_R8),
tci_read_reg(regs, TCG_REG_R9),
- tci_read_reg(regs, TCG_REG_R10));
+ tci_read_reg(regs, TCG_REG_R10),
+ tci_read_reg(regs, TCG_REG_R11),
+ tci_read_reg(regs, TCG_REG_R12));
tci_write_reg(regs, TCG_REG_R0, tmp64);
tci_write_reg(regs, TCG_REG_R1, tmp64 >> 32);
#else
@@ -528,7 +531,8 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
tci_read_reg(regs, TCG_REG_R1),
tci_read_reg(regs, TCG_REG_R2),
tci_read_reg(regs, TCG_REG_R3),
- tci_read_reg(regs, TCG_REG_R5));
+ tci_read_reg(regs, TCG_REG_R5),
+ tci_read_reg(regs, TCG_REG_R6));
tci_write_reg(regs, TCG_REG_R0, tmp64);
#endif
break;
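The interpreter change above follows from raising MAX_OPC_PARAM_IARGS to 6: two more registers are read so a helper can take six inputs (split into register pairs on 32-bit hosts). An illustrative prototype only; the helper name is made up:

#include <stdint.h>

/* A six-input helper now fits within MAX_OPC_PARAM_IARGS == 6 and is
 * marshalled through the extra registers added above. */
uint64_t helper_illustrative_op(uint64_t a, uint64_t b, uint64_t c,
                                uint64_t d, uint64_t e, uint64_t f);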
diff --git a/tcg/tci/tcg-target.inc.c b/tcg/tci/tcg-target.inc.c
index 913c3802a3..cc949bea85 100644
--- a/tcg/tci/tcg-target.inc.c
+++ b/tcg/tci/tcg-target.inc.c
@@ -292,7 +292,7 @@ static const int tcg_target_reg_alloc_order[] = {
#endif
};
-#if MAX_OPC_PARAM_IARGS != 5
+#if MAX_OPC_PARAM_IARGS != 6
# error Fix needed, number of supported input arguments changed!
#endif
@@ -305,14 +305,16 @@ static const int tcg_target_call_iarg_regs[] = {
TCG_REG_R4,
#endif
TCG_REG_R5,
+ TCG_REG_R6,
#if TCG_TARGET_REG_BITS == 32
/* 32 bit hosts need 2 * MAX_OPC_PARAM_IARGS registers. */
- TCG_REG_R6,
TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
TCG_REG_R8,
TCG_REG_R9,
TCG_REG_R10,
+ TCG_REG_R11,
+ TCG_REG_R12,
#else
# error Too few input registers available
#endif
diff --git a/tests/Makefile.include b/tests/Makefile.include
index c002352134..39a4b5359d 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -3,21 +3,21 @@
check-help:
@echo "Regression testing targets:"
@echo
- @echo " make check Run all tests"
- @echo " make check-qtest-TARGET Run qtest tests for given target"
- @echo " make check-qtest Run qtest tests"
- @echo " make check-unit Run qobject tests"
- @echo " make check-speed Run qobject speed tests"
- @echo " make check-qapi-schema Run QAPI schema tests"
- @echo " make check-block Run block tests"
- @echo " make check-report.html Generates an HTML test report"
- @echo " make check-clean Clean the tests"
+ @echo " $(MAKE) check Run all tests"
+ @echo " $(MAKE) check-qtest-TARGET Run qtest tests for given target"
+ @echo " $(MAKE) check-qtest Run qtest tests"
+ @echo " $(MAKE) check-unit Run qobject tests"
+ @echo " $(MAKE) check-speed Run qobject speed tests"
+ @echo " $(MAKE) check-qapi-schema Run QAPI schema tests"
+ @echo " $(MAKE) check-block Run block tests"
+ @echo " $(MAKE) check-report.html Generates an HTML test report"
+ @echo " $(MAKE) check-clean Clean the tests"
@echo
@echo "Please note that HTML reports do not regenerate if the unit tests"
@echo "has not changed."
@echo
@echo "The variable SPEED can be set to control the gtester speed setting."
- @echo "Default options are -k and (for make V=1) --verbose; they can be"
+ @echo "Default options are -k and (for $(MAKE) V=1) --verbose; they can be"
@echo "changed with variable GTESTER_OPTIONS."
ifneq ($(wildcard config-host.mak),)
@@ -80,6 +80,7 @@ gcov-files-test-thread-pool-y = thread-pool.c
gcov-files-test-hbitmap-y = util/hbitmap.c
check-unit-y += tests/test-hbitmap$(EXESUF)
gcov-files-test-hbitmap-y = blockjob.c
+check-unit-y += tests/test-bdrv-drain$(EXESUF)
check-unit-y += tests/test-blockjob$(EXESUF)
check-unit-y += tests/test-blockjob-txn$(EXESUF)
check-unit-y += tests/test-x86-cpuid$(EXESUF)
@@ -297,6 +298,8 @@ gcov-files-x86_64-y = $(subst i386-softmmu/,x86_64-softmmu/,$(gcov-files-i386-y)
check-qtest-alpha-y = tests/boot-serial-test$(EXESUF)
+check-qtest-m68k-y = tests/boot-serial-test$(EXESUF)
+
check-qtest-mips-y = tests/endianness-test$(EXESUF)
check-qtest-mips64-y = tests/endianness-test$(EXESUF)
@@ -416,6 +419,7 @@ qapi-schema += command-int.json
qapi-schema += comments.json
qapi-schema += doc-bad-alternate-member.json
qapi-schema += doc-bad-command-arg.json
+qapi-schema += doc-bad-section.json
qapi-schema += doc-bad-symbol.json
qapi-schema += doc-bad-union-member.json
qapi-schema += doc-before-include.json
@@ -433,10 +437,10 @@ qapi-schema += doc-invalid-end2.json
qapi-schema += doc-invalid-return.json
qapi-schema += doc-invalid-section.json
qapi-schema += doc-invalid-start.json
-qapi-schema += doc-missing.json
qapi-schema += doc-missing-colon.json
qapi-schema += doc-missing-expr.json
qapi-schema += doc-missing-space.json
+qapi-schema += doc-missing.json
qapi-schema += doc-no-symbol.json
qapi-schema += double-data.json
qapi-schema += double-type.json
@@ -592,6 +596,7 @@ tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(test-block-obj-y)
tests/test-aio$(EXESUF): tests/test-aio.o $(test-block-obj-y)
tests/test-aio-multithread$(EXESUF): tests/test-aio-multithread.o $(test-block-obj-y)
tests/test-throttle$(EXESUF): tests/test-throttle.o $(test-block-obj-y)
+tests/test-bdrv-drain$(EXESUF): tests/test-bdrv-drain.o $(test-block-obj-y) $(test-util-obj-y)
tests/test-blockjob$(EXESUF): tests/test-blockjob.o $(test-block-obj-y) $(test-util-obj-y)
tests/test-blockjob-txn$(EXESUF): tests/test-blockjob-txn.o $(test-block-obj-y) $(test-util-obj-y)
tests/test-thread-pool$(EXESUF): tests/test-thread-pool.o $(test-block-obj-y)
diff --git a/tests/boot-serial-test.c b/tests/boot-serial-test.c
index c935d69824..dd3828c49b 100644
--- a/tests/boot-serial-test.c
+++ b/tests/boot-serial-test.c
@@ -7,19 +7,31 @@
* or later. See the COPYING file in the top-level directory.
*
* This test is used to check that the serial output of the firmware
- * (that we provide for some machines) contains an expected string.
- * Thus we check that the firmware still boots at least to a certain
- * point and so we know that the machine is not completely broken.
+ * (that we provide for some machines) or some small mini-kernels that
+ * we provide here contains an expected string. Thus we check that the
+ * firmware/kernel still boots at least to a certain point and so we
+ * know that the machine is not completely broken.
*/
#include "qemu/osdep.h"
#include "libqtest.h"
+static const uint8_t kernel_mcf5208[] = {
+ 0x41, 0xf9, 0xfc, 0x06, 0x00, 0x00, /* lea 0xfc060000,%a0 */
+ 0x10, 0x3c, 0x00, 0x54, /* move.b #'T',%d0 */
+ 0x11, 0x7c, 0x00, 0x04, 0x00, 0x08, /* move.b #4,8(%a0) Enable TX */
+ 0x11, 0x40, 0x00, 0x0c, /* move.b %d0,12(%a0) Print 'T' */
+ 0x60, 0xfa /* bra.s loop */
+};
+
typedef struct testdef {
const char *arch; /* Target architecture */
const char *machine; /* Name of the machine */
const char *extra; /* Additional parameters */
const char *expect; /* Expected string in the serial output */
+ size_t codesize; /* Size of the kernel or bios data */
+ const uint8_t *kernel; /* Set in case we use our own mini kernel */
+ const uint8_t *bios; /* Set in case we use our own mini bios */
} testdef_t;
static testdef_t tests[] = {
@@ -37,18 +49,21 @@ static testdef_t tests[] = {
{ "x86_64", "q35", "-device sga", "SGABIOS" },
{ "s390x", "s390-ccw-virtio",
"-nodefaults -device sclpconsole,chardev=serial0", "virtio device" },
+ { "m68k", "mcf5208evb", "", "TT", sizeof(kernel_mcf5208), kernel_mcf5208 },
+
{ NULL }
};
static void check_guest_output(const testdef_t *test, int fd)
{
bool output_ok = false;
- int i, nbr, pos = 0;
+ int i, nbr, pos = 0, ccnt;
char ch;
/* Poll serial output... Wait at most 60 seconds */
for (i = 0; i < 6000; ++i) {
- while ((nbr = read(fd, &ch, 1)) == 1) {
+ ccnt = 0;
+ while ((nbr = read(fd, &ch, 1)) == 1 && ccnt++ < 512) {
if (ch == test->expect[pos]) {
pos += 1;
if (test->expect[pos] == '\0') {
@@ -71,26 +86,52 @@ done:
static void test_machine(const void *data)
{
const testdef_t *test = data;
- char tmpname[] = "/tmp/qtest-boot-serial-XXXXXX";
- int fd;
+ char serialtmp[] = "/tmp/qtest-boot-serial-sXXXXXX";
+ char codetmp[] = "/tmp/qtest-boot-serial-cXXXXXX";
+ const char *codeparam = "";
+ const uint8_t *code = NULL;
+ int ser_fd;
- fd = mkstemp(tmpname);
- g_assert(fd != -1);
+ ser_fd = mkstemp(serialtmp);
+ g_assert(ser_fd != -1);
+
+ if (test->kernel) {
+ code = test->kernel;
+ codeparam = "-kernel";
+ } else if (test->bios) {
+ code = test->bios;
+ codeparam = "-bios";
+ }
+
+ if (code) {
+ ssize_t wlen;
+ int code_fd;
+
+ code_fd = mkstemp(codetmp);
+ g_assert(code_fd != -1);
+ wlen = write(code_fd, code, test->codesize);
+ g_assert(wlen == test->codesize);
+ close(code_fd);
+ }
/*
* Make sure that this test uses tcg if available: It is used as a
* fast-enough smoketest for that.
*/
- global_qtest = qtest_startf("-M %s,accel=tcg:kvm "
+ global_qtest = qtest_startf("%s %s -M %s,accel=tcg:kvm "
"-chardev file,id=serial0,path=%s "
"-no-shutdown -serial chardev:serial0 %s",
- test->machine, tmpname, test->extra);
- unlink(tmpname);
+ codeparam, code ? codetmp : "",
+ test->machine, serialtmp, test->extra);
+ unlink(serialtmp);
+ if (code) {
+ unlink(codetmp);
+ }
- check_guest_output(test, fd);
+ check_guest_output(test, ser_fd);
qtest_quit(global_qtest);
- close(fd);
+ close(ser_fd);
}
int main(int argc, char *argv[])
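The widened testdef_t above lets a test supply its own mini kernel or BIOS blob instead of relying on shipped firmware. A hypothetical extra entry (machine name, blob contents and expected output are invented) showing how the .bios variant would look:

static const uint8_t bios_fictional[] = {
    0x00, 0x01, 0x02, 0x03      /* stand-in firmware image */
};

static testdef_t more_tests[] = {
    { .arch = "xtensa", .machine = "fictional-board", .extra = "",
      .expect = "B", .codesize = sizeof(bios_fictional),
      .bios = bios_fictional },
    { NULL }
};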
diff --git a/tests/docker/test-full b/tests/docker/test-full
index 816d5a3eec..b4e42d25d7 100755
--- a/tests/docker/test-full
+++ b/tests/docker/test-full
@@ -1,8 +1,8 @@
#!/bin/bash
#
-# Compile all the targets with as many features enabled as possible
+# Compile all the targets.
#
-# Copyright 2016, 2017 Red Hat Inc.
+# Copyright (c) 2016 Red Hat Inc.
#
# Authors:
# Fam Zheng <famz@redhat.com>
@@ -13,77 +13,6 @@
. common.rc
-cd "$BUILD_DIR" || exit 1
+cd "$BUILD_DIR"
-build_qemu \
- --enable-attr \
- --enable-bluez \
- --enable-brlapi \
- --enable-bsd-user \
- --enable-bzip2 \
- --enable-cap-ng \
- --enable-coroutine-pool \
- --enable-crypto-afalg \
- --enable-curl \
- --enable-curses \
- --enable-debug \
- --enable-debug-info \
- --enable-debug-tcg \
- --enable-docs \
- --enable-fdt \
- --enable-gcrypt \
- --enable-glusterfs \
- --enable-gnutls \
- --enable-gprof \
- --enable-gtk \
- --enable-guest-agent \
- --enable-jemalloc \
- --enable-kvm \
- --enable-libiscsi \
- --enable-libnfs \
- --enable-libssh2 \
- --enable-libusb \
- --enable-linux-aio \
- --enable-linux-user \
- --enable-live-block-migration \
- --enable-lzo \
- --enable-modules \
- --enable-numa \
- --enable-opengl \
- --enable-pie \
- --enable-profiler \
- --enable-qom-cast-debug \
- --enable-rbd \
- --enable-rdma \
- --enable-replication \
- --enable-sdl \
- --enable-seccomp \
- --enable-smartcard \
- --enable-snappy \
- --enable-spice \
- --enable-stack-protector \
- --enable-system \
- --enable-tcg \
- --enable-tcg-interpreter \
- --enable-tools \
- --enable-tpm \
- --enable-trace-backend=ftrace \
- --enable-usb-redir \
- --enable-user \
- --enable-vde \
- --enable-vhost-net \
- --enable-vhost-scsi \
- --enable-vhost-user \
- --enable-vhost-vsock \
- --enable-virtfs \
- --enable-vnc \
- --enable-vnc-jpeg \
- --enable-vnc-png \
- --enable-vnc-sasl \
- --enable-vte \
- --enable-werror \
- --enable-xen \
- --enable-xen-pci-passthrough \
- --enable-xen-pv-domain-build \
- --enable-xfsctl \
-&& make check $MAKEFLAGS && install_qemu
+build_qemu && make check $MAKEFLAGS && install_qemu
diff --git a/tests/qapi-schema/doc-bad-section.err b/tests/qapi-schema/doc-bad-section.err
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-section.err
diff --git a/tests/qapi-schema/doc-bad-section.exit b/tests/qapi-schema/doc-bad-section.exit
new file mode 100644
index 0000000000..573541ac97
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-section.exit
@@ -0,0 +1 @@
+0
diff --git a/tests/qapi-schema/doc-bad-section.json b/tests/qapi-schema/doc-bad-section.json
new file mode 100644
index 0000000000..560df4b087
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-section.json
@@ -0,0 +1,11 @@
+# = section within an expression comment
+# BUG: not rejected
+
+##
+# @Enum:
+# == Produces *invalid* texinfo
+# @one: The _one_ {and only}
+#
+# @two is undocumented
+##
+{ 'enum': 'Enum', 'data': [ 'one', 'two' ] }
diff --git a/tests/qapi-schema/doc-bad-section.out b/tests/qapi-schema/doc-bad-section.out
new file mode 100644
index 0000000000..089bde1381
--- /dev/null
+++ b/tests/qapi-schema/doc-bad-section.out
@@ -0,0 +1,13 @@
+enum Enum ['one', 'two']
+enum QType ['none', 'qnull', 'qnum', 'qstring', 'qdict', 'qlist', 'qbool']
+ prefix QTYPE
+object q_empty
+doc symbol=Enum
+ body=
+== Produces *invalid* texinfo
+ arg=one
+The _one_ {and only}
+ arg=two
+
+ section=None
+@two is undocumented
diff --git a/tests/qapi-schema/doc-good.json b/tests/qapi-schema/doc-good.json
index cfdc0a8a81..97ab4625ff 100644
--- a/tests/qapi-schema/doc-good.json
+++ b/tests/qapi-schema/doc-good.json
@@ -51,7 +51,6 @@
##
# @Enum:
-# == Produces *invalid* texinfo
# @one: The _one_ {and only}
#
# @two is undocumented
diff --git a/tests/qapi-schema/doc-good.out b/tests/qapi-schema/doc-good.out
index 63ca25a8b9..1d2c250527 100644
--- a/tests/qapi-schema/doc-good.out
+++ b/tests/qapi-schema/doc-good.out
@@ -77,12 +77,12 @@ Examples:
- {braces}
doc symbol=Enum
body=
-== Produces *invalid* texinfo
+
arg=one
The _one_ {and only}
arg=two
- section=
+ section=None
@two is undocumented
doc symbol=Base
body=
diff --git a/tests/qapi-schema/doc-good.texi b/tests/qapi-schema/doc-good.texi
index c410626e4a..1778312581 100644
--- a/tests/qapi-schema/doc-good.texi
+++ b/tests/qapi-schema/doc-good.texi
@@ -76,7 +76,7 @@ Examples:
@deftp {Enum} Enum
-@subsection Produces @strong{invalid} texinfo
+
@b{Values:}
@table @asis
@@ -101,7 +101,6 @@ Not documented
the first member
@end table
-
@end deftp
@@ -118,7 +117,6 @@ Another paragraph (but no @code{var}: line)
Not documented
@end table
-
@end deftp
@@ -127,7 +125,6 @@ Not documented
-
@end deftp
@@ -143,7 +140,6 @@ Not documented
@item The members of @code{Variant2} when @code{base1} is @t{"two"}
@end table
-
@end deftp
@@ -160,7 +156,6 @@ One of @t{"one"}, @t{"two"}
@item @code{data: Variant2} when @code{type} is @t{"two"}
@end table
-
@end deftp
@@ -182,7 +177,6 @@ argument
Not documented
@end table
-
@b{Note:}
@code{arg3} is undocumented
@@ -209,14 +203,12 @@ Duis aute irure dolor
<- out
@end example
-
@b{Examples:}
@example
- *verbatim*
- @{braces@}
@end example
-
@b{Since:}
2.10
@@ -237,7 +229,6 @@ If you're bored enough to read this, go see a video of boxed cats
<- out
@end example
-
@end deftypefn
diff --git a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py
index c7724d3437..fe0ca08d78 100644
--- a/tests/qapi-schema/test-qapi.py
+++ b/tests/qapi-schema/test-qapi.py
@@ -61,8 +61,8 @@ for doc in schema.docs:
print 'doc symbol=%s' % doc.symbol
else:
print 'doc freeform'
- print ' body=\n%s' % doc.body
+ print ' body=\n%s' % doc.body.text
for arg, section in doc.args.iteritems():
- print ' arg=%s\n%s' % (arg, section)
+ print ' arg=%s\n%s' % (arg, section.text)
for section in doc.sections:
- print ' section=%s\n%s' % (section.name, section)
+ print ' section=%s\n%s' % (section.name, section.text)
diff --git a/tests/qemu-iotests/197 b/tests/qemu-iotests/197
index 887eb4f496..5e869fe2b7 100755
--- a/tests/qemu-iotests/197
+++ b/tests/qemu-iotests/197
@@ -60,6 +60,10 @@ echo '=== Copy-on-read ==='
echo
# Prep the images
+# VPC rounds image sizes to a specific geometry, force a specific size.
+if [ "$IMGFMT" = "vpc" ]; then
+ IMGOPTS=$(_optstr_add "$IMGOPTS" "force_size")
+fi
_make_test_img 4G
$QEMU_IO -c "write -P 55 3G 1k" "$TEST_IMG" | _filter_qemu_io
IMGPROTO=file IMGFMT=qcow2 IMGOPTS= TEST_IMG_FILE="$TEST_WRAP" \
diff --git a/tests/qemu-iotests/202 b/tests/qemu-iotests/202
new file mode 100755
index 0000000000..581ca34d79
--- /dev/null
+++ b/tests/qemu-iotests/202
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Creator/Owner: Stefan Hajnoczi <stefanha@redhat.com>
+#
+# Check that QMP 'transaction' blockdev-snapshot-sync with multiple drives on a
+# single IOThread completes successfully. This particular command triggered a
+# hang due to recursive AioContext locking and BDRV_POLL_WHILE(). Protect
+# against regressions.
+
+import iotests
+
+iotests.verify_image_format(supported_fmts=['qcow2'])
+iotests.verify_platform(['linux'])
+
+with iotests.FilePath('disk0.img') as disk0_img_path, \
+ iotests.FilePath('disk1.img') as disk1_img_path, \
+ iotests.FilePath('disk0-snap.img') as disk0_snap_img_path, \
+ iotests.FilePath('disk1-snap.img') as disk1_snap_img_path, \
+ iotests.VM() as vm:
+
+ img_size = '10M'
+ iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk0_img_path, img_size)
+ iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk1_img_path, img_size)
+
+ iotests.log('Launching VM...')
+ vm.launch()
+
+ iotests.log('Adding IOThread...')
+ iotests.log(vm.qmp('object-add',
+ qom_type='iothread',
+ id='iothread0'))
+
+ iotests.log('Adding blockdevs...')
+ iotests.log(vm.qmp('blockdev-add',
+ driver=iotests.imgfmt,
+ node_name='disk0',
+ file={
+ 'driver': 'file',
+ 'filename': disk0_img_path,
+ }))
+ iotests.log(vm.qmp('blockdev-add',
+ driver=iotests.imgfmt,
+ node_name='disk1',
+ file={
+ 'driver': 'file',
+ 'filename': disk1_img_path,
+ }))
+
+ iotests.log('Setting iothread...')
+ iotests.log(vm.qmp('x-blockdev-set-iothread',
+ node_name='disk0',
+ iothread='iothread0'))
+ iotests.log(vm.qmp('x-blockdev-set-iothread',
+ node_name='disk1',
+ iothread='iothread0'))
+
+ iotests.log('Creating external snapshots...')
+ iotests.log(vm.qmp(
+ 'transaction',
+ actions=[
+ {
+ 'data': {
+ 'node-name': 'disk0',
+ 'snapshot-file': disk0_snap_img_path,
+ 'snapshot-node-name': 'disk0-snap',
+ 'mode': 'absolute-paths',
+ 'format': iotests.imgfmt,
+ },
+ 'type': 'blockdev-snapshot-sync'
+ }, {
+ 'data': {
+ 'node-name': 'disk1',
+ 'snapshot-file': disk1_snap_img_path,
+ 'snapshot-node-name': 'disk1-snap',
+ 'mode': 'absolute-paths',
+ 'format': iotests.imgfmt
+ },
+ 'type': 'blockdev-snapshot-sync'
+ }
+ ]))
diff --git a/tests/qemu-iotests/202.out b/tests/qemu-iotests/202.out
new file mode 100644
index 0000000000..d5ea374e17
--- /dev/null
+++ b/tests/qemu-iotests/202.out
@@ -0,0 +1,11 @@
+Launching VM...
+Adding IOThread...
+{u'return': {}}
+Adding blockdevs...
+{u'return': {}}
+{u'return': {}}
+Setting iothread...
+{u'return': {}}
+{u'return': {}}
+Creating external snapshots...
+{u'return': {}}
diff --git a/tests/qemu-iotests/203 b/tests/qemu-iotests/203
new file mode 100755
index 0000000000..2c811917d8
--- /dev/null
+++ b/tests/qemu-iotests/203
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Creator/Owner: Stefan Hajnoczi <stefanha@redhat.com>
+#
+# Check that QMP 'migrate' with multiple drives on a single IOThread completes
+# successfully. This particular command triggered a hang in the source QEMU
+# process due to recursive AioContext locking in bdrv_invalidate_all() and
+# BDRV_POLL_WHILE().
+
+import iotests
+
+iotests.verify_image_format(supported_fmts=['qcow2'])
+iotests.verify_platform(['linux'])
+
+with iotests.FilePath('disk0.img') as disk0_img_path, \
+ iotests.FilePath('disk1.img') as disk1_img_path, \
+ iotests.VM() as vm:
+
+ img_size = '10M'
+ iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk0_img_path, img_size)
+ iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, disk1_img_path, img_size)
+
+ iotests.log('Launching VM...')
+ (vm.add_object('iothread,id=iothread0')
+ .add_drive(disk0_img_path, 'node-name=drive0-node', interface='none')
+ .add_drive(disk1_img_path, 'node-name=drive1-node', interface='none')
+ .launch())
+
+ iotests.log('Setting IOThreads...')
+ iotests.log(vm.qmp('x-blockdev-set-iothread',
+ node_name='drive0-node', iothread='iothread0',
+ force=True))
+ iotests.log(vm.qmp('x-blockdev-set-iothread',
+ node_name='drive1-node', iothread='iothread0',
+ force=True))
+
+ iotests.log('Starting migration...')
+ iotests.log(vm.qmp('migrate', uri='exec:cat >/dev/null'))
+ while True:
+ vm.get_qmp_event(wait=60.0)
+ result = vm.qmp('query-migrate')
+ status = result.get('return', {}).get('status', None)
+ if status == 'completed':
+ break
diff --git a/tests/qemu-iotests/203.out b/tests/qemu-iotests/203.out
new file mode 100644
index 0000000000..3f1ff900e4
--- /dev/null
+++ b/tests/qemu-iotests/203.out
@@ -0,0 +1,6 @@
+Launching VM...
+Setting IOThreads...
+{u'return': {}}
+{u'return': {}}
+Starting migration...
+{u'return': {}}
diff --git a/tests/qemu-iotests/common.filter b/tests/qemu-iotests/common.filter
index d9237799e9..f08248bfd9 100644
--- a/tests/qemu-iotests/common.filter
+++ b/tests/qemu-iotests/common.filter
@@ -134,7 +134,8 @@ _filter_img_create()
-e "s# log_size=[0-9]\\+##g" \
-e "s# refcount_bits=[0-9]\\+##g" \
-e "s# key-secret=[a-zA-Z0-9]\\+##g" \
- -e "s# iter-time=[0-9]\\+##g"
+ -e "s# iter-time=[0-9]\\+##g" \
+ -e "s# force_size=\\(on\\|off\\)##g"
}
_filter_img_info()
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index 3e688678dd..93d96fb22f 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -197,3 +197,5 @@
197 rw auto quick
198 rw auto
200 rw auto
+202 rw auto quick
+203 rw auto
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 6f057904a9..44477e9295 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -197,6 +197,11 @@ class VM(qtest.QEMUQtestMachine):
socket_scm_helper=socket_scm_helper)
self._num_drives = 0
+ def add_object(self, opts):
+ self._args.append('-object')
+ self._args.append(opts)
+ return self
+
def add_device(self, opts):
self._args.append('-device')
self._args.append(opts)
diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
index d396185972..c8bec81520 100644
--- a/tests/test-aio-multithread.c
+++ b/tests/test-aio-multithread.c
@@ -11,7 +11,6 @@
*/
#include "qemu/osdep.h"
-#include <glib.h>
#include "block/aio.h"
#include "qapi/error.h"
#include "qemu/coroutine.h"
diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
new file mode 100644
index 0000000000..d760e2b243
--- /dev/null
+++ b/tests/test-bdrv-drain.c
@@ -0,0 +1,651 @@
+/*
+ * Block node draining tests
+ *
+ * Copyright (c) 2017 Kevin Wolf <kwolf@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "block/block.h"
+#include "block/blockjob_int.h"
+#include "sysemu/block-backend.h"
+#include "qapi/error.h"
+
+typedef struct BDRVTestState {
+ int drain_count;
+} BDRVTestState;
+
+static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
+{
+ BDRVTestState *s = bs->opaque;
+ s->drain_count++;
+}
+
+static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs)
+{
+ BDRVTestState *s = bs->opaque;
+ s->drain_count--;
+}
+
+static void bdrv_test_close(BlockDriverState *bs)
+{
+ BDRVTestState *s = bs->opaque;
+ g_assert_cmpint(s->drain_count, >, 0);
+}
+
+static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
+ uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
+{
+ /* We want this request to stay until the polling loop in drain waits for
+ * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
+ * first and polls its result, too, but it shouldn't accidentally complete
+ * this request yet. */
+ qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
+
+ return 0;
+}
+
+static BlockDriver bdrv_test = {
+ .format_name = "test",
+ .instance_size = sizeof(BDRVTestState),
+
+ .bdrv_close = bdrv_test_close,
+ .bdrv_co_preadv = bdrv_test_co_preadv,
+
+ .bdrv_co_drain_begin = bdrv_test_co_drain_begin,
+ .bdrv_co_drain_end = bdrv_test_co_drain_end,
+
+ .bdrv_child_perm = bdrv_format_default_perms,
+};
+
+static void aio_ret_cb(void *opaque, int ret)
+{
+ int *aio_ret = opaque;
+ *aio_ret = ret;
+}
+
+typedef struct CallInCoroutineData {
+ void (*entry)(void);
+ bool done;
+} CallInCoroutineData;
+
+static coroutine_fn void call_in_coroutine_entry(void *opaque)
+{
+ CallInCoroutineData *data = opaque;
+
+ data->entry();
+ data->done = true;
+}
+
+static void call_in_coroutine(void (*entry)(void))
+{
+ Coroutine *co;
+ CallInCoroutineData data = {
+ .entry = entry,
+ .done = false,
+ };
+
+ co = qemu_coroutine_create(call_in_coroutine_entry, &data);
+ qemu_coroutine_enter(co);
+ while (!data.done) {
+ aio_poll(qemu_get_aio_context(), true);
+ }
+}
+
+enum drain_type {
+ BDRV_DRAIN_ALL,
+ BDRV_DRAIN,
+ BDRV_SUBTREE_DRAIN,
+ DRAIN_TYPE_MAX,
+};
+
+static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
+{
+ switch (drain_type) {
+ case BDRV_DRAIN_ALL: bdrv_drain_all_begin(); break;
+ case BDRV_DRAIN: bdrv_drained_begin(bs); break;
+ case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_begin(bs); break;
+ default: g_assert_not_reached();
+ }
+}
+
+static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
+{
+ switch (drain_type) {
+ case BDRV_DRAIN_ALL: bdrv_drain_all_end(); break;
+ case BDRV_DRAIN: bdrv_drained_end(bs); break;
+ case BDRV_SUBTREE_DRAIN: bdrv_subtree_drained_end(bs); break;
+ default: g_assert_not_reached();
+ }
+}
+
+static void test_drv_cb_common(enum drain_type drain_type, bool recursive)
+{
+ BlockBackend *blk;
+ BlockDriverState *bs, *backing;
+ BDRVTestState *s, *backing_s;
+ BlockAIOCB *acb;
+ int aio_ret;
+
+ QEMUIOVector qiov;
+ struct iovec iov = {
+ .iov_base = NULL,
+ .iov_len = 0,
+ };
+ qemu_iovec_init_external(&qiov, &iov, 1);
+
+ blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
+ &error_abort);
+ s = bs->opaque;
+ blk_insert_bs(blk, bs, &error_abort);
+
+ backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
+ backing_s = backing->opaque;
+ bdrv_set_backing_hd(bs, backing, &error_abort);
+
+ /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
+ g_assert_cmpint(s->drain_count, ==, 0);
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
+
+ do_drain_begin(drain_type, bs);
+
+ g_assert_cmpint(s->drain_count, ==, 1);
+ g_assert_cmpint(backing_s->drain_count, ==, !!recursive);
+
+ do_drain_end(drain_type, bs);
+
+ g_assert_cmpint(s->drain_count, ==, 0);
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
+
+ /* Now do the same while a request is pending */
+ aio_ret = -EINPROGRESS;
+ acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
+ g_assert(acb != NULL);
+ g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
+
+ g_assert_cmpint(s->drain_count, ==, 0);
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
+
+ do_drain_begin(drain_type, bs);
+
+ g_assert_cmpint(aio_ret, ==, 0);
+ g_assert_cmpint(s->drain_count, ==, 1);
+ g_assert_cmpint(backing_s->drain_count, ==, !!recursive);
+
+ do_drain_end(drain_type, bs);
+
+ g_assert_cmpint(s->drain_count, ==, 0);
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
+
+ bdrv_unref(backing);
+ bdrv_unref(bs);
+ blk_unref(blk);
+}
+
+static void test_drv_cb_drain_all(void)
+{
+ test_drv_cb_common(BDRV_DRAIN_ALL, true);
+}
+
+static void test_drv_cb_drain(void)
+{
+ test_drv_cb_common(BDRV_DRAIN, false);
+}
+
+static void test_drv_cb_drain_subtree(void)
+{
+ test_drv_cb_common(BDRV_SUBTREE_DRAIN, true);
+}
+
+static void test_drv_cb_co_drain(void)
+{
+ call_in_coroutine(test_drv_cb_drain);
+}
+
+static void test_drv_cb_co_drain_subtree(void)
+{
+ call_in_coroutine(test_drv_cb_drain_subtree);
+}
+
+static void test_quiesce_common(enum drain_type drain_type, bool recursive)
+{
+ BlockBackend *blk;
+ BlockDriverState *bs, *backing;
+
+ blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
+ &error_abort);
+ blk_insert_bs(blk, bs, &error_abort);
+
+ backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
+ bdrv_set_backing_hd(bs, backing, &error_abort);
+
+ g_assert_cmpint(bs->quiesce_counter, ==, 0);
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
+
+ do_drain_begin(drain_type, bs);
+
+ g_assert_cmpint(bs->quiesce_counter, ==, 1);
+ g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);
+
+ do_drain_end(drain_type, bs);
+
+ g_assert_cmpint(bs->quiesce_counter, ==, 0);
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
+
+ bdrv_unref(backing);
+ bdrv_unref(bs);
+ blk_unref(blk);
+}
+
+static void test_quiesce_drain_all(void)
+{
+ // XXX drain_all doesn't quiesce
+ //test_quiesce_common(BDRV_DRAIN_ALL, true);
+}
+
+static void test_quiesce_drain(void)
+{
+ test_quiesce_common(BDRV_DRAIN, false);
+}
+
+static void test_quiesce_drain_subtree(void)
+{
+ test_quiesce_common(BDRV_SUBTREE_DRAIN, true);
+}
+
+static void test_quiesce_co_drain(void)
+{
+ call_in_coroutine(test_quiesce_drain);
+}
+
+static void test_quiesce_co_drain_subtree(void)
+{
+ call_in_coroutine(test_quiesce_drain_subtree);
+}
+
+static void test_nested(void)
+{
+ BlockBackend *blk;
+ BlockDriverState *bs, *backing;
+ BDRVTestState *s, *backing_s;
+ enum drain_type outer, inner;
+
+ blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
+ &error_abort);
+ s = bs->opaque;
+ blk_insert_bs(blk, bs, &error_abort);
+
+ backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
+ backing_s = backing->opaque;
+ bdrv_set_backing_hd(bs, backing, &error_abort);
+
+ for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
+ for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
+ /* XXX bdrv_drain_all() doesn't increase the quiesce_counter */
+ int bs_quiesce = (outer != BDRV_DRAIN_ALL) +
+ (inner != BDRV_DRAIN_ALL);
+ int backing_quiesce = (outer == BDRV_SUBTREE_DRAIN) +
+ (inner == BDRV_SUBTREE_DRAIN);
+ int backing_cb_cnt = (outer != BDRV_DRAIN) +
+ (inner != BDRV_DRAIN);
+
+ g_assert_cmpint(bs->quiesce_counter, ==, 0);
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
+ g_assert_cmpint(s->drain_count, ==, 0);
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
+
+ do_drain_begin(outer, bs);
+ do_drain_begin(inner, bs);
+
+ g_assert_cmpint(bs->quiesce_counter, ==, bs_quiesce);
+ g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
+ g_assert_cmpint(s->drain_count, ==, 2);
+ g_assert_cmpint(backing_s->drain_count, ==, backing_cb_cnt);
+
+ do_drain_end(inner, bs);
+ do_drain_end(outer, bs);
+
+ g_assert_cmpint(bs->quiesce_counter, ==, 0);
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
+ g_assert_cmpint(s->drain_count, ==, 0);
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
+ }
+ }
+
+ bdrv_unref(backing);
+ bdrv_unref(bs);
+ blk_unref(blk);
+}
+
+static void test_multiparent(void)
+{
+ BlockBackend *blk_a, *blk_b;
+ BlockDriverState *bs_a, *bs_b, *backing;
+ BDRVTestState *a_s, *b_s, *backing_s;
+
+ blk_a = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
+ &error_abort);
+ a_s = bs_a->opaque;
+ blk_insert_bs(blk_a, bs_a, &error_abort);
+
+ blk_b = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
+ &error_abort);
+ b_s = bs_b->opaque;
+ blk_insert_bs(blk_b, bs_b, &error_abort);
+
+ backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
+ backing_s = backing->opaque;
+ bdrv_set_backing_hd(bs_a, backing, &error_abort);
+ bdrv_set_backing_hd(bs_b, backing, &error_abort);
+
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
+ g_assert_cmpint(a_s->drain_count, ==, 0);
+ g_assert_cmpint(b_s->drain_count, ==, 0);
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
+
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
+
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
+ g_assert_cmpint(backing->quiesce_counter, ==, 1);
+ g_assert_cmpint(a_s->drain_count, ==, 1);
+ g_assert_cmpint(b_s->drain_count, ==, 1);
+ g_assert_cmpint(backing_s->drain_count, ==, 1);
+
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
+
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 2);
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
+ g_assert_cmpint(backing->quiesce_counter, ==, 2);
+ g_assert_cmpint(a_s->drain_count, ==, 2);
+ g_assert_cmpint(b_s->drain_count, ==, 2);
+ g_assert_cmpint(backing_s->drain_count, ==, 2);
+
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
+
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
+ g_assert_cmpint(backing->quiesce_counter, ==, 1);
+ g_assert_cmpint(a_s->drain_count, ==, 1);
+ g_assert_cmpint(b_s->drain_count, ==, 1);
+ g_assert_cmpint(backing_s->drain_count, ==, 1);
+
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
+
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
+ g_assert_cmpint(a_s->drain_count, ==, 0);
+ g_assert_cmpint(b_s->drain_count, ==, 0);
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
+
+ bdrv_unref(backing);
+ bdrv_unref(bs_a);
+ bdrv_unref(bs_b);
+ blk_unref(blk_a);
+ blk_unref(blk_b);
+}
+
+static void test_graph_change(void)
+{
+ BlockBackend *blk_a, *blk_b;
+ BlockDriverState *bs_a, *bs_b, *backing;
+ BDRVTestState *a_s, *b_s, *backing_s;
+
+ blk_a = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
+ &error_abort);
+ a_s = bs_a->opaque;
+ blk_insert_bs(blk_a, bs_a, &error_abort);
+
+ blk_b = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
+ &error_abort);
+ b_s = bs_b->opaque;
+ blk_insert_bs(blk_b, bs_b, &error_abort);
+
+ backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
+ backing_s = backing->opaque;
+ bdrv_set_backing_hd(bs_a, backing, &error_abort);
+
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
+ g_assert_cmpint(a_s->drain_count, ==, 0);
+ g_assert_cmpint(b_s->drain_count, ==, 0);
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
+
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_a);
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
+ do_drain_begin(BDRV_SUBTREE_DRAIN, bs_b);
+
+ bdrv_set_backing_hd(bs_b, backing, &error_abort);
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
+ g_assert_cmpint(backing->quiesce_counter, ==, 5);
+ g_assert_cmpint(a_s->drain_count, ==, 5);
+ g_assert_cmpint(b_s->drain_count, ==, 5);
+ g_assert_cmpint(backing_s->drain_count, ==, 5);
+
+ bdrv_set_backing_hd(bs_b, NULL, &error_abort);
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 3);
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 2);
+ g_assert_cmpint(backing->quiesce_counter, ==, 3);
+ g_assert_cmpint(a_s->drain_count, ==, 3);
+ g_assert_cmpint(b_s->drain_count, ==, 2);
+ g_assert_cmpint(backing_s->drain_count, ==, 3);
+
+ bdrv_set_backing_hd(bs_b, backing, &error_abort);
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 5);
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 5);
+ g_assert_cmpint(backing->quiesce_counter, ==, 5);
+ g_assert_cmpint(a_s->drain_count, ==, 5);
+ g_assert_cmpint(b_s->drain_count, ==, 5);
+ g_assert_cmpint(backing_s->drain_count, ==, 5);
+
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_b);
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
+ do_drain_end(BDRV_SUBTREE_DRAIN, bs_a);
+
+ g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
+ g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
+ g_assert_cmpint(backing->quiesce_counter, ==, 0);
+ g_assert_cmpint(a_s->drain_count, ==, 0);
+ g_assert_cmpint(b_s->drain_count, ==, 0);
+ g_assert_cmpint(backing_s->drain_count, ==, 0);
+
+ bdrv_unref(backing);
+ bdrv_unref(bs_a);
+ bdrv_unref(bs_b);
+ blk_unref(blk_a);
+ blk_unref(blk_b);
+}
+
+
+typedef struct TestBlockJob {
+ BlockJob common;
+ bool should_complete;
+} TestBlockJob;
+
+static void test_job_completed(BlockJob *job, void *opaque)
+{
+ block_job_completed(job, 0);
+}
+
+static void coroutine_fn test_job_start(void *opaque)
+{
+ TestBlockJob *s = opaque;
+
+ while (!s->should_complete) {
+ block_job_sleep_ns(&s->common, 100000);
+ }
+
+ block_job_defer_to_main_loop(&s->common, test_job_completed, NULL);
+}
+
+static void test_job_complete(BlockJob *job, Error **errp)
+{
+ TestBlockJob *s = container_of(job, TestBlockJob, common);
+ s->should_complete = true;
+}
+
+BlockJobDriver test_job_driver = {
+ .instance_size = sizeof(TestBlockJob),
+ .start = test_job_start,
+ .complete = test_job_complete,
+};
+
+static void test_blockjob_common(enum drain_type drain_type)
+{
+ BlockBackend *blk_src, *blk_target;
+ BlockDriverState *src, *target;
+ BlockJob *job;
+ int ret;
+
+ src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
+ &error_abort);
+ blk_src = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ blk_insert_bs(blk_src, src, &error_abort);
+
+ target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
+ &error_abort);
+ blk_target = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+ blk_insert_bs(blk_target, target, &error_abort);
+
+ job = block_job_create("job0", &test_job_driver, src, 0, BLK_PERM_ALL, 0,
+ 0, NULL, NULL, &error_abort);
+ block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
+ block_job_start(job);
+
+ g_assert_cmpint(job->pause_count, ==, 0);
+ g_assert_false(job->paused);
+ g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
+
+ do_drain_begin(drain_type, src);
+
+ if (drain_type == BDRV_DRAIN_ALL) {
+ /* bdrv_drain_all() drains both src and target */
+ g_assert_cmpint(job->pause_count, ==, 2);
+ } else {
+ g_assert_cmpint(job->pause_count, ==, 1);
+ }
+ /* XXX We don't wait until the job is actually paused. Is this okay? */
+ /* g_assert_true(job->paused); */
+ g_assert_false(job->busy); /* The job is paused */
+
+ do_drain_end(drain_type, src);
+
+ g_assert_cmpint(job->pause_count, ==, 0);
+ g_assert_false(job->paused);
+ g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
+
+ do_drain_begin(drain_type, target);
+
+ if (drain_type == BDRV_DRAIN_ALL) {
+ /* bdrv_drain_all() drains both src and target */
+ g_assert_cmpint(job->pause_count, ==, 2);
+ } else {
+ g_assert_cmpint(job->pause_count, ==, 1);
+ }
+ /* XXX We don't wait until the job is actually paused. Is this okay? */
+ /* g_assert_true(job->paused); */
+ g_assert_false(job->busy); /* The job is paused */
+
+ do_drain_end(drain_type, target);
+
+ g_assert_cmpint(job->pause_count, ==, 0);
+ g_assert_false(job->paused);
+ g_assert_false(job->busy); /* We're in block_job_sleep_ns() */
+
+ ret = block_job_complete_sync(job, &error_abort);
+ g_assert_cmpint(ret, ==, 0);
+
+ blk_unref(blk_src);
+ blk_unref(blk_target);
+ bdrv_unref(src);
+ bdrv_unref(target);
+}
+
+static void test_blockjob_drain_all(void)
+{
+ test_blockjob_common(BDRV_DRAIN_ALL);
+}
+
+static void test_blockjob_drain(void)
+{
+ test_blockjob_common(BDRV_DRAIN);
+}
+
+static void test_blockjob_drain_subtree(void)
+{
+ test_blockjob_common(BDRV_SUBTREE_DRAIN);
+}
+
+int main(int argc, char **argv)
+{
+ bdrv_init();
+ qemu_init_main_loop(&error_abort);
+
+ g_test_init(&argc, &argv, NULL);
+
+ g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
+ g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
+ g_test_add_func("/bdrv-drain/driver-cb/drain_subtree",
+ test_drv_cb_drain_subtree);
+
+ // XXX bdrv_drain_all() doesn't work in coroutine context
+ g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain);
+ g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree",
+ test_drv_cb_co_drain_subtree);
+
+
+ g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
+ g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
+ g_test_add_func("/bdrv-drain/quiesce/drain_subtree",
+ test_quiesce_drain_subtree);
+
+ // XXX bdrv_drain_all() doesn't work in coroutine context
+ g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain);
+ g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree",
+ test_quiesce_co_drain_subtree);
+
+ g_test_add_func("/bdrv-drain/nested", test_nested);
+ g_test_add_func("/bdrv-drain/multiparent", test_multiparent);
+ g_test_add_func("/bdrv-drain/graph-change", test_graph_change);
+
+ g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
+ g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
+ g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
+ test_blockjob_drain_subtree);
+
+ return g_test_run();
+}
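test-bdrv-drain.c above establishes a reusable pattern: create a throwaway BlockBackend/BlockDriverState pair backed by bdrv_test, bracket the assertions with do_drain_begin()/do_drain_end(), and register the case in main(). A minimal sketch of an additional case written against the helpers defined in this file; the test name is hypothetical:

static void test_drain_noop(void)
{
    BlockBackend *blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
    BlockDriverState *bs = bdrv_new_open_driver(&bdrv_test, "noop-node",
                                                BDRV_O_RDWR, &error_abort);

    blk_insert_bs(blk, bs, &error_abort);

    do_drain_begin(BDRV_DRAIN, bs);
    g_assert_cmpint(bs->quiesce_counter, ==, 1);
    do_drain_end(BDRV_DRAIN, bs);
    g_assert_cmpint(bs->quiesce_counter, ==, 0);

    bdrv_unref(bs);
    blk_unref(blk);
}
/* ...and in main(): g_test_add_func("/bdrv-drain/noop", test_drain_noop); */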
diff --git a/tests/test-char.c b/tests/test-char.c
index 7ac25ff73f..911e3f6e8d 100644
--- a/tests/test-char.c
+++ b/tests/test-char.c
@@ -5,6 +5,7 @@
#include "qemu/config-file.h"
#include "qemu/sockets.h"
#include "chardev/char-fe.h"
+#include "chardev/char-mux.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qom/qom-qobject.h"
@@ -164,6 +165,7 @@ static void char_mux_test(void)
FeHandler h1 = { 0, }, h2 = { 0, };
CharBackend chr_be1, chr_be2;
+ muxes_realized = true; /* done after machine init */
opts = qemu_opts_create(qemu_find_opts("chardev"), "mux-label",
1, &error_abort);
qemu_opt_set(opts, "backend", "ringbuf", &error_abort);
@@ -201,8 +203,23 @@ static void char_mux_test(void)
g_assert_cmpstr(h2.read_buf, ==, "hello");
h2.read_count = 0;
+ g_assert_cmpint(h1.last_event, !=, 42); /* should be MUX_OUT or OPENED */
+ g_assert_cmpint(h2.last_event, !=, 42); /* should be MUX_IN or OPENED */
+ /* sending an event on the base broadcasts to all frontends, historical reasons? */
+ qemu_chr_be_event(base, 42);
+ g_assert_cmpint(h1.last_event, ==, 42);
+ g_assert_cmpint(h2.last_event, ==, 42);
+ qemu_chr_be_event(chr, -1);
+ g_assert_cmpint(h1.last_event, ==, 42);
+ g_assert_cmpint(h2.last_event, ==, -1);
+
/* switch focus */
qemu_chr_be_write(base, (void *)"\1c", 2);
+ g_assert_cmpint(h1.last_event, ==, CHR_EVENT_MUX_IN);
+ g_assert_cmpint(h2.last_event, ==, CHR_EVENT_MUX_OUT);
+ qemu_chr_be_event(chr, -1);
+ g_assert_cmpint(h1.last_event, ==, -1);
+ g_assert_cmpint(h2.last_event, ==, CHR_EVENT_MUX_OUT);
qemu_chr_be_write(base, (void *)"hello", 6);
g_assert_cmpint(h2.read_count, ==, 0);
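The new assertions above read h1.last_event and h2.last_event, so the test's frontend callback must record every CHR_EVENT_* it receives. A sketch of that callback; the FeHandler field set is assumed from what the test reads and may differ from the actual definition in test-char.c:

typedef struct {
    int read_count;
    char read_buf[128];
    int last_event;                 /* field set assumed from usage */
} FeHandler;

static void fe_event(void *opaque, int event)
{
    FeHandler *h = opaque;

    h->last_event = event;          /* checked by the mux assertions */
}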
diff --git a/tests/test-clone-visitor.c b/tests/test-clone-visitor.c
index 96982163e4..ac6afc562e 100644
--- a/tests/test-clone-visitor.c
+++ b/tests/test-clone-visitor.c
@@ -8,7 +8,6 @@
*/
#include "qemu/osdep.h"
-#include <glib.h>
#include "qemu-common.h"
#include "qapi/clone-visitor.h"
diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c
index af41642346..9091c639b3 100644
--- a/tests/test-hbitmap.c
+++ b/tests/test-hbitmap.c
@@ -925,6 +925,61 @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data,
hbitmap_iter_next(&hbi);
}
+static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start)
+{
+ int64_t ret1 = hbitmap_next_zero(data->hb, start);
+ int64_t ret2 = start;
+ for ( ; ret2 < data->size && hbitmap_get(data->hb, ret2); ret2++) {
+ ;
+ }
+ if (ret2 == data->size) {
+ ret2 = -1;
+ }
+
+ g_assert_cmpint(ret1, ==, ret2);
+}
+
+static void test_hbitmap_next_zero_do(TestHBitmapData *data, int granularity)
+{
+ hbitmap_test_init(data, L3, granularity);
+ test_hbitmap_next_zero_check(data, 0);
+ test_hbitmap_next_zero_check(data, L3 - 1);
+
+ hbitmap_set(data->hb, L2, 1);
+ test_hbitmap_next_zero_check(data, 0);
+ test_hbitmap_next_zero_check(data, L2 - 1);
+ test_hbitmap_next_zero_check(data, L2);
+ test_hbitmap_next_zero_check(data, L2 + 1);
+
+ hbitmap_set(data->hb, L2 + 5, L1);
+ test_hbitmap_next_zero_check(data, 0);
+ test_hbitmap_next_zero_check(data, L2 + 1);
+ test_hbitmap_next_zero_check(data, L2 + 2);
+ test_hbitmap_next_zero_check(data, L2 + 5);
+ test_hbitmap_next_zero_check(data, L2 + L1 - 1);
+ test_hbitmap_next_zero_check(data, L2 + L1);
+
+ hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2);
+ test_hbitmap_next_zero_check(data, L2 * 2 - L1);
+ test_hbitmap_next_zero_check(data, L2 * 2 - 2);
+ test_hbitmap_next_zero_check(data, L2 * 2 - 1);
+ test_hbitmap_next_zero_check(data, L2 * 2);
+ test_hbitmap_next_zero_check(data, L3 - 1);
+
+ hbitmap_set(data->hb, 0, L3);
+ test_hbitmap_next_zero_check(data, 0);
+}
+
+static void test_hbitmap_next_zero_0(TestHBitmapData *data, const void *unused)
+{
+ test_hbitmap_next_zero_do(data, 0);
+}
+
+static void test_hbitmap_next_zero_4(TestHBitmapData *data, const void *unused)
+{
+ test_hbitmap_next_zero_do(data, 4);
+}
+
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
@@ -985,6 +1040,12 @@ int main(int argc, char **argv)
hbitmap_test_add("/hbitmap/iter/iter_and_reset",
test_hbitmap_iter_and_reset);
+
+ hbitmap_test_add("/hbitmap/next_zero/next_zero_0",
+ test_hbitmap_next_zero_0);
+ hbitmap_test_add("/hbitmap/next_zero/next_zero_4",
+ test_hbitmap_next_zero_4);
+
g_test_run();
return 0;
diff --git a/tests/test-hmp.c b/tests/test-hmp.c
index 5677fbf775..5b7e447b6a 100644
--- a/tests/test-hmp.c
+++ b/tests/test-hmp.c
@@ -78,10 +78,13 @@ static void test_commands(void)
int i;
for (i = 0; hmp_cmds[i] != NULL; i++) {
+ response = hmp("%s", hmp_cmds[i]);
if (verbose) {
- fprintf(stderr, "\t%s\n", hmp_cmds[i]);
+ fprintf(stderr,
+ "\texecute HMP command: %s\n"
+ "\tresult : %s\n",
+ hmp_cmds[i], response);
}
- response = hmp("%s", hmp_cmds[i]);
g_free(response);
}
diff --git a/tests/test-uuid.c b/tests/test-uuid.c
index d3a2791fd4..22b4b0727d 100644
--- a/tests/test-uuid.c
+++ b/tests/test-uuid.c
@@ -93,12 +93,18 @@ static inline bool uuid_is_valid(QemuUUID *uuid)
static void test_uuid_generate(void)
{
+ QemuUUID uuid_not_null = { { {
+ 0x58, 0x6e, 0xce, 0x27, 0x7f, 0x09, 0x41, 0xe0,
+ 0x9e, 0x74, 0xe9, 0x01, 0x31, 0x7e, 0x9d, 0x42
+ } } };
QemuUUID uuid;
int i;
for (i = 0; i < 100; ++i) {
qemu_uuid_generate(&uuid);
g_assert(uuid_is_valid(&uuid));
+ g_assert_false(qemu_uuid_is_null(&uuid));
+ g_assert_false(qemu_uuid_is_equal(&uuid_not_null, &uuid));
}
}
@@ -168,8 +174,8 @@ static void test_uuid_unparse_strdup(void)
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
- g_test_add_func("/uuid/generate", test_uuid_generate);
g_test_add_func("/uuid/is_null", test_uuid_is_null);
+ g_test_add_func("/uuid/generate", test_uuid_generate);
g_test_add_func("/uuid/parse", test_uuid_parse);
g_test_add_func("/uuid/unparse", test_uuid_unparse);
g_test_add_func("/uuid/unparse_strdup", test_uuid_unparse_strdup);
diff --git a/tests/vhost-user-test.c b/tests/vhost-user-test.c
index 4b98018478..e2c89ed376 100644
--- a/tests/vhost-user-test.c
+++ b/tests/vhost-user-test.c
@@ -21,7 +21,6 @@
#include "libqos/libqos.h"
#include "libqos/pci-pc.h"
#include "libqos/virtio-pci.h"
-#include "qapi/error.h"
#include "libqos/malloc-pc.h"
#include "hw/virtio/virtio-net.h"
diff --git a/tests/virtio-9p-test.c b/tests/virtio-9p-test.c
index ad33d96387..00f00f7246 100644
--- a/tests/virtio-9p-test.c
+++ b/tests/virtio-9p-test.c
@@ -18,6 +18,8 @@
#include "standard-headers/linux/virtio_pci.h"
#include "hw/9pfs/9p.h"
+#define QVIRTIO_9P_TIMEOUT_US (10 * 1000 * 1000)
+
static const char mount_tag[] = "qtest";
typedef struct {
@@ -73,6 +75,9 @@ static QVirtIO9P *qvirtio_9p_pci_start(void)
qvirtio_set_driver(v9p->dev);
v9p->vq = qvirtqueue_setup(v9p->dev, v9p->qs->alloc, 0);
+
+ qvirtio_set_driver_ok(v9p->dev);
+
return v9p;
}
@@ -111,6 +116,7 @@ typedef struct {
/* No r_size, it is hardcoded to P9_MAX_SIZE */
size_t t_off;
size_t r_off;
+ uint32_t free_head;
} P9Req;
static void v9fs_memwrite(P9Req *req, const void *addr, size_t len)
@@ -124,11 +130,6 @@ static void v9fs_memskip(P9Req *req, size_t len)
req->r_off += len;
}
-static void v9fs_memrewind(P9Req *req, size_t len)
-{
- req->r_off -= len;
-}
-
static void v9fs_memread(P9Req *req, void *addr, size_t len)
{
memread(req->r_msg + req->r_off, addr, len);
@@ -227,12 +228,12 @@ static P9Req *v9fs_req_init(QVirtIO9P *v9p, uint32_t size, uint8_t id,
static void v9fs_req_send(P9Req *req)
{
QVirtIO9P *v9p = req->v9p;
- uint32_t free_head;
req->r_msg = guest_alloc(v9p->qs->alloc, P9_MAX_SIZE);
- free_head = qvirtqueue_add(v9p->vq, req->t_msg, req->t_size, false, true);
+ req->free_head = qvirtqueue_add(v9p->vq, req->t_msg, req->t_size, false,
+ true);
qvirtqueue_add(v9p->vq, req->r_msg, P9_MAX_SIZE, true, false);
- qvirtqueue_kick(v9p->dev, v9p->vq, free_head);
+ qvirtqueue_kick(v9p->dev, v9p->vq, req->free_head);
req->t_off = 0;
}
@@ -250,19 +251,13 @@ static void v9fs_req_recv(P9Req *req, uint8_t id)
{
QVirtIO9P *v9p = req->v9p;
P9Hdr hdr;
- int i;
- for (i = 0; i < 10; i++) {
- qvirtio_wait_queue_isr(v9p->dev, v9p->vq, 1000 * 1000);
+ qvirtio_wait_used_elem(v9p->dev, v9p->vq, req->free_head,
+ QVIRTIO_9P_TIMEOUT_US);
- v9fs_memread(req, &hdr, 7);
- hdr.size = ldl_le_p(&hdr.size);
- hdr.tag = lduw_le_p(&hdr.tag);
- if (hdr.size >= 7) {
- break;
- }
- v9fs_memrewind(req, 7);
- }
+ v9fs_memread(req, &hdr, 7);
+ hdr.size = ldl_le_p(&hdr.size);
+ hdr.tag = lduw_le_p(&hdr.tag);
g_assert_cmpint(hdr.size, >=, 7);
g_assert_cmpint(hdr.size, <=, P9_MAX_SIZE);
diff --git a/tests/vmgenid-test.c b/tests/vmgenid-test.c
index 5a86b40775..68ff954578 100644
--- a/tests/vmgenid-test.c
+++ b/tests/vmgenid-test.c
@@ -8,9 +8,6 @@
* See the COPYING file in the top-level directory.
*/
-#include <glib.h>
-#include <string.h>
-#include <unistd.h>
#include "qemu/osdep.h"
#include "qemu/bitmap.h"
#include "qemu/uuid.h"
diff --git a/tpm.c b/tpm.c
index ab5d29e91e..61a434185a 100644
--- a/tpm.c
+++ b/tpm.c
@@ -23,13 +23,6 @@
static QLIST_HEAD(, TPMBackend) tpm_backends =
QLIST_HEAD_INITIALIZER(tpm_backends);
-static bool tpm_models[TPM_MODEL__MAX];
-
-void tpm_register_model(enum TpmModel model)
-{
- tpm_models[model] = true;
-}
-
static const TPMBackendClass *
tpm_be_find_by_type(enum TpmType type)
{
@@ -69,7 +62,7 @@ static void tpm_display_backend_drivers(void)
/*
* Find the TPM with the given Id
*/
-TPMBackend *qemu_find_tpm(const char *id)
+TPMBackend *qemu_find_tpm_be(const char *id)
{
TPMBackend *drv;
@@ -127,17 +120,12 @@ static int tpm_init_tpmdev(void *dummy, QemuOpts *opts, Error **errp)
return 1;
}
- drv = be->create(opts, id);
+ drv = be->create(opts);
if (!drv) {
return 1;
}
- tpm_backend_open(drv, &local_err);
- if (local_err) {
- error_report_err(local_err);
- return 1;
- }
-
+ drv->id = g_strdup(id);
QLIST_INSERT_HEAD(&tpm_backends, drv, list);
return 0;
@@ -200,9 +188,10 @@ TPMInfoList *qmp_query_tpm(Error **errp)
TPMInfoList *info, *head = NULL, *cur_item = NULL;
QLIST_FOREACH(drv, &tpm_backends, list) {
- if (!tpm_models[drv->fe_model]) {
+ if (!drv->tpmif) {
continue;
}
+
info = g_new0(TPMInfoList, 1);
info->value = tpm_backend_query_tpm(drv);
@@ -240,18 +229,16 @@ TpmTypeList *qmp_query_tpm_types(Error **errp)
return head;
}
-
TpmModelList *qmp_query_tpm_models(Error **errp)
{
- unsigned int i = 0;
TpmModelList *head = NULL, *prev = NULL, *cur_item;
+ GSList *e, *l = object_class_get_list(TYPE_TPM_IF, false);
+
+ for (e = l; e; e = e->next) {
+ TPMIfClass *c = TPM_IF_CLASS(e->data);
- for (i = 0; i < TPM_MODEL__MAX; i++) {
- if (!tpm_models[i]) {
- continue;
- }
cur_item = g_new0(TpmModelList, 1);
- cur_item->value = i;
+ cur_item->value = c->model;
if (prev) {
prev->next = cur_item;
@@ -261,6 +248,7 @@ TpmModelList *qmp_query_tpm_models(Error **errp)
}
prev = cur_item;
}
+ g_slist_free(l);
return head;
}
diff --git a/trace-events b/trace-events
index 1d2eb5d3e4..3695959d0a 100644
--- a/trace-events
+++ b/trace-events
@@ -68,6 +68,34 @@ flatview_new(FlatView *view, MemoryRegion *root) "%p (root %p)"
flatview_destroy(FlatView *view, MemoryRegion *root) "%p (root %p)"
flatview_destroy_rcu(FlatView *view, MemoryRegion *root) "%p (root %p)"
+# gdbstub.c
+gdbstub_op_start(char const *device) "Starting gdbstub using device %s"
+gdbstub_op_exiting(uint8_t code) "notifying exit with code=0x%02x"
+gdbstub_op_continue(void) "Continuing all CPUs"
+gdbstub_op_continue_cpu(int cpu_index) "Continuing CPU %d"
+gdbstub_op_stepping(int cpu_index) "Stepping CPU %d"
+gdbstub_op_extra_info(char const *info) "Thread extra info: %s"
+gdbstub_hit_watchpoint(char const *type, int cpu_gdb_index, uint64_t vaddr) "Watchpoint hit, type=\"%s\" cpu=%d, vaddr=0x%" PRIx64 ""
+gdbstub_hit_internal_error(void) "RUN_STATE_INTERNAL_ERROR"
+gdbstub_hit_break(void) "RUN_STATE_DEBUG"
+gdbstub_hit_paused(void) "RUN_STATE_PAUSED"
+gdbstub_hit_shutdown(void) "RUN_STATE_SHUTDOWN"
+gdbstub_hit_io_error(void) "RUN_STATE_IO_ERROR"
+gdbstub_hit_watchdog(void) "RUN_STATE_WATCHDOG"
+gdbstub_hit_unknown(int state) "Unknown run state=0x%x"
+gdbstub_io_reply(char const *message) "Sent: %s"
+gdbstub_io_binaryreply(size_t ofs, char const *line) "0x%04zx: %s"
+gdbstub_io_command(char const *command) "Received: %s"
+gdbstub_io_got_ack(void) "Got ACK"
+gdbstub_io_got_unexpected(uint8_t ch) "Got 0x%02x when expecting ACK/NACK"
+gdbstub_err_got_nack(void) "Got NACK, retransmitting"
+gdbstub_err_garbage(uint8_t ch) "received garbage between packets: 0x%02x"
+gdbstub_err_overrun(void) "command buffer overrun, dropping command"
+gdbstub_err_invalid_repeat(uint8_t ch) "got invalid RLE count: 0x%02x"
+gdbstub_err_invalid_rle(void) "got invalid RLE sequence"
+gdbstub_err_checksum_invalid(uint8_t ch) "got invalid command checksum digit: 0x%02x"
+gdbstub_err_checksum_incorrect(uint8_t expected, uint8_t got) "got command packet with incorrect checksum, expected=0x%02x, received=0x%02x"
+
### Guest events, keep at bottom
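Each entry above turns into a generated trace_<event>() helper that gdbstub.c calls at the corresponding point. A sketch of how one of them would be emitted; the generated header name for root-directory trace-events is an assumption:

#include "trace-root.h"             /* generated; name assumed */

static void log_incoming_packet(const char *line_buf)
{
    trace_gdbstub_io_command(line_buf);     /* logs "Received: %s" */
}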
diff --git a/trace/ftrace.c b/trace/ftrace.c
index 7de104deba..61692a8682 100644
--- a/trace/ftrace.c
+++ b/trace/ftrace.c
@@ -15,10 +15,11 @@
int trace_marker_fd;
-static int find_debugfs(char *debugfs)
+static int find_mount(char *mount_point, const char *fstype)
{
char type[100];
FILE *fp;
+ int ret = 0;
fp = fopen("/proc/mounts", "r");
if (fp == NULL) {
@@ -26,29 +27,33 @@ static int find_debugfs(char *debugfs)
}
while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n",
- debugfs, type) == 2) {
- if (strcmp(type, "debugfs") == 0) {
+ mount_point, type) == 2) {
+ if (strcmp(type, fstype) == 0) {
+ ret = 1;
break;
}
}
fclose(fp);
- if (strcmp(type, "debugfs") != 0) {
- return 0;
- }
- return 1;
+ return ret;
}
bool ftrace_init(void)
{
- char debugfs[PATH_MAX];
+ char mount_point[PATH_MAX];
char path[PATH_MAX];
- int debugfs_found;
+ int tracefs_found;
int trace_fd = -1;
+ const char *subdir = "";
+
+ tracefs_found = find_mount(mount_point, "tracefs");
+ if (!tracefs_found) {
+ tracefs_found = find_mount(mount_point, "debugfs");
+ subdir = "/tracing";
+ }
- debugfs_found = find_debugfs(debugfs);
- if (debugfs_found) {
- snprintf(path, PATH_MAX, "%s/tracing/tracing_on", debugfs);
+ if (tracefs_found) {
+ snprintf(path, PATH_MAX, "%s%s/tracing_on", mount_point, subdir);
trace_fd = open(path, O_WRONLY);
if (trace_fd < 0) {
if (errno == EACCES) {
@@ -67,14 +72,14 @@ bool ftrace_init(void)
}
close(trace_fd);
}
- snprintf(path, PATH_MAX, "%s/tracing/trace_marker", debugfs);
+ snprintf(path, PATH_MAX, "%s%s/trace_marker", mount_point, subdir);
trace_marker_fd = open(path, O_WRONLY);
if (trace_marker_fd < 0) {
perror("Could not open ftrace 'trace_marker' file");
return false;
}
} else {
- fprintf(stderr, "debugfs is not mounted\n");
+ fprintf(stderr, "tracefs is not mounted\n");
return false;
}
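
The ftrace.c rewrite above probes for a tracefs mount first and only falls back to debugfs plus its /tracing subdirectory. A standalone sketch of the same probe order, reusing find_mount() from the hunk (resolve_trace_dir() is a hypothetical helper):

/* Sketch: locate the directory that contains tracing_on and trace_marker. */
static int resolve_trace_dir(char *out, size_t len)
{
    char mount_point[PATH_MAX];

    if (find_mount(mount_point, "tracefs")) {
        return snprintf(out, len, "%s", mount_point);
    }
    if (find_mount(mount_point, "debugfs")) {
        return snprintf(out, len, "%s/tracing", mount_point);
    }
    return -1;   /* neither filesystem is mounted */
}
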
diff --git a/ui/input-keymap.c b/ui/input-keymap.c
index 3a19a169f5..663986a17b 100644
--- a/ui/input-keymap.c
+++ b/ui/input-keymap.c
@@ -8,6 +8,7 @@
#include "ui/input-keymap-linux-to-qcode.c"
#include "ui/input-keymap-qcode-to-qnum.c"
#include "ui/input-keymap-qnum-to-qcode.c"
+#include "ui/input-keymap-qcode-to-linux.c"
int qemu_input_linux_to_qcode(unsigned int lnx)
{
diff --git a/util/hbitmap.c b/util/hbitmap.c
index 2f9d0fdbd0..289778a55c 100644
--- a/util/hbitmap.c
+++ b/util/hbitmap.c
@@ -188,6 +188,45 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
}
}
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
+{
+ size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
+ unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
+ uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
+ unsigned long cur = last_lev[pos];
+ unsigned start_bit_offset =
+ (start >> hb->granularity) & (BITS_PER_LONG - 1);
+ int64_t res;
+
+ cur |= (1UL << start_bit_offset) - 1;
+ assert((start >> hb->granularity) < hb->size);
+
+ if (cur == (unsigned long)-1) {
+ do {
+ pos++;
+ } while (pos < sz && last_lev[pos] == (unsigned long)-1);
+
+ if (pos >= sz) {
+ return -1;
+ }
+
+ cur = last_lev[pos];
+ }
+
+ res = (pos << BITS_PER_LEVEL) + ctol(cur);
+ if (res >= hb->size) {
+ return -1;
+ }
+
+ res = res << hb->granularity;
+ if (res < start) {
+ assert(((start - res) >> hb->granularity) == 0);
+ return start;
+ }
+
+ return res;
+}
+
bool hbitmap_empty(const HBitmap *hb)
{
return hb->count == 0;
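
hbitmap_next_zero(), added above, returns the offset of the first clear bit at or after start, or -1 if every remaining bit is set. A hedged usage sketch against the existing HBitmap API, assuming granularity 0 (sizes and offsets are illustrative):

/* Sketch: locating the first unset bit after an offset. */
HBitmap *hb = hbitmap_alloc(1024, 0);     /* 1024 bits, granularity 0 */

hbitmap_set(hb, 0, 10);                   /* mark bits 0..9 */
assert(hbitmap_next_zero(hb, 0) == 10);   /* first clear bit */
assert(hbitmap_next_zero(hb, 10) == 10);  /* start itself may be returned */
hbitmap_free(hb);
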
diff --git a/util/memfd.c b/util/memfd.c
index 4571d1aba8..412e94a405 100644
--- a/util/memfd.c
+++ b/util/memfd.c
@@ -31,9 +31,7 @@
#include "qemu/memfd.h"
-#ifdef CONFIG_MEMFD
-#include <sys/memfd.h>
-#elif defined CONFIG_LINUX
+#if defined CONFIG_LINUX && !defined CONFIG_MEMFD
#include <sys/syscall.h>
#include <asm/unistd.h>
diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index 3ec029a9ea..2fd8cbcc6f 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -35,6 +35,10 @@ size_t qemu_fd_getpagesize(int fd)
return fs.f_bsize;
}
}
+#ifdef __sparc__
+ /* SPARC Linux needs greater alignment than the pagesize */
+ return QEMU_VMALLOC_ALIGN;
+#endif
#endif
return getpagesize();
@@ -60,6 +64,10 @@ size_t qemu_mempath_getpagesize(const char *mem_path)
/* It's hugepage, return the huge page size */
return fs.f_bsize;
}
+#ifdef __sparc__
+ /* SPARC Linux needs greater alignment than the pagesize */
+ return QEMU_VMALLOC_ALIGN;
+#endif
#endif
return getpagesize();
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
index 254349cdbb..afb678fbe5 100644
--- a/util/qemu-coroutine-sleep.c
+++ b/util/qemu-coroutine-sleep.c
@@ -31,9 +31,9 @@ static void co_sleep_cb(void *opaque)
aio_co_wake(sleep_cb->co);
}
-void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
- int64_t ns)
+void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns)
{
+ AioContext *ctx = qemu_get_current_aio_context();
CoSleepCB sleep_cb = {
.co = qemu_coroutine_self(),
};
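
With the rename above, callers of the coroutine sleep helper no longer pass an AioContext; qemu_co_sleep_ns() resolves the current coroutine's context itself. A hedged before/after sketch of a call site (the 100ms delay is illustrative):

/* Old form: the caller had to name an AioContext explicitly. */
co_aio_sleep_ns(qemu_get_current_aio_context(), QEMU_CLOCK_REALTIME,
                100 * 1000 * 1000);

/* New form: the context is picked up via qemu_get_current_aio_context(). */
qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100 * 1000 * 1000);
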
diff --git a/util/qemu-option.c b/util/qemu-option.c
index 9b1dc8093b..553d3dc552 100644
--- a/util/qemu-option.c
+++ b/util/qemu-option.c
@@ -91,40 +91,6 @@ const char *get_opt_value(char *buf, int buf_size, const char *p)
return p;
}
-int get_next_param_value(char *buf, int buf_size,
- const char *tag, const char **pstr)
-{
- const char *p;
- char option[128];
-
- p = *pstr;
- for(;;) {
- p = get_opt_name(option, sizeof(option), p, '=');
- if (*p != '=')
- break;
- p++;
- if (!strcmp(tag, option)) {
- *pstr = get_opt_value(buf, buf_size, p);
- if (**pstr == ',') {
- (*pstr)++;
- }
- return strlen(buf);
- } else {
- p = get_opt_value(NULL, 0, p);
- }
- if (*p != ',')
- break;
- p++;
- }
- return 0;
-}
-
-int get_param_value(char *buf, int buf_size,
- const char *tag, const char *str)
-{
- return get_next_param_value(buf, buf_size, tag, &str);
-}
-
static void parse_option_bool(const char *name, const char *value, bool *ret,
Error **errp)
{
@@ -766,7 +732,7 @@ void qemu_opts_print(QemuOpts *opts, const char *separator)
}
for (; desc && desc->name; desc++) {
const char *value;
- QemuOpt *opt = qemu_opt_find(opts, desc->name);
+ opt = qemu_opt_find(opts, desc->name);
value = opt ? opt->str : desc->def_value_str;
if (!value) {
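
The final qemu-option.c hunk drops the inner QemuOpt *opt declaration in favour of assigning to a variable presumably declared earlier in qemu_opts_print() (outside this hunk), avoiding a shadowed local. A minimal sketch of the shadowing pattern being removed (names simplified):

QemuOpt *opt = NULL;                                 /* outer variable */

for (; desc && desc->name; desc++) {
    QemuOpt *opt = qemu_opt_find(opts, desc->name);  /* shadows the outer opt */
    /* ... code after the loop that reads the outer opt never sees this ... */
}
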
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index a1cf47e625..d6a1e1759e 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -26,7 +26,6 @@
#include "qapi/error.h"
#include "qemu/sockets.h"
#include "qemu/main-loop.h"
-#include "qapi/clone-visitor.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qobject-output-visitor.h"
#include "qapi-visit.h"
@@ -199,7 +198,6 @@ static int try_bind(int socket, InetSocketAddress *saddr, struct addrinfo *e)
static int inet_listen_saddr(InetSocketAddress *saddr,
int port_offset,
- bool update_addr,
Error **errp)
{
struct addrinfo ai,*res,*e;
@@ -327,15 +325,6 @@ listen_failed:
return -1;
listen_ok:
- if (update_addr) {
- g_free(saddr->host);
- saddr->host = g_strdup(uaddr);
- g_free(saddr->port);
- saddr->port = g_strdup_printf("%d",
- inet_getport(e) - port_offset);
- saddr->has_ipv6 = saddr->ipv6 = e->ai_family == PF_INET6;
- saddr->has_ipv4 = saddr->ipv4 = e->ai_family != PF_INET6;
- }
freeaddrinfo(res);
return slisten;
}
@@ -791,7 +780,6 @@ static int vsock_parse(VsockSocketAddress *addr, const char *str,
#ifndef _WIN32
static int unix_listen_saddr(UnixSocketAddress *saddr,
- bool update_addr,
Error **errp)
{
struct sockaddr_un un;
@@ -856,12 +844,7 @@ static int unix_listen_saddr(UnixSocketAddress *saddr,
goto err;
}
- if (update_addr && pathbuf) {
- g_free(saddr->path);
- saddr->path = pathbuf;
- } else {
- g_free(pathbuf);
- }
+ g_free(pathbuf);
return sock;
err:
@@ -921,7 +904,6 @@ static int unix_connect_saddr(UnixSocketAddress *saddr, Error **errp)
#else
static int unix_listen_saddr(UnixSocketAddress *saddr,
- bool update_addr,
Error **errp)
{
error_setg(errp, "unix sockets are not available on windows");
@@ -938,7 +920,7 @@ static int unix_connect_saddr(UnixSocketAddress *saddr, Error **errp)
#endif
/* compatibility wrapper */
-int unix_listen(const char *str, char *ostr, int olen, Error **errp)
+int unix_listen(const char *str, Error **errp)
{
char *path, *optstr;
int sock, len;
@@ -958,11 +940,7 @@ int unix_listen(const char *str, char *ostr, int olen, Error **errp)
saddr->path = g_strdup(str);
}
- sock = unix_listen_saddr(saddr, true, errp);
-
- if (sock != -1 && ostr) {
- snprintf(ostr, olen, "%s%s", saddr->path, optstr ? optstr : "");
- }
+ sock = unix_listen_saddr(saddr, errp);
qapi_free_UnixSocketAddress(saddr);
return sock;
@@ -1053,11 +1031,11 @@ int socket_listen(SocketAddress *addr, Error **errp)
switch (addr->type) {
case SOCKET_ADDRESS_TYPE_INET:
- fd = inet_listen_saddr(&addr->u.inet, 0, false, errp);
+ fd = inet_listen_saddr(&addr->u.inet, 0, errp);
break;
case SOCKET_ADDRESS_TYPE_UNIX:
- fd = unix_listen_saddr(&addr->u.q_unix, false, errp);
+ fd = unix_listen_saddr(&addr->u.q_unix, errp);
break;
case SOCKET_ADDRESS_TYPE_FD:
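
After the qemu-sockets.c changes above, the listen helpers no longer rewrite the caller's address (the update_addr parameter is gone) and unix_listen() loses its ostr/olen out-parameters. A hedged sketch of the simplified call (the socket path is illustrative):

Error *err = NULL;
int sock;

/* Previously: unix_listen(str, ostr, olen, &err) copied the bound path back. */
sock = unix_listen("/tmp/example.sock", &err);
if (sock < 0) {
    error_report_err(err);
}
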
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index 7306475899..959a57079f 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -479,15 +479,29 @@ static void __attribute__((constructor)) qemu_thread_atexit_init(void)
}
-/* Attempt to set the threads name; note that this is for debug, so
- * we're not going to fail if we can't set it.
- */
-static void qemu_thread_set_name(QemuThread *thread, const char *name)
-{
#ifdef CONFIG_PTHREAD_SETNAME_NP
- pthread_setname_np(thread->thread, name);
-#endif
+typedef struct {
+ void *(*start_routine)(void *);
+ void *arg;
+ char *name;
+} QemuThreadArgs;
+
+static void *qemu_thread_start(void *args)
+{
+ QemuThreadArgs *qemu_thread_args = args;
+ void *(*start_routine)(void *) = qemu_thread_args->start_routine;
+ void *arg = qemu_thread_args->arg;
+
+ /* Attempt to set the threads name; note that this is for debug, so
+ * we're not going to fail if we can't set it.
+ */
+ pthread_setname_np(pthread_self(), qemu_thread_args->name);
+ g_free(qemu_thread_args->name);
+ g_free(qemu_thread_args);
+ return start_routine(arg);
}
+#endif
+
void qemu_thread_create(QemuThread *thread, const char *name,
void *(*start_routine)(void*),
@@ -502,23 +516,34 @@ void qemu_thread_create(QemuThread *thread, const char *name,
error_exit(err, __func__);
}
+ if (mode == QEMU_THREAD_DETACHED) {
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+ }
+
/* Leave signal handling to the iothread. */
sigfillset(&set);
pthread_sigmask(SIG_SETMASK, &set, &oldset);
- err = pthread_create(&thread->thread, &attr, start_routine, arg);
- if (err)
- error_exit(err, __func__);
+#ifdef CONFIG_PTHREAD_SETNAME_NP
if (name_threads) {
- qemu_thread_set_name(thread, name);
+ QemuThreadArgs *qemu_thread_args;
+ qemu_thread_args = g_new0(QemuThreadArgs, 1);
+ qemu_thread_args->name = g_strdup(name);
+ qemu_thread_args->start_routine = start_routine;
+ qemu_thread_args->arg = arg;
+
+ err = pthread_create(&thread->thread, &attr,
+ qemu_thread_start, qemu_thread_args);
+ } else
+#endif
+ {
+ err = pthread_create(&thread->thread, &attr,
+ start_routine, arg);
}
- if (mode == QEMU_THREAD_DETACHED) {
- err = pthread_detach(thread->thread);
- if (err) {
- error_exit(err, __func__);
- }
- }
+ if (err)
+ error_exit(err, __func__);
+
pthread_sigmask(SIG_SETMASK, &oldset, NULL);
pthread_attr_destroy(&attr);
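
The qemu-thread-posix.c restructuring above keeps the qemu_thread_create() contract unchanged for callers: the thread name is now applied from inside the new thread via the qemu_thread_start() trampoline, and detached threads are created detached rather than detached after the fact. A caller-side sketch, unaffected by the patch (the worker names are hypothetical):

static void *example_worker(void *opaque)
{
    return NULL;                        /* placeholder body */
}

static void spawn_example_worker(void)
{
    QemuThread thread;

    /* "example-worker" ends up in pthread_setname_np() on the new thread. */
    qemu_thread_create(&thread, "example-worker", example_worker, NULL,
                       QEMU_THREAD_JOINABLE);
    qemu_thread_join(&thread);
}
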
diff --git a/util/rcu.c b/util/rcu.c
index ca5a63e36a..f4d09c8304 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -32,6 +32,9 @@
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
+#if defined(CONFIG_MALLOC_TRIM)
+#include <malloc.h>
+#endif
/*
* Global grace period counter. Bit 0 is always one in rcu_gp_ctr.
@@ -246,6 +249,9 @@ static void *call_rcu_thread(void *opaque)
qemu_event_reset(&rcu_call_ready_event);
n = atomic_read(&rcu_call_count);
if (n == 0) {
+#if defined(CONFIG_MALLOC_TRIM)
+ malloc_trim(4 * 1024 * 1024);
+#endif
qemu_event_wait(&rcu_call_ready_event);
}
}
diff --git a/util/uuid.c b/util/uuid.c
index dd6b5fdf05..ebf06c049a 100644
--- a/util/uuid.c
+++ b/util/uuid.c
@@ -41,7 +41,12 @@ void qemu_uuid_generate(QemuUUID *uuid)
int qemu_uuid_is_null(const QemuUUID *uu)
{
static QemuUUID null_uuid;
- return memcmp(uu, &null_uuid, sizeof(QemuUUID)) == 0;
+ return qemu_uuid_is_equal(uu, &null_uuid);
+}
+
+int qemu_uuid_is_equal(const QemuUUID *lhv, const QemuUUID *rhv)
+{
+ return memcmp(lhv, rhv, sizeof(QemuUUID)) == 0;
}
void qemu_uuid_unparse(const QemuUUID *uuid, char *out)
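
qemu_uuid_is_null() above is now expressed through the new qemu_uuid_is_equal() helper. A hedged usage sketch (the compared values are illustrative):

/* Sketch: comparing UUIDs with the new helper. */
QemuUUID a, b;

qemu_uuid_generate(&a);
b = a;
assert(qemu_uuid_is_equal(&a, &b));     /* identical bytes compare equal */
assert(!qemu_uuid_is_null(&a));         /* a generated UUID is non-null */
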
diff --git a/vl.c b/vl.c
index 1ad1c04637..444b7507da 100644
--- a/vl.c
+++ b/vl.c
@@ -57,9 +57,9 @@ int main(int argc, char **argv)
#include "hw/boards.h"
#include "sysemu/accel.h"
#include "hw/usb.h"
-#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "hw/scsi/scsi.h"
+#include "hw/display/vga.h"
#include "hw/bt.h"
#include "sysemu/watchdog.h"
#include "hw/smbios/smbios.h"
@@ -95,7 +95,6 @@ int main(int argc, char **argv)
#include "sysemu/kvm.h"
#include "sysemu/hax.h"
#include "qapi/qobject-input-visitor.h"
-#include "qapi/qobject-input-visitor.h"
#include "qapi-visit.h"
#include "qapi/qmp/qjson.h"
#include "qemu/option.h"
@@ -1479,28 +1478,6 @@ done:
return 0;
}
-static int usb_device_del(const char *devname)
-{
- int bus_num, addr;
- const char *p;
-
- if (strstart(devname, "host:", &p)) {
- return -1;
- }
-
- if (!machine_usb(current_machine)) {
- return -1;
- }
-
- p = strchr(devname, '.');
- if (!p)
- return -1;
- bus_num = strtoul(devname, NULL, 0);
- addr = strtoul(p + 1, NULL, 0);
-
- return usb_device_delete_addr(bus_num, addr);
-}
-
static int usb_parse(const char *cmdline)
{
int r;
@@ -1511,28 +1488,6 @@ static int usb_parse(const char *cmdline)
return r;
}
-void hmp_usb_add(Monitor *mon, const QDict *qdict)
-{
- const char *devname = qdict_get_str(qdict, "devname");
-
- error_report("usb_add is deprecated, please use device_add instead");
-
- if (usb_device_add(devname) < 0) {
- error_report("could not add USB device '%s'", devname);
- }
-}
-
-void hmp_usb_del(Monitor *mon, const QDict *qdict)
-{
- const char *devname = qdict_get_str(qdict, "devname");
-
- error_report("usb_del is deprecated, please use device_del instead");
-
- if (usb_device_del(devname) < 0) {
- error_report("could not delete USB device '%s'", devname);
- }
-}
-
/***********************************************************/
/* machine registration */
@@ -3097,9 +3052,8 @@ int main(int argc, char **argv, char **envp)
const char *boot_order = NULL;
const char *boot_once = NULL;
DisplayState *ds;
- int cyls, heads, secs, translation;
QemuOpts *opts, *machine_opts;
- QemuOpts *hda_opts = NULL, *icount_opts = NULL, *accel_opts = NULL;
+ QemuOpts *icount_opts = NULL, *accel_opts = NULL;
QemuOptsList *olist;
int optind;
const char *optarg;
@@ -3191,8 +3145,6 @@ int main(int argc, char **argv, char **envp)
cpu_model = NULL;
snapshot = 0;
- cyls = heads = secs = 0;
- translation = BIOS_ATA_TRANSLATION_AUTO;
nb_nics = 0;
@@ -3231,7 +3183,7 @@ int main(int argc, char **argv, char **envp)
if (optind >= argc)
break;
if (argv[optind][0] != '-') {
- hda_opts = drive_add(IF_DEFAULT, 0, argv[optind++], HD_OPTS);
+ drive_add(IF_DEFAULT, 0, argv[optind++], HD_OPTS);
} else {
const QEMUOption *popt;
@@ -3251,21 +3203,6 @@ int main(int argc, char **argv, char **envp)
cpu_model = optarg;
break;
case QEMU_OPTION_hda:
- {
- char buf[256];
- if (cyls == 0)
- snprintf(buf, sizeof(buf), "%s", HD_OPTS);
- else
- snprintf(buf, sizeof(buf),
- "%s,cyls=%d,heads=%d,secs=%d%s",
- HD_OPTS , cyls, heads, secs,
- translation == BIOS_ATA_TRANSLATION_LBA ?
- ",trans=lba" :
- translation == BIOS_ATA_TRANSLATION_NONE ?
- ",trans=none" : "");
- drive_add(IF_DEFAULT, 0, optarg, buf);
- break;
- }
case QEMU_OPTION_hdb:
case QEMU_OPTION_hdc:
case QEMU_OPTION_hdd:
@@ -3316,70 +3253,6 @@ int main(int argc, char **argv, char **envp)
case QEMU_OPTION_snapshot:
snapshot = 1;
break;
- case QEMU_OPTION_hdachs:
- {
- const char *p;
- p = optarg;
- cyls = strtol(p, (char **)&p, 0);
- if (cyls < 1 || cyls > 16383)
- goto chs_fail;
- if (*p != ',')
- goto chs_fail;
- p++;
- heads = strtol(p, (char **)&p, 0);
- if (heads < 1 || heads > 16)
- goto chs_fail;
- if (*p != ',')
- goto chs_fail;
- p++;
- secs = strtol(p, (char **)&p, 0);
- if (secs < 1 || secs > 63)
- goto chs_fail;
- if (*p == ',') {
- p++;
- if (!strcmp(p, "large")) {
- translation = BIOS_ATA_TRANSLATION_LARGE;
- } else if (!strcmp(p, "rechs")) {
- translation = BIOS_ATA_TRANSLATION_RECHS;
- } else if (!strcmp(p, "none")) {
- translation = BIOS_ATA_TRANSLATION_NONE;
- } else if (!strcmp(p, "lba")) {
- translation = BIOS_ATA_TRANSLATION_LBA;
- } else if (!strcmp(p, "auto")) {
- translation = BIOS_ATA_TRANSLATION_AUTO;
- } else {
- goto chs_fail;
- }
- } else if (*p != '\0') {
- chs_fail:
- error_report("invalid physical CHS format");
- exit(1);
- }
- if (hda_opts != NULL) {
- qemu_opt_set_number(hda_opts, "cyls", cyls,
- &error_abort);
- qemu_opt_set_number(hda_opts, "heads", heads,
- &error_abort);
- qemu_opt_set_number(hda_opts, "secs", secs,
- &error_abort);
- if (translation == BIOS_ATA_TRANSLATION_LARGE) {
- qemu_opt_set(hda_opts, "trans", "large",
- &error_abort);
- } else if (translation == BIOS_ATA_TRANSLATION_RECHS) {
- qemu_opt_set(hda_opts, "trans", "rechs",
- &error_abort);
- } else if (translation == BIOS_ATA_TRANSLATION_LBA) {
- qemu_opt_set(hda_opts, "trans", "lba",
- &error_abort);
- } else if (translation == BIOS_ATA_TRANSLATION_NONE) {
- qemu_opt_set(hda_opts, "trans", "none",
- &error_abort);
- }
- }
- }
- error_report("'-hdachs' is deprecated, please use '-device"
- " ide-hd,cyls=c,heads=h,secs=s,...' instead");
- break;
case QEMU_OPTION_numa:
opts = qemu_opts_parse_noisily(qemu_find_opts("numa"),
optarg, true);
@@ -3862,10 +3735,6 @@ int main(int argc, char **argv, char **envp)
olist = qemu_find_opts("machine");
qemu_opts_parse_noisily(olist, "accel=tcg", false);
break;
- case QEMU_OPTION_no_kvm_pit: {
- warn_report("ignoring deprecated option");
- break;
- }
case QEMU_OPTION_no_kvm_pit_reinjection: {
static GlobalProperty kvm_pit_lost_tick_policy = {
.driver = "kvm-pit",